2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
24 #ifdef HAVE_SYS_TIME_H
32 #include <mono/utils/memcheck.h>
34 #include <mono/metadata/assembly.h>
35 #include <mono/metadata/attrdefs.h>
36 #include <mono/metadata/loader.h>
37 #include <mono/metadata/tabledefs.h>
38 #include <mono/metadata/class.h>
39 #include <mono/metadata/object.h>
40 #include <mono/metadata/exception.h>
41 #include <mono/metadata/opcodes.h>
42 #include <mono/metadata/mono-endian.h>
43 #include <mono/metadata/tokentype.h>
44 #include <mono/metadata/tabledefs.h>
45 #include <mono/metadata/marshal.h>
46 #include <mono/metadata/debug-helpers.h>
47 #include <mono/metadata/mono-debug.h>
48 #include <mono/metadata/gc-internal.h>
49 #include <mono/metadata/security-manager.h>
50 #include <mono/metadata/threads-types.h>
51 #include <mono/metadata/security-core-clr.h>
52 #include <mono/metadata/monitor.h>
53 #include <mono/metadata/profiler-private.h>
54 #include <mono/metadata/profiler.h>
55 #include <mono/metadata/debug-mono-symfile.h>
56 #include <mono/utils/mono-compiler.h>
57 #include <mono/utils/mono-memory-model.h>
58 #include <mono/metadata/mono-basic-block.h>
65 #include "jit-icalls.h"
67 #include "debugger-agent.h"
/*
 * Inliner cost heuristics and error-exit macros.
 * The *_FAILURE macros record an exception kind (and usually a message)
 * on the MonoCompile and then jump to an error label, so they can only
 * be used inside functions that declare those labels (e.g. the main
 * IL-to-IR loop with its inline_failure/exception_exit labels).
 */
69 #define BRANCH_COST 10
70 #define INLINE_LENGTH_LIMIT 20
/* Abort the current inlining attempt; logs the reason at verbosity >= 2. */
71 #define INLINE_FAILURE(msg) do { \
72 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE)) { \
73 if (cfg->verbose_level >= 2) \
74 printf ("inline failed: %s\n", msg); \
75 goto inline_failure; \
/* Bail out if an earlier step already set an exception on the cfg. */
78 #define CHECK_CFG_EXCEPTION do {\
79 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
/* Record a MethodAccessException naming callee and caller, then bail.
 * Requires 'method' and 'cil_method' to be in scope at the use site. */
82 #define METHOD_ACCESS_FAILURE do { \
83 char *method_fname = mono_method_full_name (method, TRUE); \
84 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
85 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS); \
86 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
87 g_free (method_fname); \
88 g_free (cil_method_fname); \
89 goto exception_exit; \
/* Record a FieldAccessException naming field and caller, then bail.
 * Requires 'method' and 'field' to be in scope at the use site. */
91 #define FIELD_ACCESS_FAILURE do { \
92 char *method_fname = mono_method_full_name (method, TRUE); \
93 char *field_fname = mono_field_full_name (field); \
94 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS); \
95 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
96 g_free (method_fname); \
97 g_free (field_fname); \
98 goto exception_exit; \
/* Mark generic sharing as failed for this method/opcode (only when a
 * generic_sharing_context is active) and bail out of compilation. */
100 #define GENERIC_SHARING_FAILURE(opcode) do { \
101 if (cfg->generic_sharing_context) { \
102 if (cfg->verbose_level > 2) \
103 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
104 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
105 goto exception_exit; \
/* Record an OutOfMemoryException and bail. */
108 #define OUT_OF_MEMORY_FAILURE do { \
109 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
110 goto exception_exit; \
112 /* Determine whenever 'ins' represents a load of the 'this' argument */
113 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for opcode-translation helpers defined later in
 * this file, plus a few emitters shared with other mini-*.c files. */
115 static int ldind_to_load_membase (int opcode);
116 static int stind_to_store_membase (int opcode);
118 int mono_op_to_op_imm (int opcode);
119 int mono_op_to_op_imm_noemul (int opcode);
121 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
122 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
123 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
125 /* helper methods signatures */
/* Cached icall signatures for the various runtime trampolines; created
 * lazily once by mono_create_helper_signatures () below. */
126 static MonoMethodSignature *helper_sig_class_init_trampoline = NULL;
127 static MonoMethodSignature *helper_sig_domain_get = NULL;
128 static MonoMethodSignature *helper_sig_generic_class_init_trampoline = NULL;
129 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm = NULL;
130 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline = NULL;
131 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline = NULL;
132 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm = NULL;
/*
 * Per-opcode metadata tables, generated by including mini-ops.h with
 * MINI_OP/MINI_OP3 expanded in different ways (X-macro technique).
 */
135 * Instruction metadata
/* First expansion: emit dest/src register kinds for every opcode. */
143 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
144 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
150 #if SIZEOF_REGISTER == 8
155 /* keep in sync with the enum in mini.h */
158 #include "mini-ops.h"
/* Second expansion: compute the source-register count per opcode. */
163 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
164 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
166 * This should contain the index of the last sreg + 1. This is not the same
167 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
169 const gint8 ins_sreg_counts[] = {
170 #include "mini-ops.h"
/* Initialize a MonoMethodVar; 0xffff marks "no first use seen yet". */
175 #define MONO_INIT_VARINFO(vi,id) do { \
176 (vi)->range.first_use.pos.bid = 0xffff; \
/*
 * mono_inst_set_src_registers:
 * Copy the three source vregs from REGS into INS (sreg1..sreg3).
 * REGS must point to at least three ints.
 */
182 mono_inst_set_src_registers (MonoInst *ins, int *regs)
184 ins->sreg1 = regs [0];
185 ins->sreg2 = regs [1];
186 ins->sreg3 = regs [2];
/*
 * Public wrappers around the cfg-local virtual-register allocators
 * (alloc_ireg/freg/preg/dreg and the GC-typed variants).
 */
/* Allocate an integer vreg. */
190 mono_alloc_ireg (MonoCompile *cfg)
192 return alloc_ireg (cfg);
/* Allocate a floating-point vreg. */
196 mono_alloc_freg (MonoCompile *cfg)
198 return alloc_freg (cfg);
/* Allocate a pointer-sized vreg. */
202 mono_alloc_preg (MonoCompile *cfg)
204 return alloc_preg (cfg);
/* Allocate a destination vreg suited to the given eval-stack type. */
208 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
210 return alloc_dreg (cfg, stack_type);
214 * mono_alloc_ireg_ref:
216 * Allocate an IREG, and mark it as holding a GC ref.
219 mono_alloc_ireg_ref (MonoCompile *cfg)
221 return alloc_ireg_ref (cfg);
225 * mono_alloc_ireg_mp:
227 * Allocate an IREG, and mark it as holding a managed pointer.
230 mono_alloc_ireg_mp (MonoCompile *cfg)
232 return alloc_ireg_mp (cfg);
236 * mono_alloc_ireg_copy:
238 * Allocate an IREG with the same GC type as VREG.
241 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
/* Propagate VREG's GC classification (ref / managed ptr / plain int). */
243 if (vreg_is_ref (cfg, vreg))
244 return alloc_ireg_ref (cfg);
245 else if (vreg_is_mp (cfg, vreg))
246 return alloc_ireg_mp (cfg);
248 return alloc_ireg (cfg);
/*
 * mono_type_to_regmove:
 * Map a MonoType to the move opcode used to copy a value of that type
 * between vregs. Enums and generic instances are unwrapped to their
 * underlying/container type and re-dispatched; unknown types abort.
 */
252 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
258 switch (type->type) {
261 case MONO_TYPE_BOOLEAN:
273 case MONO_TYPE_FNPTR:
275 case MONO_TYPE_CLASS:
276 case MONO_TYPE_STRING:
277 case MONO_TYPE_OBJECT:
278 case MONO_TYPE_SZARRAY:
279 case MONO_TYPE_ARRAY:
283 #if SIZEOF_REGISTER == 8
292 case MONO_TYPE_VALUETYPE:
/* Enums move like their underlying integral type. */
293 if (type->data.klass->enumtype) {
294 type = mono_class_enum_basetype (type->data.klass);
/* SIMD value types get their own move opcode family. */
297 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
300 case MONO_TYPE_TYPEDBYREF:
302 case MONO_TYPE_GENERICINST:
303 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables are only legal here under generic sharing. */
307 g_assert (cfg->generic_sharing_context);
310 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb:
 * Debug helper: print a basic block's in/out edges (block number and
 * depth-first number) and then every instruction in its code list.
 */
316 mono_print_bb (MonoBasicBlock *bb, const char *msg)
321 printf ("\n%s %d: [IN: ", msg, bb->block_num);
322 for (i = 0; i < bb->in_count; ++i)
323 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
325 for (i = 0; i < bb->out_count; ++i)
326 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
328 for (tree = bb->code; tree; tree = tree->next)
329 mono_print_ins_index (-1, tree);
/*
 * mono_create_helper_signatures:
 * Populate the cached helper_sig_* trampoline signatures declared above.
 * Signature strings are icall-style "ret [args...]" descriptors.
 */
333 mono_create_helper_signatures (void)
335 helper_sig_domain_get = mono_create_icall_signature ("ptr");
336 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
337 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
338 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
339 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
340 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
341 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
/*
 * IR-construction macros used by the main IL-decoding loop. They rely
 * on loop-local state (cfg, ins, sp, bblock, ip, target, stack_start)
 * being in scope at the expansion site.
 */
345 * Can't put this at the beginning, since other files reference stuff from this
/* Mark the method unverified/unloadable and jump to the matching label;
 * optionally trap first when the corresponding debug option is set. */
350 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
352 #define LOAD_ERROR do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto load_error; } while (0)
354 #define TYPE_LOAD_ERROR(klass) do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else { cfg->exception_ptr = klass; goto load_error; } } while (0)
/* Fetch (or lazily create and register) the bblock starting at IP,
 * using the cil_offset_to_bb cache; out-of-range IP is unverified. */
356 #define GET_BBLOCK(cfg,tblock,ip) do { \
357 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
359 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
360 NEW_BBLOCK (cfg, (tblock)); \
361 (tblock)->cil_code = (ip); \
362 ADD_BBLOCK (cfg, (tblock)); \
/* x86/amd64 only: emit an OP_X86_LEA computing sr1 + (sr2 << shift) + imm
 * into a fresh managed-pointer vreg. */
366 #if defined(TARGET_X86) || defined(TARGET_AMD64)
367 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
368 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
369 (dest)->dreg = alloc_ireg_mp ((cfg)); \
370 (dest)->sreg1 = (sr1); \
371 (dest)->sreg2 = (sr2); \
372 (dest)->inst_imm = (imm); \
373 (dest)->backend.shift_amount = (shift); \
374 MONO_ADD_INS ((cfg)->cbb, (dest)); \
/* On 64-bit targets, sign-extend an I4 operand to pointer width before a
 * mixed PTR/I4 binop; a no-op on 32-bit targets. */
378 #if SIZEOF_REGISTER == 8
379 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
380 /* FIXME: Need to add many more cases */ \
381 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
383 int dr = alloc_preg (cfg); \
384 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
385 (ins)->sreg2 = widen->dreg; \
389 #define ADD_WIDEN_OP(ins, arg1, arg2)
/* Pop two stack values, type-check via type_from_op, widen if needed,
 * emit the binop and push the (possibly decomposed) result. */
392 #define ADD_BINOP(op) do { \
393 MONO_INST_NEW (cfg, ins, (op)); \
395 ins->sreg1 = sp [0]->dreg; \
396 ins->sreg2 = sp [1]->dreg; \
397 type_from_op (ins, sp [0], sp [1]); \
399 /* Have to insert a widening op */ \
400 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
401 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
402 MONO_ADD_INS ((cfg)->cbb, (ins)); \
403 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
/* Same as ADD_BINOP but for single-operand opcodes. */
406 #define ADD_UNOP(op) do { \
407 MONO_INST_NEW (cfg, ins, (op)); \
409 ins->sreg1 = sp [0]->dreg; \
410 type_from_op (ins, sp [0], NULL); \
412 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
413 MONO_ADD_INS ((cfg)->cbb, (ins)); \
414 *sp++ = mono_decompose_opcode (cfg, ins); \
/* Emit a compare + conditional branch pair: link the current bblock to
 * the branch target (true edge) and to next_block or the fall-through
 * bblock (false edge), flushing any pending stack values first. */
417 #define ADD_BINCOND(next_block) do { \
420 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
421 cmp->sreg1 = sp [0]->dreg; \
422 cmp->sreg2 = sp [1]->dreg; \
423 type_from_op (cmp, sp [0], sp [1]); \
425 type_from_op (ins, sp [0], sp [1]); \
426 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
427 GET_BBLOCK (cfg, tblock, target); \
428 link_bblock (cfg, bblock, tblock); \
429 ins->inst_true_bb = tblock; \
430 if ((next_block)) { \
431 link_bblock (cfg, bblock, (next_block)); \
432 ins->inst_false_bb = (next_block); \
433 start_new_bblock = 1; \
435 GET_BBLOCK (cfg, tblock, ip); \
436 link_bblock (cfg, bblock, tblock); \
437 ins->inst_false_bb = tblock; \
438 start_new_bblock = 2; \
440 if (sp != stack_start) { \
441 handle_stack_args (cfg, stack_start, sp - stack_start); \
442 CHECK_UNVERIFIABLE (cfg); \
444 MONO_ADD_INS (bblock, cmp); \
445 MONO_ADD_INS (bblock, ins); \
/*
 * link_bblock / mono_link_bblock:
 * Add a CFG edge from -> to, growing both the out_bb list of 'from' and
 * the in_bb list of 'to' from the compile mempool. Duplicate edges are
 * detected and not added twice. The printfs trace edges for debugging.
 */
449 * link_bblock: Links two basic blocks
451 * links two basic blocks in the control flow graph, the 'from'
452 * argument is the starting block and the 'to' argument is the block
453 * the control flow ends to after 'from'.
456 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
458 MonoBasicBlock **newa;
462 if (from->cil_code) {
464 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
466 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
469 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
471 printf ("edge from entry to exit\n");
/* Skip if the out-edge already exists. */
476 for (i = 0; i < from->out_count; ++i) {
477 if (to == from->out_bb [i]) {
/* Grow from->out_bb by one slot (mempool-allocated, so no free). */
483 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
484 for (i = 0; i < from->out_count; ++i) {
485 newa [i] = from->out_bb [i];
/* Same dance for the symmetric in-edge on 'to'. */
493 for (i = 0; i < to->in_count; ++i) {
494 if (from == to->in_bb [i]) {
500 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
501 for (i = 0; i < to->in_count; ++i) {
502 newa [i] = to->in_bb [i];
/* Public wrapper around the static link_bblock. */
511 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
513 link_bblock (cfg, from, to);
/*
 * Region token layout (from the returns below): ((clause_index + 1) << 8)
 * combined with a MONO_REGION_* kind and the clause flags in the low byte.
 */
517 * mono_find_block_region:
519 * We mark each basic block with a region ID. We use that to avoid BB
520 * optimizations when blocks are in different regions.
523 * A region token that encodes where this region is, and information
524 * about the clause owner for this block.
526 * The region encodes the try/catch/filter clause that owns this block
527 * as well as the type. -1 is a special value that represents a block
528 * that is in none of try/catch/filter.
531 mono_find_block_region (MonoCompile *cfg, int offset)
533 MonoMethodHeader *header = cfg->header;
534 MonoExceptionClause *clause;
537 for (i = 0; i < header->num_clauses; ++i) {
538 clause = &header->clauses [i];
/* Filter expressions live between filter_offset and handler_offset. */
539 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
540 (offset < (clause->handler_offset)))
541 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
/* Inside the handler body: distinguish finally/fault/catch. */
543 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
544 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
545 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
546 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
547 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
549 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Inside the protected (try) range of the clause. */
552 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
553 return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block:
 * Collect (as a GList) the exception clauses of kind 'type' whose try
 * range contains IP but not TARGET, i.e. the handlers a branch from IP
 * to TARGET would leave and which therefore must be run/considered.
 */
560 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
562 MonoMethodHeader *header = cfg->header;
563 MonoExceptionClause *clause;
567 for (i = 0; i < header->num_clauses; ++i) {
568 clause = &header->clauses [i];
569 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
570 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
571 if (clause->flags == type)
572 res = g_list_append (res, clause);
/*
 * Per-region stack-pointer variables and per-offset exception variables,
 * cached in the cfg->spvars / cfg->exvars hash tables keyed by region id
 * or IL offset. Both are flagged MONO_INST_INDIRECT so the register
 * allocator leaves them on the stack.
 */
/* Get or create the int-typed spvar for 'region'. */
579 mono_create_spvar_for_region (MonoCompile *cfg, int region)
583 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
587 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
588 /* prevent it from being register allocated */
589 var->flags |= MONO_INST_INDIRECT;
591 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up (without creating) the exception var for an IL offset. */
595 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
597 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Get or create the object-typed exception var for an IL offset. */
601 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
605 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
609 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
610 /* prevent it from being register allocated */
611 var->flags |= MONO_INST_INDIRECT;
613 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
/*
 * type_to_eval_stack_type:
 * Set inst->type (STACK_I4/I8/PTR/R8/OBJ/MP/VTYPE/INV) and inst->klass
 * for a value of metadata type 'type' pushed on the evaluation stack.
 * Enums and generic instances are unwrapped and re-dispatched.
 */
619 * Returns the type used in the eval stack when @type is loaded.
620 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
623 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
627 inst->klass = klass = mono_class_from_mono_type (type);
/* Byref values are managed pointers on the eval stack. */
629 inst->type = STACK_MP;
634 switch (type->type) {
636 inst->type = STACK_INV;
640 case MONO_TYPE_BOOLEAN:
646 inst->type = STACK_I4;
651 case MONO_TYPE_FNPTR:
652 inst->type = STACK_PTR;
654 case MONO_TYPE_CLASS:
655 case MONO_TYPE_STRING:
656 case MONO_TYPE_OBJECT:
657 case MONO_TYPE_SZARRAY:
658 case MONO_TYPE_ARRAY:
659 inst->type = STACK_OBJ;
663 inst->type = STACK_I8;
667 inst->type = STACK_R8;
669 case MONO_TYPE_VALUETYPE:
/* Enums behave like their underlying integral type. */
670 if (type->data.klass->enumtype) {
671 type = mono_class_enum_basetype (type->data.klass);
675 inst->type = STACK_VTYPE;
678 case MONO_TYPE_TYPEDBYREF:
679 inst->klass = mono_defaults.typed_reference_class;
680 inst->type = STACK_VTYPE;
682 case MONO_TYPE_GENERICINST:
683 type = &type->data.generic_class->container_class->byval_arg;
686 case MONO_TYPE_MVAR :
687 /* FIXME: all the arguments must be references for now,
688 * later look inside cfg and see if the arg num is
/* Type variables only reach here under generic sharing. */
691 g_assert (cfg->generic_sharing_context);
692 inst->type = STACK_OBJ;
695 g_error ("unknown type 0x%02x in eval stack type", type->type);
/*
 * Stack-type validation tables: each is indexed by the STACK_* kinds of
 * the operands and yields the result kind, or STACK_INV for IL that is
 * not verifiable. The *_op_map arrays below translate a generic CIL
 * opcode into the matching type-specific IR opcode by adding the
 * (target_op - base_cil_op) delta selected by the operand's stack type.
 */
700 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result kinds for arithmetic binops (add/sub/mul/div/rem). */
703 bin_num_table [STACK_MAX] [STACK_MAX] = {
704 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
705 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
706 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
707 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
708 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
709 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
710 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
711 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Result kind of unary negation per operand kind. */
716 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
719 /* reduce the size of this table */
/* Result kinds for integer-only binops (and/or/xor). */
721 bin_int_table [STACK_MAX] [STACK_MAX] = {
722 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
723 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
724 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
725 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
726 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
727 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
728 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
729 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparison legality matrix; non-zero means comparable (values >1
 * encode special cases — see uses in type_from_op). */
733 bin_comp_table [STACK_MAX] [STACK_MAX] = {
734 /* Inv i L p F & O vt */
736 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
737 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
738 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
739 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
740 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
741 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
742 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
745 /* reduce the size of this table */
/* Result kinds for shifts (left operand kind x shift-count kind). */
747 shift_table [STACK_MAX] [STACK_MAX] = {
748 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
749 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
750 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
751 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
752 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
753 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
754 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
755 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
759 * Tables to map from the non-specific opcode to the matching
760 * type-specific opcode.
762 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
764 binops_op_map [STACK_MAX] = {
765 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
768 /* handles from CEE_NEG to CEE_CONV_U8 */
770 unops_op_map [STACK_MAX] = {
771 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
774 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
776 ovfops_op_map [STACK_MAX] = {
777 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
780 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
782 ovf2ops_op_map [STACK_MAX] = {
783 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
786 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
788 ovf3ops_op_map [STACK_MAX] = {
789 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
792 /* handles from CEE_BEQ to CEE_BLT_UN */
794 beqops_op_map [STACK_MAX] = {
795 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
798 /* handles from CEE_CEQ to CEE_CLT_UN */
800 ceqops_op_map [STACK_MAX] = {
801 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
/*
 * type_from_op:
 * Given a generic IR/CIL opcode in ins->opcode and its operand(s),
 * compute the result stack type (ins->type) from the validation tables
 * above and specialize ins->opcode to the I/L/P/F variant via the
 * *_op_map deltas. Invalid combinations yield STACK_INV.
 */
805 * Sets ins->type (the type on the eval stack) according to the
806 * type of the opcode and the arguments to it.
807 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
809 * FIXME: this function sets ins->type unconditionally in some cases, but
810 * it should set it to invalid for some types (a conv.x on an object)
813 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
815 switch (ins->opcode) {
/* Arithmetic binops: result type from bin_num_table, then specialize. */
822 /* FIXME: check unverifiable args for STACK_MP */
823 ins->type = bin_num_table [src1->type] [src2->type];
824 ins->opcode += binops_op_map [ins->type];
/* Bitwise binops. */
831 ins->type = bin_int_table [src1->type] [src2->type];
832 ins->opcode += binops_op_map [ins->type];
/* Shifts. */
837 ins->type = shift_table [src1->type] [src2->type];
838 ins->opcode += binops_op_map [ins->type];
/* Compares: choose long/float/int compare from the first operand;
 * pointer-sized kinds count as long on 64-bit targets. */
843 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
844 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
845 ins->opcode = OP_LCOMPARE;
846 else if (src1->type == STACK_R8)
847 ins->opcode = OP_FCOMPARE;
849 ins->opcode = OP_ICOMPARE;
851 case OP_ICOMPARE_IMM:
852 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
853 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
854 ins->opcode = OP_LCOMPARE_IMM;
/* Conditional branches and set-on-compare opcodes. */
866 ins->opcode += beqops_op_map [src1->type];
869 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
870 ins->opcode += ceqops_op_map [src1->type];
876 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
877 ins->opcode += ceqops_op_map [src1->type];
/* Unary ops (neg/not), typed from the single operand. */
881 ins->type = neg_table [src1->type];
882 ins->opcode += unops_op_map [ins->type];
885 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
886 ins->type = src1->type;
888 ins->type = STACK_INV;
889 ins->opcode += unops_op_map [ins->type];
/* Narrowing conversions to I4. */
895 ins->type = STACK_I4;
896 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: unsigned int -> float. */
899 ins->type = STACK_R8;
900 switch (src1->type) {
903 ins->opcode = OP_ICONV_TO_R_UN;
906 ins->opcode = OP_LCONV_TO_R_UN;
/* Overflow-checked conversions to 32-bit results. */
910 case CEE_CONV_OVF_I1:
911 case CEE_CONV_OVF_U1:
912 case CEE_CONV_OVF_I2:
913 case CEE_CONV_OVF_U2:
914 case CEE_CONV_OVF_I4:
915 case CEE_CONV_OVF_U4:
916 ins->type = STACK_I4;
917 ins->opcode += ovf3ops_op_map [src1->type];
919 case CEE_CONV_OVF_I_UN:
920 case CEE_CONV_OVF_U_UN:
921 ins->type = STACK_PTR;
922 ins->opcode += ovf2ops_op_map [src1->type];
924 case CEE_CONV_OVF_I1_UN:
925 case CEE_CONV_OVF_I2_UN:
926 case CEE_CONV_OVF_I4_UN:
927 case CEE_CONV_OVF_U1_UN:
928 case CEE_CONV_OVF_U2_UN:
929 case CEE_CONV_OVF_U4_UN:
930 ins->type = STACK_I4;
931 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: to native unsigned int; a plain move when already
 * pointer-sized (the SIZEOF_VOID_P split below). */
934 ins->type = STACK_PTR;
935 switch (src1->type) {
937 ins->opcode = OP_ICONV_TO_U;
941 #if SIZEOF_VOID_P == 8
942 ins->opcode = OP_LCONV_TO_U;
944 ins->opcode = OP_MOVE;
948 ins->opcode = OP_LCONV_TO_U;
951 ins->opcode = OP_FCONV_TO_U;
/* Conversions to 64-bit results. */
957 ins->type = STACK_I8;
958 ins->opcode += unops_op_map [src1->type];
960 case CEE_CONV_OVF_I8:
961 case CEE_CONV_OVF_U8:
962 ins->type = STACK_I8;
963 ins->opcode += ovf3ops_op_map [src1->type];
965 case CEE_CONV_OVF_U8_UN:
966 case CEE_CONV_OVF_I8_UN:
967 ins->type = STACK_I8;
968 ins->opcode += ovf2ops_op_map [src1->type];
/* Conversions to floating point. */
972 ins->type = STACK_R8;
973 ins->opcode += unops_op_map [src1->type];
976 ins->type = STACK_R8;
/* Overflow-checked arithmetic; R8 results are invalid here. */
980 ins->type = STACK_I4;
981 ins->opcode += ovfops_op_map [src1->type];
986 ins->type = STACK_PTR;
987 ins->opcode += ovfops_op_map [src1->type];
995 ins->type = bin_num_table [src1->type] [src2->type];
996 ins->opcode += ovfops_op_map [src1->type];
997 if (ins->type == STACK_R8)
998 ins->type = STACK_INV;
/* Memory loads already carry an explicit width in the opcode. */
1000 case OP_LOAD_MEMBASE:
1001 ins->type = STACK_PTR;
1003 case OP_LOADI1_MEMBASE:
1004 case OP_LOADU1_MEMBASE:
1005 case OP_LOADI2_MEMBASE:
1006 case OP_LOADU2_MEMBASE:
1007 case OP_LOADI4_MEMBASE:
1008 case OP_LOADU4_MEMBASE:
1009 ins->type = STACK_PTR;
1011 case OP_LOADI8_MEMBASE:
1012 ins->type = STACK_I8;
1014 case OP_LOADR4_MEMBASE:
1015 case OP_LOADR8_MEMBASE:
1016 ins->type = STACK_R8;
1019 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers default to object_class as their klass. */
1023 if (ins->type == STACK_MP)
1024 ins->klass = mono_defaults.object_class;
/* Map from metadata type kind to eval-stack kind (used by the tables
 * below); and a (partly disabled) argument/signature compatibility check. */
1029 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1035 param_table [STACK_MAX] [STACK_MAX] = {
/*
 * check_values_to_signature:
 * Verify that the stack kinds of 'args' are compatible with 'sig'
 * (byref-ness, reference kinds, float params). Returns 0 on mismatch.
 */
1040 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1044 switch (args->type) {
1054 for (i = 0; i < sig->param_count; ++i) {
1055 switch (args [i].type) {
/* A managed-pointer arg requires a byref parameter, and vice versa. */
1059 if (!sig->params [i]->byref)
1063 if (sig->params [i]->byref)
1065 switch (sig->params [i]->type) {
1066 case MONO_TYPE_CLASS:
1067 case MONO_TYPE_STRING:
1068 case MONO_TYPE_OBJECT:
1069 case MONO_TYPE_SZARRAY:
1070 case MONO_TYPE_ARRAY:
/* An R8 arg must match a non-byref floating-point parameter. */
1077 if (sig->params [i]->byref)
1079 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1088 /*if (!param_table [args [i].type] [sig->params [i]->type])
/*
 * Lazily-created per-method special variables, cached on the cfg:
 * domainvar (MonoDomain*), got_var (GOT address under AOT) and
 * rgctx_var (runtime generic context, generic sharing only).
 */
1096 * When we need a pointer to the current domain many times in a method, we
1097 * call mono_domain_get() once and we store the result in a local variable.
1098 * This function returns the variable that represents the MonoDomain*.
1100 inline static MonoInst *
1101 mono_get_domainvar (MonoCompile *cfg)
1103 if (!cfg->domainvar)
1104 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1105 return cfg->domainvar;
1109 * The got_var contains the address of the Global Offset Table when AOT
/* Only meaningful when the arch needs an explicit GOT var and we are
 * compiling AOT; otherwise no variable is created. */
1113 mono_get_got_var (MonoCompile *cfg)
1115 #ifdef MONO_ARCH_NEED_GOT_VAR
1116 if (!cfg->compile_aot)
1118 if (!cfg->got_var) {
1119 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1121 return cfg->got_var;
/* Get or create the rgctx variable; requires generic sharing. */
1128 mono_get_vtable_var (MonoCompile *cfg)
1130 g_assert (cfg->generic_sharing_context);
1132 if (!cfg->rgctx_var) {
1133 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1134 /* force the var to be stack allocated */
1135 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1138 return cfg->rgctx_var;
/*
 * type_from_stack_type:
 * Inverse of type_to_eval_stack_type: map an instruction's eval-stack
 * kind back to a representative MonoType (using ins->klass for the
 * MP/OBJ/VTYPE cases).
 */
1142 type_from_stack_type (MonoInst *ins) {
1143 switch (ins->type) {
1144 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1145 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1146 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1147 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1149 return &ins->klass->this_arg;
1150 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1151 case STACK_VTYPE: return &ins->klass->byval_arg;
1153 g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * type_to_stack_type:
 * Map a MonoType (after stripping enum wrappers via
 * mono_type_get_underlying_type) to its STACK_* kind.
 */
1158 static G_GNUC_UNUSED int
1159 type_to_stack_type (MonoType *t)
1161 t = mono_type_get_underlying_type (t);
1165 case MONO_TYPE_BOOLEAN:
1168 case MONO_TYPE_CHAR:
1175 case MONO_TYPE_FNPTR:
1177 case MONO_TYPE_CLASS:
1178 case MONO_TYPE_STRING:
1179 case MONO_TYPE_OBJECT:
1180 case MONO_TYPE_SZARRAY:
1181 case MONO_TYPE_ARRAY:
1189 case MONO_TYPE_VALUETYPE:
1190 case MONO_TYPE_TYPEDBYREF:
1192 case MONO_TYPE_GENERICINST:
1193 if (mono_type_generic_inst_is_valuetype (t))
1199 g_assert_not_reached ();
/*
 * array_access_to_klass:
 * Return the element class accessed by a CEE_LDELEM_*/CEE_STELEM_*
 * opcode (byte/int16/int32/int64/float/double/object, etc.).
 * Unknown opcodes are a bug (assert).
 */
1206 array_access_to_klass (int opcode)
1210 return mono_defaults.byte_class;
1212 return mono_defaults.uint16_class;
1215 return mono_defaults.int_class;
1218 return mono_defaults.sbyte_class;
1221 return mono_defaults.int16_class;
1224 return mono_defaults.int32_class;
1226 return mono_defaults.uint32_class;
1229 return mono_defaults.int64_class;
1232 return mono_defaults.single_class;
1235 return mono_defaults.double_class;
1236 case CEE_LDELEM_REF:
1237 case CEE_STELEM_REF:
1238 return mono_defaults.object_class;
1240 g_assert_not_reached ();
/*
 * mono_compile_get_interface_var:
 * Return a local variable used to carry stack slot 'slot' of kind
 * ins->type across basic-block boundaries, reusing a cached one from
 * cfg->intvars when the (slot, type) pair was seen before.
 */
1246 * We try to share variables when possible
1249 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1254 /* inlining can result in deeper stacks */
1255 if (slot >= cfg->header->max_stack)
1256 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* intvars is indexed by (stack kind - 1) + slot * STACK_MAX. */
1258 pos = ins->type - 1 + slot * STACK_MAX;
1260 switch (ins->type) {
1267 if ((vnum = cfg->intvars [pos]))
1268 return cfg->varinfo [vnum];
1269 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1270 cfg->intvars [pos] = res->inst_c0;
/* Non-cacheable kinds always get a fresh variable. */
1273 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 * Under AOT compilation, remember which image+token produced 'key' so
 * the AOT compiler can re-resolve it later; stored in token_info_hash.
 */
1279 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1282 * Don't use this if a generic_context is set, since that means AOT can't
1283 * look up the method using just the image+token.
1284 * table == 0 means this is a reference made from a wrapper.
1286 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1287 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1288 jump_info_token->image = image;
1289 jump_info_token->token = token;
1290 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1295 * This function is called to handle items that are left on the evaluation stack
1296 * at basic block boundaries. What happens is that we save the values to local variables
1297 * and we reload them later when first entering the target basic block (with the
1298 * handle_loaded_temps () function).
1299 * A single joint point will use the same variables (stored in the array bb->out_stack or
1300 * bb->in_stack, if the basic block is before or after the joint point).
1302 * This function needs to be called _before_ emitting the last instruction of
1303 * the bb (i.e. before emitting a branch).
1304 * If the stack merge fails at a join point, cfg->unverifiable is set.
/* NOTE(review): this listing is elided (closing braces and some statements are
 * not visible), so only comments were added; the code is unchanged. */
1307 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1310 MonoBasicBlock *bb = cfg->cbb;
1311 MonoBasicBlock *outb;
1312 MonoInst *inst, **locals;
1317 if (cfg->verbose_level > 3)
1318 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* Phase 1: first exit from this bb — pick the variables (bb->out_stack) that
 * will carry the stack values across the block boundary. */
1319 if (!bb->out_scount) {
1320 bb->out_scount = count;
1321 //printf ("bblock %d has out:", bb->block_num);
/* If a successor already has an in_stack, reuse it so both sides agree. */
1323 for (i = 0; i < bb->out_count; ++i) {
1324 outb = bb->out_bb [i];
1325 /* exception handlers are linked, but they should not be considered for stack args */
1326 if (outb->flags & BB_EXCEPTION_HANDLER)
1328 //printf (" %d", outb->block_num);
1329 if (outb->in_stack) {
1331 bb->out_stack = outb->in_stack;
/* No successor had an in_stack: allocate fresh interface variables. */
1337 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1338 for (i = 0; i < count; ++i) {
1340 * try to reuse temps already allocated for this purpouse, if they occupy the same
1341 * stack slot and if they are of the same type.
1342 * This won't cause conflicts since if 'local' is used to
1343 * store one of the values in the in_stack of a bblock, then
1344 * the same variable will be used for the same outgoing stack
1346 * This doesn't work when inlining methods, since the bblocks
1347 * in the inlined methods do not inherit their in_stack from
1348 * the bblock they are inlined to. See bug #58863 for an
1351 if (cfg->inlined_method)
1352 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1354 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Phase 2: propagate bb->out_stack into each successor's in_stack
 * (exception-handler successors are skipped; a count mismatch at a join
 * point marks the method unverifiable). */
1359 for (i = 0; i < bb->out_count; ++i) {
1360 outb = bb->out_bb [i];
1361 /* exception handlers are linked, but they should not be considered for stack args */
1362 if (outb->flags & BB_EXCEPTION_HANDLER)
1364 if (outb->in_scount) {
1365 if (outb->in_scount != bb->out_scount) {
1366 cfg->unverifiable = TRUE;
1369 continue; /* check they are the same locals */
1371 outb->in_scount = count;
1372 outb->in_stack = bb->out_stack;
1375 locals = bb->out_stack;
/* Phase 3: spill the current stack values into the chosen variables and
 * replace the stack entries with those variables. */
1377 for (i = 0; i < count; ++i) {
1378 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1379 inst->cil_code = sp [i]->cil_code;
1380 sp [i] = locals [i];
1381 if (cfg->verbose_level > 3)
1382 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1386 * It is possible that the out bblocks already have in_stack assigned, and
1387 * the in_stacks differ. In this case, we will store to all the different
1394 /* Find a bblock which has a different in_stack */
1396 while (bindex < bb->out_count) {
1397 outb = bb->out_bb [bindex];
1398 /* exception handlers are linked, but they should not be considered for stack args */
1399 if (outb->flags & BB_EXCEPTION_HANDLER) {
1403 if (outb->in_stack != locals) {
1404 for (i = 0; i < count; ++i) {
1405 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1406 inst->cil_code = sp [i]->cil_code;
1407 sp [i] = locals [i];
1408 if (cfg->verbose_level > 3)
1409 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1411 locals = outb->in_stack;
1420 /* Emit code which loads interface_offsets [klass->interface_id]
1421 * The array is stored in memory before vtable.
1424 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
/* AOT: the interface id is not a compile-time constant, so load it through
 * an ADJUSTED_IID patch and index the array with pointer arithmetic. */
1426 if (cfg->compile_aot) {
1427 int ioffset_reg = alloc_preg (cfg);
1428 int iid_reg = alloc_preg (cfg);
1430 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1431 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1432 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* Non-AOT: interface_id is known, so use a constant negative offset from the
 * vtable (the array lives immediately before it). The 'else' keyword is
 * elided from this view of the listing. */
1435 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/* Emit code which sets "intf_bit_reg" to a nonzero value iff the interface
 * bit for "klass" is set in the interface bitmap found at base_reg+offset.
 * Three strategies, depending on build configuration (parts of the
 * conditional structure are elided from this view of the listing). */
1440 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1442 int ibitmap_reg = alloc_preg (cfg);
/* Compressed bitmap: defer the test to the mono_class_interface_match icall. */
1443 #ifdef COMPRESSED_INTERFACE_BITMAP
1445 MonoInst *res, *ins;
1446 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1447 MONO_ADD_INS (cfg->cbb, ins);
1449 if (cfg->compile_aot)
1450 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1452 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1453 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1454 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1456 int ibitmap_byte_reg = alloc_preg (cfg);
1458 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
/* Uncompressed bitmap, AOT: compute byte index (iid >> 3) and bit mask
 * (1 << (iid & 7)) at runtime, since iid is only known via a patch. */
1460 if (cfg->compile_aot) {
1461 int iid_reg = alloc_preg (cfg);
1462 int shifted_iid_reg = alloc_preg (cfg);
1463 int ibitmap_byte_address_reg = alloc_preg (cfg);
1464 int masked_iid_reg = alloc_preg (cfg);
1465 int iid_one_bit_reg = alloc_preg (cfg);
1466 int iid_bit_reg = alloc_preg (cfg);
1467 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1468 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1469 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1470 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1471 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1472 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1473 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1474 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* Uncompressed bitmap, non-AOT: byte index and mask fold to constants. */
1476 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1477 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1483 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1484 * stored in "klass_reg" implements the interface "klass".
/* Thin wrapper: the interface bitmap lives at MonoClass::interface_bitmap. */
1487 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1489 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1493 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1494 * stored in "vtable_reg" implements the interface "klass".
/* Thin wrapper: the interface bitmap lives at MonoVTable::interface_bitmap. */
1497 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1499 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1503 * Emit code which checks whenever the interface id of @klass is smaller than
1504 * than the value given by max_iid_reg.
/* On failure: branch to false_target if given, otherwise throw
 * InvalidCastException (the if/else around the two terminal emits is elided
 * from this view of the listing). */
1507 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1508 MonoBasicBlock *false_target)
/* AOT: interface_id must come from a patch rather than an immediate. */
1510 if (cfg->compile_aot) {
1511 int iid_reg = alloc_preg (cfg);
1512 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1513 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1516 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1518 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1520 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1523 /* Same as above, but obtains max_iid from a vtable */
1525 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1526 MonoBasicBlock *false_target)
1528 int max_iid_reg = alloc_preg (cfg);
/* Load MonoVTable::max_interface_id (16-bit unsigned), then delegate. */
1530 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1531 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1534 /* Same as above, but obtains max_iid from a klass */
1536 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1537 MonoBasicBlock *false_target)
1539 int max_iid_reg = alloc_preg (cfg);
/* Load MonoClass::max_interface_id (16-bit unsigned), then delegate. */
1541 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1542 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/* Emit an "is instance" test based on the supertypes array: checks whether
 * the class in klass_reg derives from @klass by comparing
 * supertypes [klass->idepth - 1] against @klass (or against klass_ins/an
 * AOT constant when @klass is not an immediate). Branches to true_target on
 * match; the idepth guard branches to false_target when the hierarchy is too
 * shallow. NOTE(review): some lines (braces, else keywords) are elided from
 * this view of the listing. */
1546 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1548 int idepth_reg = alloc_preg (cfg);
1549 int stypes_reg = alloc_preg (cfg);
1550 int stype = alloc_preg (cfg);
1552 mono_class_setup_supertypes (klass);
/* Only classes deeper than the default supertable need the explicit idepth
 * guard; shallower ones always have a valid supertypes slot. */
1554 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1555 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1556 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1557 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1559 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1560 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
/* Three ways to obtain the class to compare against: a register (klass_ins),
 * an AOT class constant, or the raw pointer as an immediate. */
1562 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1563 } else if (cfg->compile_aot) {
1564 int const_reg = alloc_preg (cfg);
1565 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1566 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1568 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1570 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst test where @klass is a compile-time value
 * (no klass instruction register). */
1574 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1576 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/* Emit an interface cast check against a vtable register: first the max-iid
 * range guard, then the interface-bitmap bit test. On a set bit either
 * branches to true_target or (elided else path) throws InvalidCastException. */
1580 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1582 int intf_reg = alloc_preg (cfg);
1584 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1585 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1586 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1588 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1590 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1594 * Variant of the above that takes a register to the class, not the vtable.
/* Same structure as mini_emit_iface_cast, but reads max_interface_id and the
 * interface bitmap from a MonoClass instead of a MonoVTable. */
1597 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1599 int intf_bit_reg = alloc_preg (cfg);
1601 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1602 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1603 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1605 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1607 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Emit an exact class-equality check: compares klass_reg against klass_inst's
 * register, an AOT class constant, or the raw klass pointer (the surrounding
 * if for the klass_inst case is elided from this view). Throws
 * InvalidCastException on mismatch. */
1611 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1614 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1615 } else if (cfg->compile_aot) {
1616 int const_reg = alloc_preg (cfg);
1617 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1618 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1620 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1622 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact class check against a compile-time @klass. */
1626 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1628 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/* Compare klass_reg against @klass and emit a conditional branch (branch_op)
 * to @target; under AOT the class pointer comes from a patch constant. */
1632 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1634 if (cfg->compile_aot) {
1635 int const_reg = alloc_preg (cfg);
1636 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1637 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1639 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1641 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/* Forward declaration: mini_emit_castclass_inst below recurses through it
 * for arrays of arrays. */
1645 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/* Emit a castclass check of the class in klass_reg against @klass, throwing
 * InvalidCastException on failure. Two major paths (the enclosing
 * if (klass->rank) / else and several braces are elided from this view):
 * an array path (rank + element-class checks) and a supertypes path. */
1648 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1651 int rank_reg = alloc_preg (cfg);
1652 int eclass_reg = alloc_preg (cfg);
1654 g_assert (!klass_inst);
/* Array path: the ranks must match exactly. */
1655 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1656 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1657 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1658 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1659 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Element-class check, special-cased for object / Enum / its parent to
 * match the runtime's array covariance rules for enums. */
1660 if (klass->cast_class == mono_defaults.object_class) {
1661 int parent_reg = alloc_preg (cfg);
1662 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1663 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1664 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1665 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1666 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1667 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1668 } else if (klass->cast_class == mono_defaults.enum_class) {
1669 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1670 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1671 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1673 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1674 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
/* SZARRAY targets additionally require the object to be a vector
 * (bounds == NULL), not a bounded multi-dim array of rank 1. */
1677 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1678 /* Check that the object is a vector too */
1679 int bounds_reg = alloc_preg (cfg);
1680 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1681 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1682 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: supertypes-table check, throwing instead of branching
 * (compare with mini_emit_isninst_cast_inst, which branches). */
1685 int idepth_reg = alloc_preg (cfg);
1686 int stypes_reg = alloc_preg (cfg);
1687 int stype = alloc_preg (cfg);
1689 mono_class_setup_supertypes (klass);
1691 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1692 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1693 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1694 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1696 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1697 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1698 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass check with no klass instruction register. */
1703 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1705 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/* Emit inline code to set [destreg+offset, +size) to @val. Only val == 0 is
 * supported (asserted). Small aligned sizes use a single store-immediate;
 * larger sizes load the value into a register and emit a run of the widest
 * stores allowed by alignment. NOTE(review): the switch/case labels and
 * loop structure around the stores are elided from this view; only comments
 * were added. */
1709 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1713 g_assert (val == 0);
/* Fast path: size fits in one aligned store-immediate. */
1718 if ((size <= 4) && (size <= align)) {
1721 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1724 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1727 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1729 #if SIZEOF_REGISTER == 8
1731 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General path: materialize val in a register sized to the machine word. */
1737 val_reg = alloc_preg (cfg);
1739 if (SIZEOF_REGISTER == 8)
1740 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1742 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned destination: fall back to byte stores. */
1745 /* This could be optimized further if neccesary */
1747 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/* Aligned destination: widest stores first (8-byte where available and
 * unaligned access is allowed), then 4-, 2-, 1-byte stores for the tail. */
1754 #if !NO_UNALIGNED_ACCESS
1755 if (SIZEOF_REGISTER == 8) {
1757 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1762 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1770 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1775 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1780 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/* Emit inline code copying @size bytes from srcreg+soffset to
 * destreg+doffset, honoring @align: widest load/store pairs first, then
 * narrower ones for the tail. NOTE(review): the loop structure and offset
 * advancement between the copy widths are elided from this view; only
 * comments were added. */
1787 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1794 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1795 g_assert (size < 10000);
/* Unaligned case: byte-by-byte copy. */
1798 /* This could be optimized further if neccesary */
1800 cur_reg = alloc_preg (cfg);
1801 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1802 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/* 8-byte chunks where the register width and unaligned-access rules allow. */
1809 #if !NO_UNALIGNED_ACCESS
1810 if (SIZEOF_REGISTER == 8) {
1812 cur_reg = alloc_preg (cfg);
1813 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1814 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* 4-byte chunks. */
1823 cur_reg = alloc_preg (cfg);
1824 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1825 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
/* 2-byte chunks. */
1831 cur_reg = alloc_preg (cfg);
1832 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1833 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
/* Remaining bytes. */
1839 cur_reg = alloc_preg (cfg);
1840 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1841 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/* Map a method's return type to the matching call opcode family
 * (CALL / VOIDCALL / LCALL / FCALL / VCALL), selecting the _REG variant for
 * calli and the *CALLVIRT variant for virtual calls. Enums unwrap to their
 * base type and generic instances to their container class, then re-enter
 * the switch. NOTE(review): several case labels (e.g. for the integer, I8
 * and R4/R8 groups) are elided from this view of the listing; only comments
 * were added. */
1849 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
/* byref returns are machine pointers, hence plain CALL (the if is elided). */
1852 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1855 type = mini_get_basic_type_from_generic (gsctx, type);
1856 switch (type->type) {
1857 case MONO_TYPE_VOID:
1858 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1861 case MONO_TYPE_BOOLEAN:
1864 case MONO_TYPE_CHAR:
1867 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1871 case MONO_TYPE_FNPTR:
1872 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1873 case MONO_TYPE_CLASS:
1874 case MONO_TYPE_STRING:
1875 case MONO_TYPE_OBJECT:
1876 case MONO_TYPE_SZARRAY:
1877 case MONO_TYPE_ARRAY:
1878 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1881 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1884 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1885 case MONO_TYPE_VALUETYPE:
1886 if (type->data.klass->enumtype) {
1887 type = mono_class_enum_basetype (type->data.klass);
1890 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1891 case MONO_TYPE_TYPEDBYREF:
1892 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1893 case MONO_TYPE_GENERICINST:
1894 type = &type->data.generic_class->container_class->byval_arg;
1897 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1903 * target_type_is_incompatible:
1904 * @cfg: MonoCompile context
1906 * Check that the item @arg on the evaluation stack can be stored
1907 * in the target type (can be a local, or field, etc).
1908 * The cfg arg can be used to check if we need verification or just
1911 * Returns: non-0 value if arg can't be stored on a target.
/* NOTE(review): the return statements between the checks and several case
 * labels (integer, I8, R4/R8, VAR groups) are elided from this view of the
 * listing; only comments were added. */
1914 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1916 MonoType *simple_type;
/* byref targets accept managed or unmanaged pointers; for STACK_MP the
 * pointed-to class must match. */
1919 if (target->byref) {
1920 /* FIXME: check that the pointed to types match */
1921 if (arg->type == STACK_MP)
1922 return arg->klass != mono_class_from_mono_type (target);
1923 if (arg->type == STACK_PTR)
/* Unwrap enums etc. before switching on the underlying type. */
1928 simple_type = mono_type_get_underlying_type (target);
1929 switch (simple_type->type) {
1930 case MONO_TYPE_VOID:
1934 case MONO_TYPE_BOOLEAN:
1937 case MONO_TYPE_CHAR:
1940 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1944 /* STACK_MP is needed when setting pinned locals */
1945 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1950 case MONO_TYPE_FNPTR:
1952 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
1953 * in native int. (#688008).
1955 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1958 case MONO_TYPE_CLASS:
1959 case MONO_TYPE_STRING:
1960 case MONO_TYPE_OBJECT:
1961 case MONO_TYPE_SZARRAY:
1962 case MONO_TYPE_ARRAY:
1963 if (arg->type != STACK_OBJ)
1965 /* FIXME: check type compatibility */
1969 if (arg->type != STACK_I8)
1974 if (arg->type != STACK_R8)
/* Value types must match both stack kind and the exact class. */
1977 case MONO_TYPE_VALUETYPE:
1978 if (arg->type != STACK_VTYPE)
1980 klass = mono_class_from_mono_type (simple_type);
1981 if (klass != arg->klass)
1984 case MONO_TYPE_TYPEDBYREF:
1985 if (arg->type != STACK_VTYPE)
1987 klass = mono_class_from_mono_type (simple_type);
1988 if (klass != arg->klass)
1991 case MONO_TYPE_GENERICINST:
1992 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1993 if (arg->type != STACK_VTYPE)
1995 klass = mono_class_from_mono_type (simple_type);
1996 if (klass != arg->klass)
2000 if (arg->type != STACK_OBJ)
2002 /* FIXME: check type compatibility */
/* Generic type parameters: only reached under generic sharing, where they
 * are treated as references (see comment below). */
2006 case MONO_TYPE_MVAR:
2007 /* FIXME: all the arguments must be references for now,
2008 * later look inside cfg and see if the arg num is
2009 * really a reference
2011 g_assert (cfg->generic_sharing_context);
2012 if (arg->type != STACK_OBJ)
2016 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2022 * Prepare arguments for passing to a function call.
2023 * Return a non-zero value if the arguments can't be passed to the given
2025 * The type checks are not yet complete and some conversions may need
2026 * casts on 32 or 64 bit architectures.
2028 * FIXME: implement this using target_type_is_incompatible ()
/* NOTE(review): return statements between checks and several case labels are
 * elided from this view of the listing; only comments were added. */
2031 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2033 MonoType *simple_type;
/* 'this' (when present — the guard is elided) must be an object, managed
 * pointer or unmanaged pointer. */
2037 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2041 for (i = 0; i < sig->param_count; ++i) {
2042 if (sig->params [i]->byref) {
2043 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2047 simple_type = sig->params [i];
2048 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
2050 switch (simple_type->type) {
2051 case MONO_TYPE_VOID:
2056 case MONO_TYPE_BOOLEAN:
2059 case MONO_TYPE_CHAR:
2062 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2068 case MONO_TYPE_FNPTR:
2069 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2072 case MONO_TYPE_CLASS:
2073 case MONO_TYPE_STRING:
2074 case MONO_TYPE_OBJECT:
2075 case MONO_TYPE_SZARRAY:
2076 case MONO_TYPE_ARRAY:
2077 if (args [i]->type != STACK_OBJ)
2082 if (args [i]->type != STACK_I8)
2087 if (args [i]->type != STACK_R8)
/* Enums re-enter the switch with their base type (goto target elided). */
2090 case MONO_TYPE_VALUETYPE:
2091 if (simple_type->data.klass->enumtype) {
2092 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2095 if (args [i]->type != STACK_VTYPE)
2098 case MONO_TYPE_TYPEDBYREF:
2099 if (args [i]->type != STACK_VTYPE)
2102 case MONO_TYPE_GENERICINST:
2103 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2107 g_error ("unknown type 0x%02x in check_call_signature",
/* Convert a *CALLVIRT opcode to its direct-call counterpart.
 * NOTE(review): most case labels and return statements are elided from this
 * view of the listing; only comments were added. */
2115 callvirt_to_call (int opcode)
2120 case OP_VOIDCALLVIRT:
2129 g_assert_not_reached ();
/* Convert a *CALLVIRT opcode to the corresponding *CALL_MEMBASE opcode
 * (indirect call through a vtable/membase slot). NOTE(review): the case
 * labels for the FCALL/LCALL/VCALL variants are elided from this view. */
2136 callvirt_to_call_membase (int opcode)
2140 return OP_CALL_MEMBASE;
2141 case OP_VOIDCALLVIRT:
2142 return OP_VOIDCALL_MEMBASE;
2144 return OP_FCALL_MEMBASE;
2146 return OP_LCALL_MEMBASE;
2148 return OP_VCALL_MEMBASE;
2150 g_assert_not_reached ();
2156 #ifdef MONO_ARCH_HAVE_IMT
/* Materialize the IMT argument (either the supplied imt_arg instruction or
 * the call's method, as an AOT patch or a raw pointer constant) into a
 * register and attach it to the call. The LLVM path always records it in
 * call->imt_arg_reg; the non-LLVM path requires MONO_ARCH_IMT_REG (or falls
 * back to mono_arch_emit_imt_argument — surrounding #else/#endif lines are
 * elided from this view of the listing). */
2158 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2162 if (COMPILE_LLVM (cfg)) {
2163 method_reg = alloc_preg (cfg);
2166 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2167 } else if (cfg->compile_aot) {
2168 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2171 MONO_INST_NEW (cfg, ins, OP_PCONST);
2172 ins->inst_p0 = call->method;
2173 ins->dreg = method_reg;
2174 MONO_ADD_INS (cfg->cbb, ins);
2178 call->imt_arg_reg = method_reg;
2180 #ifdef MONO_ARCH_IMT_REG
2181 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2183 /* Need this to keep the IMT arg alive */
2184 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
/* Non-LLVM path: same three ways of producing the method register. */
2189 #ifdef MONO_ARCH_IMT_REG
2190 method_reg = alloc_preg (cfg);
2193 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2194 } else if (cfg->compile_aot) {
2195 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2198 MONO_INST_NEW (cfg, ins, OP_PCONST);
2199 ins->inst_p0 = call->method;
2200 ins->dreg = method_reg;
2201 MONO_ADD_INS (cfg->cbb, ins);
2204 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2206 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/* Allocate a MonoJumpInfo from @mp and fill in its target; the ip/type field
 * assignments are elided from this view of the listing. */
2211 static MonoJumpInfo *
2212 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2214 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2218 ji->data.target = target;
2223 inline static MonoCallInst *
2224 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2225 MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
/* Build a MonoCallInst for the given signature and flags: picks the call
 * opcode, wires up the return value (including the vtype-return path via
 * OP_OUTARG_VTRETADDR), performs soft-float argument conversion, and lets
 * the backend (LLVM or mono_arch) lower the argument passing.
 * NOTE(review): several lines (declarations, braces, the return of 'call')
 * are elided from this view of the listing; only comments were added. */
2228 #ifdef MONO_ARCH_SOFT_FLOAT
2233 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2235 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2238 call->signature = sig;
2239 call->rgctx_reg = rgctx;
2241 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
/* Struct return: either reuse cfg->vret_addr (the first branch — its guard
 * is elided) or create a local and pass its address via OP_OUTARG_VTRETADDR. */
2244 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2245 call->vret_var = cfg->vret_addr;
2246 //g_assert_not_reached ();
2248 } else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2249 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2252 temp->backend.is_pinvoke = sig->pinvoke;
2255 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2256 * address of return value to increase optimization opportunities.
2257 * Before vtype decomposition, the dreg of the call ins itself represents the
2258 * fact the call modifies the return value. After decomposition, the call will
2259 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2260 * will be transformed into an LDADDR.
2262 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2263 loada->dreg = alloc_preg (cfg);
2264 loada->inst_p0 = temp;
2265 /* We reference the call too since call->dreg could change during optimization */
2266 loada->inst_p1 = call;
2267 MONO_ADD_INS (cfg->cbb, loada);
2269 call->inst.dreg = temp->dreg;
2271 call->vret_var = loada;
2272 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2273 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2275 #ifdef MONO_ARCH_SOFT_FLOAT
2276 if (COMPILE_SOFT_FLOAT (cfg)) {
2278 * If the call has a float argument, we would need to do an r8->r4 conversion using
2279 * an icall, but that cannot be done during the call sequence since it would clobber
2280 * the call registers + the stack. So we do it before emitting the call.
2282 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2284 MonoInst *in = call->args [i];
2286 if (i >= sig->hasthis)
2287 t = sig->params [i - sig->hasthis];
2289 t = &mono_defaults.int_class->byval_arg;
2290 t = mono_type_get_underlying_type (t);
2292 if (!t->byref && t->type == MONO_TYPE_R4) {
2293 MonoInst *iargs [1];
2297 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2299 /* The result will be in an int vreg */
2300 call->args [i] = conv;
2306 call->need_unbox_trampoline = unbox_trampoline;
/* Backend lowering of the argument passing. */
2309 if (COMPILE_LLVM (cfg))
2310 mono_llvm_emit_call (cfg, call);
2312 mono_arch_emit_call (cfg, call);
2314 mono_arch_emit_call (cfg, call);
2317 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2318 cfg->flags |= MONO_CFG_HAS_CALLS;
/* Attach the rgctx argument to a call: via the dedicated RGCTX register when
 * the architecture defines one, otherwise by recording the vreg in
 * call->rgctx_arg_reg (the #else line between the branches is elided). */
2324 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2326 #ifdef MONO_ARCH_RGCTX_REG
2327 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2328 cfg->uses_rgctx_reg = TRUE;
2329 call->rgctx_reg = TRUE;
2331 call->rgctx_arg_reg = rgctx_reg;
/* Emit an indirect call (calli) through @addr. If an rgctx argument is
 * supplied, it is copied into a fresh vreg before the call sequence and
 * attached afterwards via set_rgctx_arg (the guard around the copy is
 * elided from this view of the listing). */
2338 inline static MonoInst*
2339 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2345 rgctx_reg = mono_alloc_preg (cfg);
2346 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2349 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2351 call->inst.sreg1 = addr->dreg;
2353 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2356 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2358 return (MonoInst*)call;
2362 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2364 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/* Emit a call to @method: handles string ctors, transparent-proxy/remoting
 * wrappers, delegate Invoke fast paths, devirtualization of non-virtual and
 * sealed methods, and virtual dispatch through the vtable or IMT. Returns
 * the call instruction. NOTE(review): this is the most heavily elided block
 * in the listing (many braces, declarations and a few statements are not
 * visible); only comments were added. */
2367 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2368 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2370 gboolean might_be_remote;
2371 gboolean virtual = this != NULL;
2372 gboolean enable_for_aot = TRUE;
2376 gboolean need_unbox_trampoline;
/* Copy the rgctx argument out of the way before building the call. */
2379 rgctx_reg = mono_alloc_preg (cfg);
2380 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
/* String ctors are declared void but actually return the string. */
2383 if (method->string_ctor) {
2384 /* Create the real signature */
2385 /* FIXME: Cache these */
2386 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2387 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2392 context_used = mono_method_check_context_used (method);
/* Calls on MarshalByRef objects (or object itself) may go through a
 * remoting wrapper; under generic sharing the wrapper address is fetched
 * from the rgctx and the call becomes an indirect one. */
2394 might_be_remote = this && sig->hasthis &&
2395 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2396 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
2398 if (might_be_remote && context_used) {
2401 g_assert (cfg->generic_sharing_context);
2403 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2405 return mono_emit_calli (cfg, sig, args, addr, NULL);
2408 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2410 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2412 if (might_be_remote)
2413 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2415 call->method = method;
2416 call->inst.flags |= MONO_INST_HAS_METHOD;
2417 call->inst.inst_left = this;
/* Virtual-call path (its guard is elided from this view). */
2420 int vtable_reg, slot_reg, this_reg;
2422 this_reg = this->dreg;
/* Fast path for delegate Invoke: call delegate->invoke_impl directly. */
2424 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2425 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2426 MonoInst *dummy_use;
2428 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2430 /* Make a call to delegate->invoke_impl */
2431 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2432 call->inst.inst_basereg = this_reg;
2433 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2434 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2436 /* We must emit a dummy use here because the delegate trampoline will
2437 replace the 'this' argument with the delegate target making this activation
2438 no longer a root for the delegate.
2439 This is an issue for delegates that target collectible code such as dynamic
2440 methods of GC'able assemblies.
2442 For a test case look into #667921.
2444 FIXME: a dummy use is not the best way to do it as the local register allocator
2445 will put it on a caller save register and spil it around the call.
2446 Ideally, we would either put it on a callee save register or only do the store part.
2448 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2450 return (MonoInst*)call;
/* Devirtualization: non-virtual methods, and sealed virtual methods, can be
 * called directly after a null check on 'this'. */
2454 if ((!cfg->compile_aot || enable_for_aot) &&
2455 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2456 (MONO_METHOD_IS_FINAL (method) &&
2457 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2458 !(method->klass->marshalbyref && context_used)) {
2460 * the method is not virtual, we just need to ensure this is not null
2461 * and then we can call the method directly.
2463 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2465 * The check above ensures method is not gshared, this is needed since
2466 * gshared methods can't have wrappers.
2468 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2471 if (!method->string_ctor)
2472 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2474 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2475 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2477 * the method is virtual, but we can statically dispatch since either
2478 * it's class or the method itself are sealed.
2479 * But first we need to ensure it's not a null reference.
2481 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2483 call->inst.opcode = callvirt_to_call (call->inst.opcode);
/* True virtual dispatch: load the vtable (with fault on null this), then
 * select the slot — IMT slot for interfaces, vtable index otherwise. */
2485 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2487 vtable_reg = alloc_preg (cfg);
2488 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2489 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2491 #ifdef MONO_ARCH_HAVE_IMT
2493 guint32 imt_slot = mono_method_get_imt_slot (method);
2494 emit_imt_argument (cfg, call, imt_arg);
2495 slot_reg = vtable_reg;
2496 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
/* No IMT: fall back to the interface-offsets table before the vtable. */
2499 if (slot_reg == -1) {
2500 slot_reg = alloc_preg (cfg);
2501 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2502 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2505 slot_reg = vtable_reg;
2506 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2507 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2508 #ifdef MONO_ARCH_HAVE_IMT
2510 g_assert (mono_method_signature (method)->generic_param_count);
2511 emit_imt_argument (cfg, call, imt_arg);
2516 call->inst.sreg1 = slot_reg;
2517 call->virtual = TRUE;
2521 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2524 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2526 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *
 *   Convenience wrapper: emit a call to METHOD using the method's own
 * signature and no extra imt/rgctx arguments, delegating to
 * mono_emit_method_call_full ().
 */
2530 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2532 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL, NULL);
/*
 * mono_emit_native_call:
 *
 *   Emit a call instruction targeting the native function FUNC with
 * signature SIG, append it to the current basic block and return it.
 * NOTE(review): the statement wiring FUNC into the call (and the
 * declaration of 'call') is not visible in this extract — confirm against
 * the full source.
 */
2536 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
/* All boolean call-construction flags are cleared for a plain native call. */
2543 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2546 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2548 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *
 *   Emit a call to the JIT icall registered at address FUNC.  The icall
 * info is looked up by address and the emitted call targets the icall's
 * wrapper, using the icall's registered signature.
 */
2552 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2554 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2558 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2562 * mono_emit_abs_call:
2564 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2566 inline static MonoInst*
2567 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2568 MonoMethodSignature *sig, MonoInst **args)
2570 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2574 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Lazily create the table mapping patch-info "addresses" back to patches. */
2577 if (cfg->abs_patches == NULL)
2578 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2579 g_hash_table_insert (cfg->abs_patches, ji, ji);
/* The "function pointer" is really the MonoJumpInfo* registered above. */
2580 ins = mono_emit_native_call (cfg, ji, sig, args);
/* Mark the fptr as a patch so later passes don't treat it as a real address. */
2581 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *
 *   Sign/zero-extend sub-register-sized integer return values of the call
 * INS: native (pinvoke) and LLVM-compiled code might return such values
 * without initializing the upper bits of the return register.
 * NOTE(review): the 'break;' statements, the widen_op declaration and the
 * final return are not visible in this extract.
 */
2586 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2588 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2589 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2593 * Native code might return non register sized integers
2594 * without initializing the upper bits.
/* Pick the widening op matching how the return type would be loaded. */
2596 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2597 case OP_LOADI1_MEMBASE:
2598 widen_op = OP_ICONV_TO_I1;
2600 case OP_LOADU1_MEMBASE:
2601 widen_op = OP_ICONV_TO_U1;
2603 case OP_LOADI2_MEMBASE:
2604 widen_op = OP_ICONV_TO_I2;
2606 case OP_LOADU2_MEMBASE:
2607 widen_op = OP_ICONV_TO_U2;
2613 if (widen_op != -1) {
2614 int dreg = alloc_preg (cfg);
/* Emit the conversion into a fresh register, keeping the stack type. */
2617 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2618 widen->type = ins->type;
/*
 * get_memcpy_method:
 *
 *   Return (and cache in a static) the 3-argument "memcpy" helper method
 * on corlib's String class; abort if the corlib is too old to have it.
 */
2628 get_memcpy_method (void)
2630 static MonoMethod *memcpy_method = NULL;
2631 if (!memcpy_method) {
2632 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2634 g_error ("Old corlib found. Install a new one");
2636 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 *   Recursively set, in *wb_bitmap, one bit per pointer-sized slot of
 * KLASS (at byte OFFSET from the copied region's start) that holds a
 * reference, so callers can emit write barriers only for those slots.
 */
2640 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
2642 MonoClassField *field;
2643 gpointer iter = NULL;
2645 while ((field = mono_class_get_fields (klass, &iter))) {
/* Static fields don't live in the copied instance data. */
2648 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* Valuetype field offsets include the MonoObject header; strip it. */
2650 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2651 if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
/* Reference fields must be pointer-aligned for the bitmap to work. */
2652 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2653 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2655 MonoClass *field_class = mono_class_from_mono_type (field->type);
/* Recurse into embedded structs that themselves contain references. */
2656 if (field_class->has_references)
2657 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *
 *   Emit a GC write barrier for a store of VALUE (or, when VALUE is NULL,
 * of the register VALUE_REG) through the pointer PTR.  No-op unless
 * cfg->gen_write_barriers is set.  Chooses, in order: a single
 * OP_CARD_TABLE_WBARRIER instruction, inline card-table marking code, or
 * a call to the generic GC write-barrier method.
 */
2663 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value, int value_reg)
2665 int card_table_shift_bits;
2666 gpointer card_table_mask;
2668 MonoInst *dummy_use;
2669 int nursery_shift_bits;
2670 size_t nursery_size;
2671 gboolean has_card_table_wb = FALSE;
2673 if (!cfg->gen_write_barriers)
2676 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
2678 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
2680 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
2681 has_card_table_wb = TRUE;
/* Fast path: a dedicated arch-level card table barrier instruction. */
2684 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0) {
2687 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2688 wbarrier->sreg1 = ptr->dreg;
2690 wbarrier->sreg2 = value->dreg;
2692 wbarrier->sreg2 = value_reg;
2693 MONO_ADD_INS (cfg->cbb, wbarrier);
2694 } else if (card_table) {
/* Inline card marking: card = table + (ptr >> shift); *card = 1. */
2695 int offset_reg = alloc_preg (cfg);
2696 int card_reg = alloc_preg (cfg);
2699 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
2700 if (card_table_mask)
2701 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
2703 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
2704 * IMM's larger than 32bits.
2706 if (cfg->compile_aot) {
2707 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
2709 MONO_INST_NEW (cfg, ins, OP_PCONST);
2710 ins->inst_p0 = card_table;
2711 ins->dreg = card_reg;
2712 MONO_ADD_INS (cfg->cbb, ins);
2715 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
2716 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* Slow path: call the managed GC write barrier helper. */
2718 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2719 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep the stored value alive across the barrier via a dummy use. */
2723 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
2725 MONO_INST_NEW (cfg, dummy_use, OP_DUMMY_USE);
2726 dummy_use->sreg1 = value_reg;
2727 MONO_ADD_INS (cfg->cbb, dummy_use);
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Emit a write-barrier-aware copy of a valuetype of class KLASS.
 * iargs [0] holds the destination address, iargs [1] the source address;
 * SIZE/ALIGN describe the value.  Large values go through the
 * mono_gc_wbarrier_value_copy_bitmap icall; small ones are unrolled into
 * pointer-sized load/store pairs with barriers on the reference slots.
 * NOTE(review): the early-bail 'return' statements and the final return
 * value are not visible in this extract — presumably it reports whether
 * the copy was emitted; confirm against the full source.
 */
2732 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
2734 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
2735 unsigned need_wb = 0;
2740 /*types with references can't have alignment smaller than sizeof(void*) */
2741 if (align < SIZEOF_VOID_P)
2744 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
2745 if (size > 32 * SIZEOF_VOID_P)
/* Bitmap of which pointer-sized slots hold references. */
2748 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
2750 /* We don't unroll more than 5 stores to avoid code bloat. */
2751 if (size > 5 * SIZEOF_VOID_P) {
2752 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
2753 size += (SIZEOF_VOID_P - 1);
2754 size &= ~(SIZEOF_VOID_P - 1);
2756 EMIT_NEW_ICONST (cfg, iargs [2], size);
2757 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
2758 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
2762 destreg = iargs [0]->dreg;
2763 srcreg = iargs [1]->dreg;
2766 dest_ptr_reg = alloc_preg (cfg);
2767 tmp_reg = alloc_preg (cfg);
/* Running destination pointer, advanced as we unroll the copy. */
2770 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
2772 while (size >= SIZEOF_VOID_P) {
2773 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, tmp_reg, srcreg, offset);
2774 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
/* Barrier only for slots the bitmap marked as references. */
2777 emit_write_barrier (cfg, iargs [0], NULL, tmp_reg);
2779 offset += SIZEOF_VOID_P;
2780 size -= SIZEOF_VOID_P;
2783 /*tmp += sizeof (void*)*/
2784 if (size >= SIZEOF_VOID_P) {
2785 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
2786 MONO_ADD_INS (cfg->cbb, iargs [0]);
2790 /* Those cannot be references since size < sizeof (void*) */
2792 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
2793 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
2799 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
2800 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
2806 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
2807 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
2816 * Emit code to copy a valuetype of type @klass whose address is stored in
2817 * @src->dreg to memory whose address is stored at @dest->dreg.
/* @native: use the native (marshalled) layout size; implies no references. */
2820 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2822 MonoInst *iargs [4];
2825 MonoMethod *memcpy_method;
2829 * This check breaks with spilled vars... need to handle it during verification anyway.
2830 * g_assert (klass && klass == src->klass && klass == dest->klass);
2834 n = mono_class_native_size (klass, &align);
2836 n = mono_class_value_size (klass, &align);
2838 /* if native is true there should be no references in the struct */
2839 if (cfg->gen_write_barriers && klass->has_references && !native) {
2840 /* Avoid barriers when storing to the stack */
2841 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2842 (dest->opcode == OP_LDADDR))) {
2843 int context_used = 0;
2848 if (cfg->generic_sharing_context)
2849 context_used = mono_class_check_context_used (klass);
2851 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
2852 if ((cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
/* iargs [2] = klass, needed by the mono_value_copy icall below. */
2854 } else if (context_used) {
2855 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2857 if (cfg->compile_aot) {
2858 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2860 EMIT_NEW_PCONST (cfg, iargs [2], klass);
/* mono_value_copy reads the GC descriptor; compute it eagerly. */
2861 mono_class_compute_gc_descriptor (klass);
2865 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No barriers needed: either no references or a stack destination. */
2870 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2871 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2872 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2876 EMIT_NEW_ICONST (cfg, iargs [2], n);
2878 memcpy_method = get_memcpy_method ();
2879 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return (and cache in a static) the 3-argument "memset" helper method
 * on corlib's String class; abort if the corlib is too old to have it.
 */
2884 get_memset_method (void)
2886 static MonoMethod *memset_method = NULL;
2887 if (!memset_method) {
2888 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2890 g_error ("Old corlib found. Install a new one");
2892 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code to zero-initialize a valuetype of class KLASS whose address
 * is in dest->dreg.  Small values are memset inline; larger ones go
 * through the corlib memset helper.
 */
2896 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2898 MonoInst *iargs [3];
2901 MonoMethod *memset_method;
2903 /* FIXME: Optimize this for the case when dest is an LDADDR */
2905 mono_class_init (klass);
2906 n = mono_class_value_size (klass, &align);
/* Up to 5 pointer-sized words: cheap enough to clear inline. */
2908 if (n <= sizeof (gpointer) * 5) {
2909 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2912 memset_method = get_memset_method ();
/* memset (dest, 0, n) — iargs [0] is presumably set outside this extract. */
2914 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2915 EMIT_NEW_ICONST (cfg, iargs [2], n);
2916 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR to load the runtime generic context for METHOD: the method
 * RGCTX when the method instantiation is shared, the class vtable for
 * static/valuetype methods, otherwise the vtable of the 'this' argument.
 * Requires cfg->generic_sharing_context.
 */
2921 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2923 MonoInst *this = NULL;
2925 g_assert (cfg->generic_sharing_context);
/* Load 'this' only for instance methods of reference types that don't
   carry a method-level context. */
2927 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2928 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2929 !method->klass->valuetype)
2930 EMIT_NEW_ARGLOAD (cfg, this, 0);
2932 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2933 MonoInst *mrgctx_loc, *mrgctx_var;
2936 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
/* The MRGCTX was stashed in the vtable variable by the prologue. */
2938 mrgctx_loc = mono_get_vtable_var (cfg);
2939 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2942 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2943 MonoInst *vtable_loc, *vtable_var;
2947 vtable_loc = mono_get_vtable_var (cfg);
2948 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2950 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
/* The variable actually holds an MRGCTX; load its class vtable. */
2951 MonoInst *mrgctx_var = vtable_var;
2954 vtable_reg = alloc_preg (cfg);
2955 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2956 vtable_var->type = STACK_PTR;
/* Instance method on a reference type: read the vtable from 'this'. */
2964 vtable_reg = alloc_preg (cfg);
2965 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (from MP) and fill a MonoJumpInfoRgctxEntry describing an
 * RGCTX slot lookup: METHOD and IN_MRGCTX identify the context, and the
 * embedded patch (PATCH_TYPE/PATCH_DATA) plus INFO_TYPE identify what is
 * to be fetched from it.
 */
2970 static MonoJumpInfoRgctxEntry *
2971 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
2973 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2974 res->method = method;
2975 res->in_mrgctx = in_mrgctx;
2976 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2977 res->data->type = patch_type;
2978 res->data->data.target = patch_data;
2979 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *
 *   Emit a call to the RGCTX lazy-fetch trampoline which resolves ENTRY
 * against the runtime generic context RGCTX.
 */
2984 static inline MonoInst*
2985 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2987 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR to fetch the RGCTX_TYPE information (vtable, klass, ...) of
 * KLASS from the runtime generic context of the current method.
 */
2991 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2992 MonoClass *klass, MonoRgctxInfoType rgctx_type)
2994 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2995 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2997 return emit_rgctx_fetch (cfg, rgctx, entry);
3001 * emit_get_rgctx_method:
3003 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3004 * normal constants, else emit a load from the rgctx.
3007 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3008 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
/* No generic sharing in play: the value is a compile-time constant. */
3010 if (!context_used) {
3013 switch (rgctx_type) {
3014 case MONO_RGCTX_INFO_METHOD:
3015 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3017 case MONO_RGCTX_INFO_METHOD_RGCTX:
3018 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3021 g_assert_not_reached ();
/* Shared code: fetch the value lazily from the rgctx at run time. */
3024 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3025 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3027 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR to fetch the RGCTX_TYPE information of FIELD from the runtime
 * generic context of the current method.
 */
3032 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3033 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3035 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3036 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3038 return emit_rgctx_fetch (cfg, rgctx, entry);
3042 * On return the caller must check @klass for load errors.
/*
 * emit_generic_class_init:
 *
 *   Emit a call to the generic class-init trampoline for KLASS, passing
 * the vtable either fetched from the rgctx (shared code) or as a constant.
 */
3045 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
3047 MonoInst *vtable_arg;
3049 int context_used = 0;
3051 if (cfg->generic_sharing_context)
3052 context_used = mono_class_check_context_used (klass);
3055 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3056 klass, MONO_RGCTX_INFO_VTABLE);
3058 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3062 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* LLVM uses a trampoline variant with a different signature. */
3065 if (COMPILE_LLVM (cfg))
3066 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3068 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3069 #ifdef MONO_ARCH_VTABLE_REG
/* The trampoline expects the vtable in a fixed architecture register. */
3070 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3071 cfg->uses_vtable_reg = TRUE;
/*
 * emit_seq_point:
 *
 *   Emit a debugger sequence point at IL offset IP if sequence points are
 * enabled and METHOD is the method being compiled (i.e. not an inlinee).
 */
3078 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc)
3082 if (cfg->gen_seq_points && cfg->method == method) {
3083 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3084 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *
 *   With --debug=casts, emit code that records the object's actual class
 * and the target KLASS in JIT TLS (class_cast_from/class_cast_to) so a
 * failing cast can produce a detailed exception message.
 */
3089 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
3091 if (mini_get_debug_options ()->better_cast_details) {
3092 int to_klass_reg = alloc_preg (cfg);
3093 int vtable_reg = alloc_preg (cfg);
3094 int klass_reg = alloc_preg (cfg);
3095 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
/* No TLS intrinsic on this platform: the feature can't work. */
3098 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3102 MONO_ADD_INS (cfg->cbb, tls_get);
/* obj -> vtable -> klass: the source class of the cast. */
3103 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3104 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3106 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3107 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3108 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * reset_cast_details:
 *
 *   Clear the cast-details state recorded by save_cast_details () once the
 * cast has succeeded (only the 'from' field needs resetting).
 */
3113 reset_cast_details (MonoCompile *cfg)
3115 /* Reset the variables holding the cast details */
3116 if (mini_get_debug_options ()->better_cast_details) {
3117 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3119 MONO_ADD_INS (cfg->cbb, tls_get);
3120 /* It is enough to reset the from field */
3121 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3126 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit a check that OBJ's exact runtime type is ARRAY_CLASS, throwing
 * ArrayTypeMismatchException otherwise (used for array covariance checks
 * on stores).  The comparison strategy depends on the compilation mode:
 * class pointer (shared), rgctx-fetched vtable (gshared), or vtable
 * constant (AOT/JIT).
 */
3129 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3131 int vtable_reg = alloc_preg (cfg);
3132 int context_used = 0;
3134 if (cfg->generic_sharing_context)
3135 context_used = mono_class_check_context_used (array_class);
3137 save_cast_details (cfg, array_class, obj->dreg);
/* Faulting load: also performs the null check on OBJ. */
3139 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3141 if (cfg->opt & MONO_OPT_SHARED) {
/* Shared code: vtables are per-domain, compare class pointers instead. */
3142 int class_reg = alloc_preg (cfg);
3143 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3144 if (cfg->compile_aot) {
3145 int klass_reg = alloc_preg (cfg);
3146 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3147 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3149 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3151 } else if (context_used) {
/* Generic sharing: fetch the expected vtable from the rgctx. */
3152 MonoInst *vtable_ins;
3154 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3155 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3157 if (cfg->compile_aot) {
3161 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3163 vt_reg = alloc_preg (cfg);
3164 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3165 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3168 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3170 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3174 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3176 reset_cast_details (cfg);
3180 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3181 * generic code is generated.
3184 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3186 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3189 MonoInst *rgctx, *addr;
3191 /* FIXME: What if the class is shared? We might not
3192 have to get the address of the method from the
/* Shared code: resolve Nullable<T>.Unbox's address via the rgctx and
   call it indirectly. */
3194 addr = emit_get_rgctx_method (cfg, context_used, method,
3195 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3197 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3199 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
/* Non-shared: a direct call works. */
3201 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *
 *   Emit an unbox of sp [0] to valuetype KLASS: null/type-check the boxed
 * object, then compute the address of its payload (just past the
 * MonoObject header).  Under generic sharing the element class comparison
 * uses an rgctx fetch.
 */
3206 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3210 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3211 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3212 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3213 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3215 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check. */
3216 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3217 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3219 /* FIXME: generics */
3220 g_assert (klass->rank == 0);
/* Arrays can never unbox to a valuetype. */
3223 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3224 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3226 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3227 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
/* Shared code: compare against the rgctx-resolved element class. */
3230 MonoInst *element_class;
3232 /* This assertion is from the unboxcast insn */
3233 g_assert (klass->rank == 0);
3235 element_class = emit_get_rgctx_klass (cfg, context_used,
3236 klass->element_class, MONO_RGCTX_INFO_KLASS);
3238 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3239 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3241 save_cast_details (cfg, klass->element_class, obj_reg);
3242 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3243 reset_cast_details (cfg);
/* Address of the payload: skip the MonoObject header. */
3246 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3247 MONO_ADD_INS (cfg->cbb, add);
3248 add->type = STACK_MP;
3255 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit IR to allocate an instance of KLASS (FOR_BOX set when the object
 * is the target of a box).  Picks between the shared mono_object_new,
 * mono_object_new_specific, a corlib-specialized AOT helper, a managed
 * allocator method, or mono_class_get_allocation_ftn ().
 */
3258 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3260 MonoInst *iargs [2];
/* Generic-sharing path: KLASS isn't closed, so vtable/klass come from
   the rgctx. */
3266 MonoInst *iargs [2];
3269 FIXME: we cannot get managed_alloc here because we can't get
3270 the class's vtable (because it's not a closed class)
3272 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3273 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3276 if (cfg->opt & MONO_OPT_SHARED)
3277 rgctx_info = MONO_RGCTX_INFO_KLASS;
3279 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3280 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3282 if (cfg->opt & MONO_OPT_SHARED) {
3283 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3285 alloc_ftn = mono_object_new;
3288 alloc_ftn = mono_object_new_specific;
3291 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-shared paths below. */
3294 if (cfg->opt & MONO_OPT_SHARED) {
3295 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3296 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3298 alloc_ftn = mono_object_new;
3299 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3300 /* This happens often in argument checking code, eg. throw new FooException... */
3301 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3302 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3303 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3305 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3306 MonoMethod *managed_alloc = NULL;
/* Vtable creation failed: report a type-load error to the caller. */
3310 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3311 cfg->exception_ptr = klass;
3315 #ifndef MONO_CROSS_COMPILE
3316 managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3319 if (managed_alloc) {
3320 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3321 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3323 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocators take the instance size in words as first argument. */
3325 guint32 lw = vtable->klass->instance_size;
3326 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3327 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3328 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3331 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3335 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3339 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit IR to box VAL into an object of class KLASS.  Nullable<T> goes
 * through its Box method (indirectly via the rgctx under sharing); other
 * valuetypes allocate with handle_alloc () and store the value into the
 * payload after the MonoObject header.
 */
3342 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
3344 MonoInst *alloc, *ins;
3346 if (mono_class_is_nullable (klass)) {
3347 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3350 /* FIXME: What if the class is shared? We might not
3351 have to get the method address from the RGCTX. */
3352 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3353 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3354 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3356 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3358 return mono_emit_method_call (cfg, method, &val, NULL);
3362 alloc = handle_alloc (cfg, klass, TRUE, context_used);
/* Copy the value just past the MonoObject header. */
3366 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return whether KLASS has at least one (co)variant generic argument
 * instantiated with a reference type — such casts need the
 * variance-aware isinst/castclass helpers.
 */
3373 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
3376 MonoGenericContainer *container;
3377 MonoGenericInst *ginst;
3379 if (klass->generic_class) {
/* Closed generic instance: inspect its own instantiation. */
3380 container = klass->generic_class->container_class->generic_container;
3381 ginst = klass->generic_class->context.class_inst;
3382 } else if (klass->generic_container && context_used) {
/* Open generic definition under sharing: use the container's context. */
3383 container = klass->generic_container;
3384 ginst = container->context.class_inst;
3389 for (i = 0; i < container->type_argc; ++i) {
/* Only variant parameters matter for variance-aware casts. */
3391 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
3393 type = ginst->type_argv [i];
3394 if (mini_type_is_reference (cfg, type))
3400 // FIXME: This doesn't work yet (class libs tests fail?)
/* Decides whether an isinst/castclass needs the icall slow path instead of
 * inline checks.  The leading TRUE currently forces the icall path for all
 * classes (see the FIXME above). */
3401 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
3404 * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 *
 *   Emit IR for 'castclass KLASS' on SRC: variance-aware classes use the
 * cached castclass wrapper, complex classes use the mono_object_castclass
 * icall, and simple classes get inline null-check + vtable/klass
 * comparisons, throwing InvalidCastException on mismatch.
 */
3407 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3409 MonoBasicBlock *is_null_bb;
3410 int obj_reg = src->dreg;
3411 int vtable_reg = alloc_preg (cfg);
3412 MonoInst *klass_inst = NULL;
/* Variant generic arguments: delegate to the caching managed wrapper. */
3417 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
3418 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
3419 MonoInst *cache_ins;
3421 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3426 /* klass - it's the second element of the cache entry*/
3427 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3430 args [2] = cache_ins;
3432 return mono_emit_method_call (cfg, mono_castclass, args, NULL);
3435 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3437 if (is_complex_isinst (klass)) {
3438 /* Complex case, handle by an icall */
3444 args [1] = klass_inst;
3446 return mono_emit_jit_icall (cfg, mono_object_castclass, args);
3448 /* Simple case, handled by the code below */
/* A null reference always passes castclass. */
3452 NEW_BBLOCK (cfg, is_null_bb);
3454 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3455 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3457 save_cast_details (cfg, klass, obj_reg);
3459 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3460 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3461 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3463 int klass_reg = alloc_preg (cfg);
3465 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array class: exact class (or vtable) comparison suffices. */
3467 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3468 /* the remoting code is broken, access the class for now */
3469 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3470 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3472 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3473 cfg->exception_ptr = klass;
3476 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3478 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3479 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3481 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* General case: walk the class hierarchy. */
3483 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3484 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3488 MONO_START_BB (cfg, is_null_bb);
3490 reset_cast_details (cfg);
3496 * Returns NULL and set the cfg exception on error.
3499 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3502 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3503 int obj_reg = src->dreg;
3504 int vtable_reg = alloc_preg (cfg);
3505 int res_reg = alloc_ireg_ref (cfg);
3506 MonoInst *klass_inst = NULL;
3511 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
3512 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
3513 MonoInst *cache_ins;
3515 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3520 /* klass - it's the second element of the cache entry*/
3521 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3524 args [2] = cache_ins;
3526 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
3529 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3531 if (is_complex_isinst (klass)) {
3532 /* Complex case, handle by an icall */
3538 args [1] = klass_inst;
3540 return mono_emit_jit_icall (cfg, mono_object_isinst, args);
3542 /* Simple case, the code below can handle it */
3546 NEW_BBLOCK (cfg, is_null_bb);
3547 NEW_BBLOCK (cfg, false_bb);
3548 NEW_BBLOCK (cfg, end_bb);
3550 /* Do the assignment at the beginning, so the other assignment can be if converted */
3551 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3552 ins->type = STACK_OBJ;
3555 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3556 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3558 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3560 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3561 g_assert (!context_used);
3562 /* the is_null_bb target simply copies the input register to the output */
3563 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3565 int klass_reg = alloc_preg (cfg);
3568 int rank_reg = alloc_preg (cfg);
3569 int eclass_reg = alloc_preg (cfg);
3571 g_assert (!context_used);
3572 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3573 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3574 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3575 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3576 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
3577 if (klass->cast_class == mono_defaults.object_class) {
3578 int parent_reg = alloc_preg (cfg);
3579 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3580 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3581 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3582 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3583 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3584 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3585 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3586 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3587 } else if (klass->cast_class == mono_defaults.enum_class) {
3588 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3589 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3590 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3591 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3593 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3594 /* Check that the object is a vector too */
3595 int bounds_reg = alloc_preg (cfg);
3596 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3597 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3598 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3601 /* the is_null_bb target simply copies the input register to the output */
3602 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3604 } else if (mono_class_is_nullable (klass)) {
3605 g_assert (!context_used);
3606 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3607 /* the is_null_bb target simply copies the input register to the output */
3608 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
3610 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3611 g_assert (!context_used);
3612 /* the remoting code is broken, access the class for now */
3613 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3614 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3616 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3617 cfg->exception_ptr = klass;
3620 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3622 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3623 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3625 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3626 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3628 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3629 /* the is_null_bb target simply copies the input register to the output */
3630 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
3635 MONO_START_BB (cfg, false_bb);
3637 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3638 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3640 MONO_START_BB (cfg, is_null_bb);
3642 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR implementing the CISINST (remoting-aware isinst) opcode for SRC
 * against KLASS, leaving an integer result in a fresh ireg (see contract
 * comment below).  NOTE(review): several lines are elided in this view
 * (function prologue/epilogue, some braces); comments describe only the
 * visible emission sequence.
 */
3648 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3650 /* This opcode takes as input an object reference and a class, and returns:
3651 0) if the object is an instance of the class,
3652 1) if the object is not instance of the class,
3653 2) if the object is a proxy whose type cannot be determined */
3656 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3657 int obj_reg = src->dreg;
3658 int dreg = alloc_ireg (cfg);
3660 int klass_reg = alloc_preg (cfg);
3662 NEW_BBLOCK (cfg, true_bb);
3663 NEW_BBLOCK (cfg, false_bb);
3664 NEW_BBLOCK (cfg, false2_bb);
3665 NEW_BBLOCK (cfg, end_bb);
3666 NEW_BBLOCK (cfg, no_proxy_bb);
/* A null reference is "not an instance": branch straight to the result-1 block. */
3668 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3669 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
/* Interface case: try the interface cast first; on failure, check whether the
 * object is a transparent proxy before concluding "no". */
3671 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3672 NEW_BBLOCK (cfg, interface_fail_bb);
3674 tmp_reg = alloc_preg (cfg);
3675 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3676 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
3677 MONO_START_BB (cfg, interface_fail_bb);
3678 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Not a proxy -> definitely not an instance (result 1). */
3680 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
/* Proxy without custom type info -> type cannot be determined (result 2). */
3682 tmp_reg = alloc_preg (cfg);
3683 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3684 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3685 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
/* Non-interface case: load the object's class from its vtable. */
3687 tmp_reg = alloc_preg (cfg);
3688 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3689 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3691 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* For a transparent proxy, test against the remote class's proxy_class instead. */
3692 tmp_reg = alloc_preg (cfg);
3693 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3694 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3696 tmp_reg = alloc_preg (cfg);
3697 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3698 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3699 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
/* Proxy with custom type info: cast failure means "undetermined" (result 2). */
3701 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3702 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3704 MONO_START_BB (cfg, no_proxy_bb);
3706 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Result blocks: 1 = not an instance, 2 = proxy undetermined, 0 = instance. */
3709 MONO_START_BB (cfg, false_bb);
3711 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3712 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3714 MONO_START_BB (cfg, false2_bb);
3716 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3717 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3719 MONO_START_BB (cfg, true_bb);
3721 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3723 MONO_START_BB (cfg, end_bb);
/* Materialize the result as an I4 instruction for the evaluation stack. */
3726 MONO_INST_NEW (cfg, ins, OP_ICONST);
3728 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR implementing the CCASTCLASS (remoting-aware castclass) opcode for
 * SRC against KLASS (see contract comment below).  NOTE(review): some lines
 * (prologue/epilogue, braces) are elided in this view.
 */
3734 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3736 /* This opcode takes as input an object reference and a class, and returns:
3737 0) if the object is an instance of the class,
3738 1) if the object is a proxy whose type cannot be determined
3739 an InvalidCastException exception is thrown otherwise*/
3742 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3743 int obj_reg = src->dreg;
3744 int dreg = alloc_ireg (cfg);
3745 int tmp_reg = alloc_preg (cfg);
3746 int klass_reg = alloc_preg (cfg);
3748 NEW_BBLOCK (cfg, end_bb);
3749 NEW_BBLOCK (cfg, ok_result_bb);
/* Casting a null reference always succeeds (result 0). */
3751 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3752 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record klass/obj so a failed cast can produce a detailed exception message. */
3754 save_cast_details (cfg, klass, obj_reg);
/* Interface case: on iface-cast failure the object must be a transparent
 * proxy with custom type info, otherwise throw InvalidCastException. */
3756 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3757 NEW_BBLOCK (cfg, interface_fail_bb);
3759 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3760 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3761 MONO_START_BB (cfg, interface_fail_bb);
3762 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3764 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3766 tmp_reg = alloc_preg (cfg);
3767 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3768 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3769 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Proxy with custom type info: type cannot be determined here (result 1). */
3771 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3772 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Non-interface case. */
3775 NEW_BBLOCK (cfg, no_proxy_bb);
3777 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3778 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3779 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Transparent proxy: test against the remote class's proxy_class. */
3781 tmp_reg = alloc_preg (cfg);
3782 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3783 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3785 tmp_reg = alloc_preg (cfg);
3786 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3787 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3788 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3790 NEW_BBLOCK (cfg, fail_1_bb);
3792 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
/* Proxy cast failure: result 1 (undetermined), no exception. */
3794 MONO_START_BB (cfg, fail_1_bb);
3796 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3797 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Plain object: a failed castclass here throws (inside mini_emit_castclass). */
3799 MONO_START_BB (cfg, no_proxy_bb);
3801 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3804 MONO_START_BB (cfg, ok_result_bb);
3806 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3808 MONO_START_BB (cfg, end_bb);
/* Materialize the result as an I4 instruction for the evaluation stack. */
3811 MONO_INST_NEW (cfg, ins, OP_ICONST);
3813 ins->type = STACK_I4;
/*
 * handle_delegate_ctor:
 *
 *   Allocate a delegate object of type KLASS and initialize it inline
 * (target, method, optional cached-code slot, invoke_impl trampoline),
 * mirroring what mono_delegate_ctor () would do at runtime.
 * NOTE(review): some lines are elided in this view.
 */
3819 * Returns NULL and set the cfg exception on error.
3821 static G_GNUC_UNUSED MonoInst*
3822 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
3826 gpointer *trampoline;
3827 MonoInst *obj, *method_ins, *tramp_ins;
/* Allocate the delegate object itself. */
3831 obj = handle_alloc (cfg, klass, FALSE, 0);
3835 /* Inline the contents of mono_delegate_ctor */
3837 /* Set target field */
3838 /* Optimize away setting of NULL target */
3839 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
3840 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* Reference store into a heap object: notify the GC if write barriers are on. */
3841 if (cfg->gen_write_barriers) {
3842 dreg = alloc_preg (cfg);
3843 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target));
3844 emit_write_barrier (cfg, ptr, target, 0);
3848 /* Set method field */
3849 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
3850 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3851 if (cfg->gen_write_barriers) {
3852 dreg = alloc_preg (cfg);
3853 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method));
3854 emit_write_barrier (cfg, ptr, method_ins, 0);
3857 * To avoid looking up the compiled code belonging to the target method
3858 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3859 * store it, and we fill it after the method has been compiled.
3861 if (!cfg->compile_aot && !method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
3862 MonoInst *code_slot_ins;
3865 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Lazily create the per-domain method->code-slot hash under the domain lock. */
3867 domain = mono_domain_get ();
3868 mono_domain_lock (domain);
3869 if (!domain_jit_info (domain)->method_code_hash)
3870 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3871 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3873 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3874 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3876 mono_domain_unlock (domain);
3878 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3880 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3883 /* Set invoke_impl field */
/* AOT code cannot embed a runtime pointer: patch the trampoline in later. */
3884 if (cfg->compile_aot) {
3885 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3887 trampoline = mono_create_delegate_trampoline (cfg->domain, klass);
3888 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3890 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3892 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call to the rank-specific mono_array_new_va icall wrapper for a
 * NEWOBJ on a multi-dimensional array type; SP holds the dimension arguments.
 */
3898 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3900 MonoJitICallInfo *info;
3902 /* Need to register the icall so it gets an icall wrapper */
3903 info = mono_get_array_new_va_icall (rank);
3905 cfg->flags |= MONO_CFG_HAS_VARARGS;
3907 /* mono_array_new_va () needs a vararg calling convention */
/* LLVM cannot compile vararg calls like this one, so fall back to the JIT. */
3908 cfg->disable_llvm = TRUE;
3910 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3911 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Emit an OP_LOAD_GOTADDR into the entry basic block to initialize
 * cfg->got_var, plus a dummy use in the exit block to keep the variable
 * alive for the whole method.  No-op if there is no got_var or it was
 * already allocated.
 */
3915 mono_emit_load_got_addr (MonoCompile *cfg)
3917 MonoInst *getaddr, *dummy_use;
3919 if (!cfg->got_var || cfg->got_var_allocated)
3922 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3923 getaddr->dreg = cfg->got_var->dreg;
3925 /* Add it to the start of the first bblock */
/* Prepend manually if the entry block already has code; otherwise append. */
3926 if (cfg->bb_entry->code) {
3927 getaddr->next = cfg->bb_entry->code;
3928 cfg->bb_entry->code = getaddr;
3931 MONO_ADD_INS (cfg->bb_entry, getaddr);
3933 cfg->got_var_allocated = TRUE;
3936 * Add a dummy use to keep the got_var alive, since real uses might
3937 * only be generated by the back ends.
3938 * Add it to end_bblock, so the variable's lifetime covers the whole
3940 * It would be better to make the usage of the got var explicit in all
3941 * cases when the backend needs it (i.e. calls, throw etc.), so this
3942 * wouldn't be needed.
3944 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3945 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Lazily-initialized inline size limit (overridable via MONO_INLINELIMIT). */
3948 static int inline_limit;
3949 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled in
 * CFG.  Checks inline depth, method attributes, IL size against the limit,
 * class-initialization constraints, declarative security and (on soft-float
 * targets) R4 parameters/returns.  NOTE(review): some lines, including the
 * explicit return statements, are elided in this view.
 */
3952 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3954 MonoMethodHeaderSummary header;
3956 #ifdef MONO_ARCH_SOFT_FLOAT
3957 MonoMethodSignature *sig = mono_method_signature (method);
/* Inlining is not supported when compiling with generic sharing. */
3961 if (cfg->generic_sharing_context)
/* Cap the inline recursion depth. */
3964 if (cfg->inline_depth > 10)
3967 #ifdef MONO_ARCH_HAVE_LMF_OPS
3968 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3969 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3970 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3975 if (!mono_method_get_header_summary (method, &header))
3978 /*runtime, icall and pinvoke are checked by summary call*/
3979 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3980 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3981 (method->klass->marshalbyref) ||
3985 /* also consider num_locals? */
3986 /* Do the size check early to avoid creating vtables */
3987 if (!inline_limit_inited) {
3988 if (getenv ("MONO_INLINELIMIT"))
3989 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3991 inline_limit = INLINE_LENGTH_LIMIT;
3992 inline_limit_inited = TRUE;
/* AggressiveInlining overrides the IL size limit. */
3994 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
3998 * if we can initialize the class of the method right away, we do,
3999 * otherwise we don't allow inlining if the class needs initialization,
4000 * since it would mean inserting a call to mono_runtime_class_init()
4001 * inside the inlined code
4003 if (!(cfg->opt & MONO_OPT_SHARED)) {
4004 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
4005 if (cfg->run_cctors && method->klass->has_cctor) {
4006 /*FIXME it would be easier and lazier to just use mono_class_try_get_vtable */
4007 if (!method->klass->runtime_info)
4008 /* No vtable created yet */
4010 vtable = mono_class_vtable (cfg->domain, method->klass);
4013 /* This makes so that inline cannot trigger */
4014 /* .cctors: too many apps depend on them */
4015 /* running with a specific order... */
4016 if (! vtable->initialized)
4018 mono_runtime_class_init (vtable);
4020 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
4021 if (!method->klass->runtime_info)
4022 /* No vtable created yet */
4024 vtable = mono_class_vtable (cfg->domain, method->klass);
4027 if (!vtable->initialized)
4032 * If we're compiling for shared code
4033 * the cctor will need to be run at aot method load time, for example,
4034 * or at the end of the compilation of the inlining method.
4036 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
4041 * CAS - do not inline methods with declarative security
4042 * Note: this has to be before any possible return TRUE;
4044 if (mono_method_has_declsec (method))
4047 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float targets cannot inline methods taking or returning R4 values. */
4049 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4051 for (i = 0; i < sig->param_count; ++i)
4052 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Decide whether a static field access in METHOD requires emitting a call
 * to run VTABLE's class constructor first.  NOTE(review): the return
 * statements of each branch are elided in this view; the conditions below
 * are the tests that gate them.
 */
4060 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
/* Already initialized (and not AOT, where init state is a compile-time lie). */
4062 if (vtable->initialized && !cfg->compile_aot)
/* beforefieldinit classes allow lazy initialization. */
4065 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
4068 if (!mono_class_needs_cctor_run (vtable->klass, method))
4071 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
4072 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR with element class KLASS, optionally with a bounds check
 * (BCHECK).  Returns the address instruction (type STACK_MP).
 */
4079 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4083 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
4085 mono_class_init (klass);
4086 size = mono_class_array_element_size (klass);
4088 mult_reg = alloc_preg (cfg);
4089 array_reg = arr->dreg;
4090 index_reg = index->dreg;
4092 #if SIZEOF_REGISTER == 8
4093 /* The array reg is 64 bits but the index reg is only 32 */
4094 if (COMPILE_LLVM (cfg)) {
/* LLVM handles the widening itself. */
4096 index2_reg = index_reg;
4098 index2_reg = alloc_preg (cfg);
4099 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32-bit target: narrow an I8 index down to I4 first. */
4102 if (index->type == STACK_I8) {
4103 index2_reg = alloc_preg (cfg);
4104 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4106 index2_reg = index_reg;
4111 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
4113 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Power-of-two element sizes can use a single LEA on x86/amd64. */
4114 if (size == 1 || size == 2 || size == 4 || size == 8) {
4115 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4117 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
4118 ins->klass = mono_class_get_element_class (klass);
4119 ins->type = STACK_MP;
/* Generic path: addr = arr + index * size + offsetof (MonoArray, vector). */
4125 add_reg = alloc_ireg_mp (cfg);
4127 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
4128 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4129 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4130 ins->klass = mono_class_get_element_class (klass);
4131 ins->type = STACK_MP;
4132 MONO_ADD_INS (cfg->cbb, ins);
4137 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [INDEX_INS1, INDEX_INS2] of the
 * two-dimensional array ARR with element class KLASS, including per-dimension
 * lower-bound adjustment and range checks against the bounds array.
 * Only available when the architecture has real mul/div opcodes.
 */
4139 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4141 int bounds_reg = alloc_preg (cfg);
4142 int add_reg = alloc_ireg_mp (cfg);
4143 int mult_reg = alloc_preg (cfg);
4144 int mult2_reg = alloc_preg (cfg);
4145 int low1_reg = alloc_preg (cfg);
4146 int low2_reg = alloc_preg (cfg);
4147 int high1_reg = alloc_preg (cfg);
4148 int high2_reg = alloc_preg (cfg);
4149 int realidx1_reg = alloc_preg (cfg);
4150 int realidx2_reg = alloc_preg (cfg);
4151 int sum_reg = alloc_preg (cfg);
4152 int index1, index2, tmpreg;
4156 mono_class_init (klass);
4157 size = mono_class_array_element_size (klass);
4159 index1 = index_ins1->dreg;
4160 index2 = index_ins2->dreg;
4162 #if SIZEOF_REGISTER == 8
4163 /* The array reg is 64 bits but the index reg is only 32 */
4164 if (COMPILE_LLVM (cfg)) {
/* Sign-extend both indexes to pointer width on 64-bit targets. */
4167 tmpreg = alloc_preg (cfg);
4168 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
4170 tmpreg = alloc_preg (cfg);
4171 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
4175 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
4179 /* range checking */
4180 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4181 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 0: realidx = index - lower_bound; unsigned compare vs. length
 * catches both negative and too-large indexes in one test. */
4183 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4184 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4185 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4186 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4187 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4188 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4189 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 1: same check against the second MonoArrayBounds entry. */
4191 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4192 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4193 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4194 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4195 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
4196 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4197 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + ((realidx1 * len2 + realidx2) * size) + offsetof (vector). */
4199 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4200 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4201 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4202 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4203 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4205 ins->type = STACK_MP;
4207 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit IR computing the address of an array element for the Get/Set/Address
 * methods of an array class CMETHOD; SP holds the array plus the index
 * arguments.  Rank 1 (and rank 2 with intrinsics enabled) are inlined;
 * higher ranks call a marshalled Address helper.
 */
4214 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip)
4218 MonoMethod *addr_method;
/* For a setter, the last parameter is the value, not an index. */
4221 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4224 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
4226 #ifndef MONO_ARCH_EMULATE_MUL_DIV
4227 /* emit_ldelema_2 depends on OP_LMUL */
4228 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
4229 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* General case: call the generated array Address wrapper. */
4233 element_size = mono_class_array_element_size (cmethod->klass->element_class);
4234 addr_method = mono_marshal_get_array_address (rank, element_size);
4235 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint request. */
4240 static MonoBreakPolicy
4241 always_insert_breakpoint (MonoMethod *method)
4243 return MONO_BREAK_POLICY_ALWAYS;
/* Currently installed break policy callback; never NULL. */
4246 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4249 * mono_set_break_policy:
4250 * policy_callback: the new callback function
4252 * Allow embedders to decide whether to actually obey breakpoint instructions
4253 * (both break IL instructions and Debugger.Break () method calls), for example
4254 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4255 * untrusted or semi-trusted code.
4257 * @policy_callback will be called every time a break point instruction needs to
4258 * be inserted with the method argument being the method that calls Debugger.Break()
4259 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4260 * if it wants the breakpoint to not be effective in the given method.
4261 * #MONO_BREAK_POLICY_ALWAYS is the default.
4264 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
/* A NULL callback restores the default always-break policy. */
4266 if (policy_callback)
4267 break_policy_func = policy_callback;
4269 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:
 *
 *   Consult the installed break policy to decide whether a breakpoint for
 * METHOD should actually be emitted.  NOTE(review): the return values of the
 * ALWAYS/NEVER cases are elided in this view.
 */
4273 should_insert_brekpoint (MonoMethod *method) {
4274 switch (break_policy_func (method)) {
4275 case MONO_BREAK_POLICY_ALWAYS:
4277 case MONO_BREAK_POLICY_NEVER:
/* ON_DBG: only break when running under the Mono debugger. */
4279 case MONO_BREAK_POLICY_ON_DBG:
4280 return mono_debug_using_mono_debugger ();
/* An out-of-range value from an embedder callback is a bug worth flagging. */
4282 g_warning ("Incorrect value returned from break policy callback");
4287 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Emit an inline element load or store for the Array Get/SetGenericValueImpl
 * icalls: args [0] = array, args [1] = index, args [2] = value address.
 */
4289 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4291 MonoInst *addr, *store, *load;
4292 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4294 /* the bounds check is already done by the callers */
4295 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* Store path: read the value through args [2] and write it into the array. */
4297 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4298 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
/* Storing a reference into the array needs a GC write barrier. */
4299 if (mini_type_is_reference (cfg, fsig->params [2]))
4300 emit_write_barrier (cfg, addr, load, -1);
/* Load path: read the element and write it through args [2]. */
4302 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4303 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* Whether KLASS is a reference type under the current sharing context. */
4310 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4312 return mini_type_is_reference (cfg, &klass->byval_arg);
/*
 * emit_array_store:
 *
 *   Emit IR for storing sp [2] into element sp [1] of array sp [0] whose
 * element class is KLASS.  With SAFETY_CHECKS, reference stores go through
 * the virtual stelemref helper (which performs the array-covariance check);
 * otherwise the store is emitted inline with an optional bounds check.
 */
4316 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
/* Storing a non-null reference requires the covariance-checking helper. */
4318 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
4319 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
4320 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
4321 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
4322 MonoInst *iargs [3];
4325 mono_class_setup_vtable (obj_array);
4326 g_assert (helper->slot);
4328 if (sp [0]->type != STACK_OBJ)
4330 if (sp [2]->type != STACK_OBJ)
4337 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
/* Constant index: fold the element offset at compile time. */
4340 if (sp [1]->opcode == OP_ICONST) {
4341 int array_reg = sp [0]->dreg;
4342 int index_reg = sp [1]->dreg;
4343 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
4346 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
4347 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
/* Variable index: compute the element address, then store through it. */
4349 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
4350 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
4351 if (generic_class_is_reference_type (cfg, klass))
4352 emit_write_barrier (cfg, addr, sp [2], -1);
/*
 * emit_array_unsafe_access:
 *
 *   Emit IR for the Array.UnsafeStore/UnsafeLoad intrinsics: an element store
 * or load with no bounds or type-safety checks.  The element class comes
 * from the value parameter (store) or the return type (load).
 */
4359 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4364 eklass = mono_class_from_mono_type (fsig->params [2]);
4366 eklass = mono_class_from_mono_type (fsig->ret);
/* Store goes through emit_array_store with safety checks disabled. */
4370 return emit_array_store (cfg, eklass, args, FALSE);
4372 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
4373 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to replace a constructor call with an intrinsic instruction.
 * Currently only SIMD constructors are handled (when MONO_OPT_SIMD is on);
 * returns NULL when no intrinsic applies.
 */
4379 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4381 MonoInst *ins = NULL;
4382 #ifdef MONO_ARCH_SIMD_INTRINSICS
4383 if (cfg->opt & MONO_OPT_SIMD) {
4384 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/*
 * emit_memory_barrier:
 *
 *   Emit an OP_MEMORY_BARRIER instruction of the given KIND into the
 * current basic block.
 */
4394 emit_memory_barrier (MonoCompile *cfg, int kind)
4396 MonoInst *ins = NULL;
4397 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4398 MONO_ADD_INS (cfg->cbb, ins);
/* The barrier kind (acquire/release/full) is consumed by the backend. */
4399 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *
 *   Emit intrinsic IR for methods the LLVM backend can lower directly:
 * System.Math Sin/Cos/Sqrt/Abs(double), and Min/Max on I4/U4/I8/U8 when
 * CMOV optimization is enabled.  Returns NULL when no intrinsic applies.
 * NOTE(review): opcode-selection assignments for several branches are
 * elided in this view.
 */
4405 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4407 MonoInst *ins = NULL;
4410 /* The LLVM backend supports these intrinsics */
4411 if (cmethod->klass == mono_defaults.math_class) {
4412 if (strcmp (cmethod->name, "Sin") == 0) {
4414 } else if (strcmp (cmethod->name, "Cos") == 0) {
4416 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
/* Abs is only intrinsified for the double overload. */
4418 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
/* Unary R8 intrinsic: result in a fresh float register. */
4423 MONO_INST_NEW (cfg, ins, opcode);
4424 ins->type = STACK_R8;
4425 ins->dreg = mono_alloc_freg (cfg);
4426 ins->sreg1 = args [0]->dreg;
4427 MONO_ADD_INS (cfg->cbb, ins);
/* Min/Max map onto conditional-move opcodes per operand type. */
4431 if (cfg->opt & MONO_OPT_CMOV) {
4432 if (strcmp (cmethod->name, "Min") == 0) {
4433 if (fsig->params [0]->type == MONO_TYPE_I4)
4435 if (fsig->params [0]->type == MONO_TYPE_U4)
4436 opcode = OP_IMIN_UN;
4437 else if (fsig->params [0]->type == MONO_TYPE_I8)
4439 else if (fsig->params [0]->type == MONO_TYPE_U8)
4440 opcode = OP_LMIN_UN;
4441 } else if (strcmp (cmethod->name, "Max") == 0) {
4442 if (fsig->params [0]->type == MONO_TYPE_I4)
4444 if (fsig->params [0]->type == MONO_TYPE_U4)
4445 opcode = OP_IMAX_UN;
4446 else if (fsig->params [0]->type == MONO_TYPE_I8)
4448 else if (fsig->params [0]->type == MONO_TYPE_U8)
4449 opcode = OP_LMAX_UN;
/* Binary integer intrinsic: stack type follows the first parameter. */
4454 MONO_INST_NEW (cfg, ins, opcode);
4455 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
4456 ins->dreg = mono_alloc_ireg (cfg);
4457 ins->sreg1 = args [0]->dreg;
4458 ins->sreg2 = args [1]->dreg;
4459 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *
 *   Intrinsics that are safe to use under generic sharing; currently only
 * Array.UnsafeStore/UnsafeLoad.  Returns NULL when no intrinsic applies.
 */
4467 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4469 if (cmethod->klass == mono_defaults.array_class) {
4470 if (strcmp (cmethod->name, "UnsafeStore") == 0)
4471 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
4472 if (strcmp (cmethod->name, "UnsafeLoad") == 0)
4473 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
4480 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4482 MonoInst *ins = NULL;
4484 static MonoClass *runtime_helpers_class = NULL;
4485 if (! runtime_helpers_class)
4486 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
4487 "System.Runtime.CompilerServices", "RuntimeHelpers");
4489 if (cmethod->klass == mono_defaults.string_class) {
4490 if (strcmp (cmethod->name, "get_Chars") == 0) {
4491 int dreg = alloc_ireg (cfg);
4492 int index_reg = alloc_preg (cfg);
4493 int mult_reg = alloc_preg (cfg);
4494 int add_reg = alloc_preg (cfg);
4496 #if SIZEOF_REGISTER == 8
4497 /* The array reg is 64 bits but the index reg is only 32 */
4498 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
4500 index_reg = args [1]->dreg;
4502 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
4504 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4505 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
4506 add_reg = ins->dreg;
4507 /* Avoid a warning */
4509 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4512 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
4513 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4514 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4515 add_reg, G_STRUCT_OFFSET (MonoString, chars));
4517 type_from_op (ins, NULL, NULL);
4519 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4520 int dreg = alloc_ireg (cfg);
4521 /* Decompose later to allow more optimizations */
4522 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
4523 ins->type = STACK_I4;
4524 ins->flags |= MONO_INST_FAULT;
4525 cfg->cbb->has_array_access = TRUE;
4526 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
4529 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
4530 int mult_reg = alloc_preg (cfg);
4531 int add_reg = alloc_preg (cfg);
4533 /* The corlib functions check for oob already. */
4534 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
4535 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4536 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
4537 return cfg->cbb->last_ins;
4540 } else if (cmethod->klass == mono_defaults.object_class) {
4542 if (strcmp (cmethod->name, "GetType") == 0) {
4543 int dreg = alloc_ireg_ref (cfg);
4544 int vt_reg = alloc_preg (cfg);
4545 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4546 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
4547 type_from_op (ins, NULL, NULL);
4550 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
4551 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
4552 int dreg = alloc_ireg (cfg);
4553 int t1 = alloc_ireg (cfg);
4555 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
4556 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
4557 ins->type = STACK_I4;
4561 } else if (strcmp (cmethod->name, ".ctor") == 0) {
4562 MONO_INST_NEW (cfg, ins, OP_NOP);
4563 MONO_ADD_INS (cfg->cbb, ins);
4567 } else if (cmethod->klass == mono_defaults.array_class) {
4568 if (strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
4569 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
4571 #ifndef MONO_BIG_ARRAYS
4573 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
4576 if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
4577 int dreg = alloc_ireg (cfg);
4578 int bounds_reg = alloc_ireg_mp (cfg);
4579 MonoBasicBlock *end_bb, *szarray_bb;
4580 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
4582 NEW_BBLOCK (cfg, end_bb);
4583 NEW_BBLOCK (cfg, szarray_bb);
4585 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
4586 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
4587 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4588 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
4589 /* Non-szarray case */
4591 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4592 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4594 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4595 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4596 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4597 MONO_START_BB (cfg, szarray_bb);
4600 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4601 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4603 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4604 MONO_START_BB (cfg, end_bb);
4606 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
4607 ins->type = STACK_I4;
4613 if (cmethod->name [0] != 'g')
4616 if (strcmp (cmethod->name, "get_Rank") == 0) {
4617 int dreg = alloc_ireg (cfg);
4618 int vtable_reg = alloc_preg (cfg);
4619 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
4620 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4621 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
4622 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4623 type_from_op (ins, NULL, NULL);
4626 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4627 int dreg = alloc_ireg (cfg);
4629 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4630 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4631 type_from_op (ins, NULL, NULL);
4636 } else if (cmethod->klass == runtime_helpers_class) {
4638 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
4639 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
4643 } else if (cmethod->klass == mono_defaults.thread_class) {
4644 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
4645 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
4646 MONO_ADD_INS (cfg->cbb, ins);
4648 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
4649 return emit_memory_barrier (cfg, FullBarrier);
4651 } else if (cmethod->klass == mono_defaults.monitor_class) {
4653 /* FIXME this should be integrated to the check below once we support the trampoline version */
4654 #if defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4655 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) {
4656 MonoMethod *fast_method = NULL;
4658 /* Avoid infinite recursion */
4659 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN && !strcmp (cfg->method->name, "FastMonitorEnterV4"))
4662 fast_method = mono_monitor_get_fast_path (cmethod);
4666 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
4670 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
4671 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
4674 if (COMPILE_LLVM (cfg)) {
4676 * Pass the argument normally, the LLVM backend will handle the
4677 * calling convention problems.
4679 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4681 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
4682 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4683 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4684 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4687 return (MonoInst*)call;
4688 } else if (strcmp (cmethod->name, "Exit") == 0) {
4691 if (COMPILE_LLVM (cfg)) {
4692 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4694 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
4695 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4696 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4697 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4700 return (MonoInst*)call;
4702 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4704 MonoMethod *fast_method = NULL;
4706 /* Avoid infinite recursion */
4707 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
4708 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
4709 strcmp (cfg->method->name, "FastMonitorExit") == 0))
4712 if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) ||
4713 strcmp (cmethod->name, "Exit") == 0)
4714 fast_method = mono_monitor_get_fast_path (cmethod);
4718 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
4721 } else if (cmethod->klass->image == mono_defaults.corlib &&
4722 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
4723 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
4726 #if SIZEOF_REGISTER == 8
4727 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
4728 /* 64 bit reads are already atomic */
4729 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
4730 ins->dreg = mono_alloc_preg (cfg);
4731 ins->inst_basereg = args [0]->dreg;
4732 ins->inst_offset = 0;
4733 MONO_ADD_INS (cfg->cbb, ins);
4737 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
4738 if (strcmp (cmethod->name, "Increment") == 0) {
4739 MonoInst *ins_iconst;
4742 if (fsig->params [0]->type == MONO_TYPE_I4)
4743 opcode = OP_ATOMIC_ADD_NEW_I4;
4744 #if SIZEOF_REGISTER == 8
4745 else if (fsig->params [0]->type == MONO_TYPE_I8)
4746 opcode = OP_ATOMIC_ADD_NEW_I8;
4749 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4750 ins_iconst->inst_c0 = 1;
4751 ins_iconst->dreg = mono_alloc_ireg (cfg);
4752 MONO_ADD_INS (cfg->cbb, ins_iconst);
4754 MONO_INST_NEW (cfg, ins, opcode);
4755 ins->dreg = mono_alloc_ireg (cfg);
4756 ins->inst_basereg = args [0]->dreg;
4757 ins->inst_offset = 0;
4758 ins->sreg2 = ins_iconst->dreg;
4759 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4760 MONO_ADD_INS (cfg->cbb, ins);
4762 } else if (strcmp (cmethod->name, "Decrement") == 0) {
4763 MonoInst *ins_iconst;
4766 if (fsig->params [0]->type == MONO_TYPE_I4)
4767 opcode = OP_ATOMIC_ADD_NEW_I4;
4768 #if SIZEOF_REGISTER == 8
4769 else if (fsig->params [0]->type == MONO_TYPE_I8)
4770 opcode = OP_ATOMIC_ADD_NEW_I8;
4773 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4774 ins_iconst->inst_c0 = -1;
4775 ins_iconst->dreg = mono_alloc_ireg (cfg);
4776 MONO_ADD_INS (cfg->cbb, ins_iconst);
4778 MONO_INST_NEW (cfg, ins, opcode);
4779 ins->dreg = mono_alloc_ireg (cfg);
4780 ins->inst_basereg = args [0]->dreg;
4781 ins->inst_offset = 0;
4782 ins->sreg2 = ins_iconst->dreg;
4783 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4784 MONO_ADD_INS (cfg->cbb, ins);
4786 } else if (strcmp (cmethod->name, "Add") == 0) {
4789 if (fsig->params [0]->type == MONO_TYPE_I4)
4790 opcode = OP_ATOMIC_ADD_NEW_I4;
4791 #if SIZEOF_REGISTER == 8
4792 else if (fsig->params [0]->type == MONO_TYPE_I8)
4793 opcode = OP_ATOMIC_ADD_NEW_I8;
4797 MONO_INST_NEW (cfg, ins, opcode);
4798 ins->dreg = mono_alloc_ireg (cfg);
4799 ins->inst_basereg = args [0]->dreg;
4800 ins->inst_offset = 0;
4801 ins->sreg2 = args [1]->dreg;
4802 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4803 MONO_ADD_INS (cfg->cbb, ins);
4806 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
4808 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
4809 if (strcmp (cmethod->name, "Exchange") == 0) {
4811 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
4813 if (fsig->params [0]->type == MONO_TYPE_I4)
4814 opcode = OP_ATOMIC_EXCHANGE_I4;
4815 #if SIZEOF_REGISTER == 8
4816 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
4817 (fsig->params [0]->type == MONO_TYPE_I))
4818 opcode = OP_ATOMIC_EXCHANGE_I8;
4820 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
4821 opcode = OP_ATOMIC_EXCHANGE_I4;
4826 MONO_INST_NEW (cfg, ins, opcode);
4827 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
4828 ins->inst_basereg = args [0]->dreg;
4829 ins->inst_offset = 0;
4830 ins->sreg2 = args [1]->dreg;
4831 MONO_ADD_INS (cfg->cbb, ins);
4833 switch (fsig->params [0]->type) {
4835 ins->type = STACK_I4;
4839 ins->type = STACK_I8;
4841 case MONO_TYPE_OBJECT:
4842 ins->type = STACK_OBJ;
4845 g_assert_not_reached ();
4848 if (cfg->gen_write_barriers && is_ref)
4849 emit_write_barrier (cfg, args [0], args [1], -1);
4851 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4853 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
4854 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
4856 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
4857 if (fsig->params [1]->type == MONO_TYPE_I4)
4859 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
4860 size = sizeof (gpointer);
4861 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
4864 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
4865 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
4866 ins->sreg1 = args [0]->dreg;
4867 ins->sreg2 = args [1]->dreg;
4868 ins->sreg3 = args [2]->dreg;
4869 ins->type = STACK_I4;
4870 MONO_ADD_INS (cfg->cbb, ins);
4871 } else if (size == 8) {
4872 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
4873 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
4874 ins->sreg1 = args [0]->dreg;
4875 ins->sreg2 = args [1]->dreg;
4876 ins->sreg3 = args [2]->dreg;
4877 ins->type = STACK_I8;
4878 MONO_ADD_INS (cfg->cbb, ins);
4880 /* g_assert_not_reached (); */
4882 if (cfg->gen_write_barriers && is_ref)
4883 emit_write_barrier (cfg, args [0], args [1], -1);
4885 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
4889 } else if (cmethod->klass->image == mono_defaults.corlib) {
4890 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
4891 && strcmp (cmethod->klass->name, "Debugger") == 0) {
4892 if (should_insert_brekpoint (cfg->method)) {
4893 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
4895 MONO_INST_NEW (cfg, ins, OP_NOP);
4896 MONO_ADD_INS (cfg->cbb, ins);
4900 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
4901 && strcmp (cmethod->klass->name, "Environment") == 0) {
4903 EMIT_NEW_ICONST (cfg, ins, 1);
4905 EMIT_NEW_ICONST (cfg, ins, 0);
4909 } else if (cmethod->klass == mono_defaults.math_class) {
4911 * There is general branches code for Min/Max, but it does not work for
4913 * http://everything2.com/?node_id=1051618
4917 #ifdef MONO_ARCH_SIMD_INTRINSICS
4918 if (cfg->opt & MONO_OPT_SIMD) {
4919 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
4925 if (COMPILE_LLVM (cfg)) {
4926 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
4931 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
4935 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect selected well-known internal calls to specialized code.
 * Currently handles String.InternalAllocateStr: when allocation profiling
 * and shared (generic) code are both off, the call is replaced by a direct
 * call to the GC's managed allocator for System.String.
 * NOTE(review): this is a partial listing; the fall-through path (returning
 * NULL when no redirection applies) is presumably below — confirm in the
 * full source.
 */
4938 inline static MonoInst*
4939 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4940 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4942 if (method->klass == mono_defaults.string_class) {
4943 /* managed string allocation support */
4944 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
4945 MonoInst *iargs [2];
4946 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4947 MonoMethod *managed_alloc = NULL;
4949 g_assert (vtable); /* Should not fail since it is System.String */
4950 #ifndef MONO_CROSS_COMPILE
4951 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
/* Call the managed allocator with (vtable, length) instead of the icall */
4955 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4956 iargs [1] = args [0];
4957 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   For inlining: create a local variable (stored into cfg->args [i]) for
 * each argument of SIG and emit a store of the corresponding stack value
 * SP [i] into it, so the inlined body can read its arguments as locals.
 * The 'this' argument (when sig->hasthis) gets its type from the stack
 * value itself rather than from the signature.
 */
4964 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4966 MonoInst *store, *temp;
4969 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4970 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4973 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4974 * would be different than the MonoInst's used to represent arguments, and
4975 * the ldelema implementation can't deal with that.
4976 * Solution: When ldelema is used on an inline argument, create a var for
4977 * it, emit ldelema on that var, and emit the saving code below in
4978 * inline_method () if needed.
4980 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4981 cfg->args [i] = temp;
4982 /* This uses cfg->args [i] which is set by the preceding line */
4983 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
/* Attribute the store to the IL location of the argument value */
4984 store->cil_code = sp [0]->cil_code;
4989 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4990 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4992 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debugging aid (enabled by MONO_INLINE_CALLED_LIMITED_METHODS): only
 * allow inlining of callees whose full name starts with the prefix in the
 * MONO_INLINE_CALLED_METHOD_NAME_LIMIT environment variable. The env var
 * is read once and cached in a function-local static; an unset variable
 * caches "" which disables filtering.
 * NOTE(review): the static cache is not synchronized — presumably only
 * used in single-threaded/debug scenarios; confirm before relying on it.
 */
4994 check_inline_called_method_name_limit (MonoMethod *called_method)
4997 static char *limit = NULL;
4999 if (limit == NULL) {
5000 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
5002 if (limit_string != NULL)
5003 limit = limit_string;
5005 limit = (char *) "";
5008 if (limit [0] != '\0') {
5009 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* Prefix match: TRUE only when the name begins with the limit string */
5011 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
5012 g_free (called_method_name);
5014 //return (strncmp_result <= 0);
5015 return (strncmp_result == 0);
5022 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Counterpart of check_inline_called_method_name_limit () for the CALLER:
 * only allow inlining inside methods whose full name starts with the prefix
 * in MONO_INLINE_CALLER_METHOD_NAME_LIMIT. Same cached-static pattern.
 */
5024 check_inline_caller_method_name_limit (MonoMethod *caller_method)
5027 static char *limit = NULL;
5029 if (limit == NULL) {
5030 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
5031 if (limit_string != NULL) {
5032 limit = limit_string;
5034 limit = (char *) "";
5038 if (limit [0] != '\0') {
5039 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
/* Prefix match against the configured limit */
5041 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
5042 g_free (caller_method_name);
5044 //return (strncmp_result <= 0);
5045 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *
 *   Initialize the inline return variable RVAR to a zero/dummy value of
 * the appropriate kind, dispatching on the variable's stack type:
 * integer/long/pointer constants, an R8 load from a static 0.0, or a
 * VZERO for valuetypes. Used when an inlined body may leave the return
 * var unset (e.g. a throw-only method).
 */
5053 emit_init_rvar (MonoCompile *cfg, MonoInst *rvar, MonoType *rtype)
5055 static double r8_0 = 0.0;
5058 switch (rvar->type) {
5060 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
5063 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
5068 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
/* Floating point zero: point the R8CONST at a static 0.0 */
5071 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5072 ins->type = STACK_R8;
5073 ins->inst_p0 = (void*)&r8_0;
5074 ins->dreg = rvar->dreg;
5075 MONO_ADD_INS (cfg->cbb, ins);
5078 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (rtype));
5081 g_assert_not_reached ();
/*
 * inline_method:
 *
 *   Try to inline CMETHOD at the current emission point. Saves the parts
 * of CFG that mono_method_to_ir () will clobber (locals, args, cbb state,
 * cil offsets, generic context, ...), recursively converts CMETHOD's IL
 * between freshly created start/end bblocks, then restores CFG. The
 * inline is committed only when the recursive conversion reports an
 * acceptable cost (or INLINE_ALWAYS is set); otherwise the newly created
 * bblocks are abandoned by resetting cfg->cbb.
 * NOTE(review): partial listing — the returned value and some cleanup
 * lines are not visible here; presumably returns the inline cost/status
 * used by callers via INLINE_FAILURE. Confirm in the full source.
 */
5086 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
5087 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_always)
5089 MonoInst *ins, *rvar = NULL;
5090 MonoMethodHeader *cheader;
5091 MonoBasicBlock *ebblock, *sbblock;
5093 MonoMethod *prev_inlined_method;
5094 MonoInst **prev_locals, **prev_args;
5095 MonoType **prev_arg_types;
5096 guint prev_real_offset;
5097 GHashTable *prev_cbb_hash;
5098 MonoBasicBlock **prev_cil_offset_to_bb;
5099 MonoBasicBlock *prev_cbb;
5100 unsigned char* prev_cil_start;
5101 guint32 prev_cil_offset_to_bb_len;
5102 MonoMethod *prev_current_method;
5103 MonoGenericContext *prev_generic_context;
5104 gboolean ret_var_set, prev_ret_var_set, virtual = FALSE;
5106 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional debug-only name filters on callee and caller (see the
 * check_inline_*_method_name_limit helpers) */
5108 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
5109 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
5112 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
5113 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
5117 if (cfg->verbose_level > 2)
5118 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
5120 if (!cmethod->inline_info) {
5121 cfg->stat_inlineable_methods++;
5122 cmethod->inline_info = 1;
5125 /* allocate local variables */
5126 cheader = mono_method_get_header (cmethod);
5128 if (cheader == NULL || mono_loader_get_last_error ()) {
5129 MonoLoaderError *error = mono_loader_get_last_error ();
5132 mono_metadata_free_mh (cheader);
5133 if (inline_always && error)
5134 mono_cfg_set_exception (cfg, error->exception_type);
5136 mono_loader_clear_error ();
5140 /* Must verify before creating locals as it can cause the JIT to assert. */
5141 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
5142 mono_metadata_free_mh (cheader);
5146 /* allocate space to store the return value */
5147 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
5148 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* Swap in fresh locals for the inlined body; prev_locals restored below */
5151 prev_locals = cfg->locals;
5152 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
5153 for (i = 0; i < cheader->num_locals; ++i)
5154 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
5156 /* allocate start and end blocks */
5157 /* This is needed so if the inline is aborted, we can clean up */
5158 NEW_BBLOCK (cfg, sbblock);
5159 sbblock->real_offset = real_offset;
5161 NEW_BBLOCK (cfg, ebblock);
5162 ebblock->block_num = cfg->num_bblocks++;
5163 ebblock->real_offset = real_offset;
/* Save every piece of per-method CFG state the recursive call mutates;
 * the restore sequence after mono_method_to_ir () must mirror this. */
5165 prev_args = cfg->args;
5166 prev_arg_types = cfg->arg_types;
5167 prev_inlined_method = cfg->inlined_method;
5168 cfg->inlined_method = cmethod;
5169 cfg->ret_var_set = FALSE;
5170 cfg->inline_depth ++;
5171 prev_real_offset = cfg->real_offset;
5172 prev_cbb_hash = cfg->cbb_hash;
5173 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
5174 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
5175 prev_cil_start = cfg->cil_start;
5176 prev_cbb = cfg->cbb;
5177 prev_current_method = cfg->current_method;
5178 prev_generic_context = cfg->generic_context;
5179 prev_ret_var_set = cfg->ret_var_set;
/* A callvirt on an instance method keeps its null check via 'virtual' */
5181 if (*ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
5184 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, virtual);
5186 ret_var_set = cfg->ret_var_set;
5188 cfg->inlined_method = prev_inlined_method;
5189 cfg->real_offset = prev_real_offset;
5190 cfg->cbb_hash = prev_cbb_hash;
5191 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
5192 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
5193 cfg->cil_start = prev_cil_start;
5194 cfg->locals = prev_locals;
5195 cfg->args = prev_args;
5196 cfg->arg_types = prev_arg_types;
5197 cfg->current_method = prev_current_method;
5198 cfg->generic_context = prev_generic_context;
5199 cfg->ret_var_set = prev_ret_var_set;
5200 cfg->inline_depth --;
/* Commit the inline when cheap enough (cost < 60) or forced */
5202 if ((costs >= 0 && costs < 60) || inline_always) {
5203 if (cfg->verbose_level > 2)
5204 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
5206 cfg->stat_inlined_methods++;
5208 /* always add some code to avoid block split failures */
5209 MONO_INST_NEW (cfg, ins, OP_NOP);
5210 MONO_ADD_INS (prev_cbb, ins);
5212 prev_cbb->next_bb = sbblock;
5213 link_bblock (cfg, prev_cbb, sbblock);
5216 * Get rid of the begin and end bblocks if possible to aid local
5219 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
5221 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
5222 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
5224 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
5225 MonoBasicBlock *prev = ebblock->in_bb [0];
5226 mono_merge_basic_blocks (cfg, prev, ebblock);
5228 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
5229 mono_merge_basic_blocks (cfg, prev_cbb, prev);
5230 cfg->cbb = prev_cbb;
5234 * It's possible that the rvar is set in some prev bblock, but not in others.
/* Initialize rvar on predecessor paths that never reach a ret */
5240 for (i = 0; i < ebblock->in_count; ++i) {
5241 bb = ebblock->in_bb [i];
5243 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
5246 emit_init_rvar (cfg, rvar, fsig->ret);
5256 * If the inlined method contains only a throw, then the ret var is not
5257 * set, so set it to a dummy value.
5260 emit_init_rvar (cfg, rvar, fsig->ret);
5262 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
5265 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Inline aborted: clear any exception, drop the new bblocks */
5268 if (cfg->verbose_level > 2)
5269 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
5270 cfg->exception_type = MONO_EXCEPTION_NONE;
5271 mono_loader_clear_error ();
5273 /* This gets rid of the newly added bblocks */
5274 cfg->cbb = prev_cbb;
5276 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
5281 * Some of these comments may well be out-of-date.
5282 * Design decisions: we do a single pass over the IL code (and we do bblock
5283 * splitting/merging in the few cases when it's required: a back jump to an IL
5284 * address that was not already seen as bblock starting point).
5285 * Code is validated as we go (full verification is still better left to metadata/verify.c).
5286 * Complex operations are decomposed in simpler ones right away. We need to let the
5287 * arch-specific code peek and poke inside this process somehow (except when the
5288 * optimizations can take advantage of the full semantic info of coarse opcodes).
5289 * All the opcodes of the form opcode.s are 'normalized' to opcode.
5290 * MonoInst->opcode initially is the IL opcode or some simplification of that
5291 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
5292 * opcode with value bigger than OP_LAST.
5293 * At this point the IR can be handed over to an interpreter, a dumb code generator
5294 * or to the optimizing code generator that will translate it to SSA form.
5296 * Profiling directed optimizations.
5297 * We may compile by default with few or no optimizations and instrument the code
5298 * or the user may indicate what methods to optimize the most either in a config file
5299 * or through repeated runs where the compiler applies offline the optimizations to
5300 * each method and then decides if it was worth it.
5303 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
5304 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
5305 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
5306 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
5307 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
5308 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
5309 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
5310 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; LOAD_ERROR;}
5312 /* offset from br.s -> br like opcodes */
5313 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return TRUE if the IL address IP belongs to bblock BB, i.e. no other
 * bblock starts at that offset (NULL entry) or the registered bblock is
 * BB itself. Used to decide whether instruction-combining across IPs is
 * safe.
 */
5316 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
5318 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
5320 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   First pass over the IL stream [START, END): decode each opcode and
 * create bblocks (via GET_BBLOCK) at every branch target and at the
 * instruction following a branch/switch, so the main conversion loop can
 * split code at the right places. Also marks bblocks that contain a
 * 'throw' as out_of_line so they can be moved to the cold section.
 * NOTE(review): partial listing — the per-argument-kind ip advancement
 * lines and the error path are not all visible here.
 */
5324 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
5326 unsigned char *ip = start;
5327 unsigned char *target;
5330 MonoBasicBlock *bblock;
5331 const MonoOpcode *opcode;
5334 cli_addr = ip - start;
5335 i = mono_opcode_value ((const guint8 **)&ip, end);
5338 opcode = &mono_opcodes [i];
/* Advance ip past the operand according to the opcode's argument kind */
5339 switch (opcode->argument) {
5340 case MonoInlineNone:
5343 case MonoInlineString:
5344 case MonoInlineType:
5345 case MonoInlineField:
5346 case MonoInlineMethod:
5349 case MonoShortInlineR:
5356 case MonoShortInlineVar:
5357 case MonoShortInlineI:
5360 case MonoShortInlineBrTarget:
/* 1-byte signed branch displacement, relative to the next instruction */
5361 target = start + cli_addr + 2 + (signed char)ip [1];
5362 GET_BBLOCK (cfg, bblock, target);
5365 GET_BBLOCK (cfg, bblock, ip);
5367 case MonoInlineBrTarget:
/* 4-byte signed branch displacement */
5368 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
5369 GET_BBLOCK (cfg, bblock, target);
5372 GET_BBLOCK (cfg, bblock, ip);
5374 case MonoInlineSwitch: {
5375 guint32 n = read32 (ip + 1);
/* Switch targets are relative to the end of the n-entry jump table */
5378 cli_addr += 5 + 4 * n;
5379 target = start + cli_addr;
5380 GET_BBLOCK (cfg, bblock, target);
5382 for (j = 0; j < n; ++j) {
5383 target = start + cli_addr + (gint32)read32 (ip);
5384 GET_BBLOCK (cfg, bblock, target);
5394 g_assert_not_reached ();
5397 if (i == CEE_THROW) {
5398 unsigned char *bb_start = ip - 1;
5400 /* Find the start of the bblock containing the throw */
5402 while ((bb_start >= start) && !bblock) {
5403 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* Throw-containing blocks go to the cold (out of line) section */
5407 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN to a MonoMethod in the context of M. For wrapper
 * methods the token indexes the wrapper's own data; otherwise the token
 * is resolved through the image's metadata, allowing open constructed
 * types (hence "allow_open").
 */
5416 static inline MonoMethod *
5417 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5421 if (m->wrapper_type != MONO_WRAPPER_NONE)
5422 return mono_method_get_wrapper_data (m, token);
5424 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when not compiling shared
 * (generic-sharing) code, reject methods on open constructed types —
 * those can only be handled with a generic sharing context.
 */
5429 static inline MonoMethod *
5430 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5432 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
5434 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass in the context of METHOD: wrapper data
 * for wrappers, metadata lookup (with CONTEXT for generic instantiation)
 * otherwise. The class is initialized before being returned.
 */
5440 static inline MonoClass*
5441 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
5445 if (method->wrapper_type != MONO_WRAPPER_NONE)
5446 klass = mono_method_get_wrapper_data (method, token);
5448 klass = mono_class_get_full (method->klass->image, token, context);
5450 mono_class_init (klass);
5455 * Returns TRUE if the JIT should abort inlining because "callee"
5456 * is influenced by security attributes.
/*
 * check_linkdemand:
 *
 *   Evaluate CAS link demands when CALLER (inlined into cfg->method)
 * invokes CALLEE. On an ECMA link demand, emit code that raises a
 * SecurityException before the call; on other failures record a
 * SECURITY_LINKDEMAND exception on the cfg (without overwriting an
 * earlier exception).
 */
5459 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
5463 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
5467 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
5468 if (result == MONO_JIT_SECURITY_OK)
5471 if (result == MONO_JIT_LINKDEMAND_ECMA) {
5472 /* Generate code to throw a SecurityException before the actual call/link */
5473 MonoSecurityManager *secman = mono_security_manager_get_methods ();
/* arg 0: security action code (4); arg 1: the offending caller */
5476 NEW_ICONST (cfg, args [0], 4);
5477 NEW_METHODCONST (cfg, args [1], caller);
5478 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
5479 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
5480 /* don't hide previous results */
5481 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
5482 cfg->exception_data = result;
/*
 * throw_exception:
 *
 *   Return SecurityManager.ThrowException (1 arg), looked up once and
 * cached in a function-local static.
 * NOTE(review): the lazy-init static is not synchronized — presumably
 * benign since the lookup is idempotent; confirm against the JIT's
 * locking rules.
 */
5490 throw_exception (void)
5492 static MonoMethod *method = NULL;
5495 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5496 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit a call to SecurityManager.ThrowException (EX) at the current
 * emission point, so EX is raised at runtime when this code executes.
 */
5503 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
5505 MonoMethod *thrower = throw_exception ();
5508 EMIT_NEW_PCONST (cfg, args [0], ex);
5509 mono_emit_method_call (cfg, thrower, args, NULL);
5513 * Return the original method if a wrapper is specified. We can only access
5514 * the custom attributes from the original method.
/*
 * get_original_method:
 *
 *   Map a wrapper back to the method it wraps, since custom attributes
 * (needed for CoreCLR security checks) only exist on the original.
 * Non-wrappers and native-to-managed wrappers are returned as-is.
 */
5517 get_original_method (MonoMethod *method)
5519 if (method->wrapper_type == MONO_WRAPPER_NONE)
5522 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
5523 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
5526 /* in other cases we need to find the original method */
5527 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security: check that CALLER may access FIELD; if the check
 * returns an exception, emit code to throw it at this point. The caller
 * is unwrapped first because wrappers carry no security attributes.
 */
5531 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
5532 MonoBasicBlock *bblock, unsigned char *ip)
5534 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5535 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
5537 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security: check that CALLER may call CALLEE; if the check
 * returns an exception, emit code to throw it at this point. The caller
 * is unwrapped first because wrappers carry no security attributes.
 */
5541 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
5542 MonoBasicBlock *bblock, unsigned char *ip)
5544 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5545 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
5547 emit_throw_exception (cfg, ex);
5551 * Check that the IL instructions at ip are the array initialization
5552 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *
 *   Pattern-match the IL sequence dup/ldtoken <field>/call
 * RuntimeHelpers.InitializeArray at IP and, when it matches, return a
 * pointer to the field's RVA data (or, for AOT, the RVA itself so the
 * lookup happens at load time), with the element size in *OUT_SIZE and
 * the field token in *OUT_FIELD_TOKEN. Returns NULL when the pattern
 * does not match, the element type needs byte-swapping on this target,
 * or the data is larger than the field.
 */
5555 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
5558 * newarr[System.Int32]
5560 * ldtoken field valuetype ...
5561 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip [5] == 0x4 checks the ldtoken token's table (Field) byte */
5563 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
5564 guint32 token = read32 (ip + 7);
5565 guint32 field_token = read32 (ip + 2);
5566 guint32 field_index = field_token & 0xffffff;
5568 const char *data_ptr;
5570 MonoMethod *cmethod;
5571 MonoClass *dummy_class;
5572 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
5578 *out_field_token = field_token;
5580 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Only optimize the genuine corlib RuntimeHelpers.InitializeArray */
5583 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
5585 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
5586 case MONO_TYPE_BOOLEAN:
5590 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
5591 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
5592 case MONO_TYPE_CHAR:
5602 return NULL; /* stupid ARM FP swapped format */
/* The blob must fit in the RVA field that backs it */
5612 if (size > mono_type_size (field->type, &dummy_align))
5615 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
5616 if (!method->klass->image->dynamic) {
5617 field_index = read32 (ip + 2) & 0xffffff;
5618 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
5619 data_ptr = mono_image_rva_map (method->klass->image, rva);
5620 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
5621 /* for aot code we do the lookup on load */
5622 if (aot && data_ptr)
5623 return GUINT_TO_POINTER (rva);
5625 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
5627 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG, with a message naming METHOD
 *   and quoting the offending IL instruction at IP (or "method body is
 *   empty." when there is no code).  The method header is queued on
 *   cfg->headers_to_free so it is released with the compile's mempool.
 */
5635 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
5637 char *method_fname = mono_method_full_name (method, TRUE);
5639 MonoMethodHeader *header = mono_method_get_header (method);
5641 if (header->code_size == 0)
5642 method_code = g_strdup ("method body is empty.");
5644 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
5645 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
5646 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
/* the formatted message owns copies; free the temporaries */
5647 g_free (method_fname);
5648 g_free (method_code);
5649 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *
 *   Abort the compile with a caller-supplied managed exception object.
 *   cfg->exception_ptr is registered as a GC root so the exception is not
 *   collected before it is thrown.
 */
5653 set_exception_object (MonoCompile *cfg, MonoException *exception)
5655 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
5656 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
5657 cfg->exception_ptr = exception;
/*
 * emit_stloc_ir:
 *
 *   Emit the store of stack value SP[0] into local variable N.  When the
 *   store would be a plain reg-reg move and SP[0] is the immediately
 *   preceding constant-load instruction, retarget that instruction's dreg
 *   to the local instead of emitting a separate move.
 */
5661 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5664 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5665 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5666 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5667 /* Optimize reg-reg moves away */
5669 * Can't optimize other opcodes, since sp[0] might point to
5670 * the last ins of a decomposed opcode.
/* make the constant load write directly into the local's register */
5672 sp [0]->dreg = (cfg)->locals [n]->dreg;
/* general case: emit an explicit local store */
5674 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5679 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *
 *   Recognize "ldloca <local>; initobj <type>" and replace it with a direct
 *   store into the local (NULL for reference types, VZERO for structs),
 *   avoiding the address-taken local.  SIZE selects the short/long ldloca
 *   encoding.  Returns the IP past the consumed sequence, or presumably
 *   NULL when the pattern does not match — the return sites are elided
 *   from this listing, TODO confirm.
 */
5682 static inline unsigned char *
5683 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
/* long form: 16-bit local index follows the two-byte opcode */
5692 local = read16 (ip + 2);
/* check the following instruction is initobj within the same bblock */
5696 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5697 gboolean skip = FALSE;
5699 /* From the INITOBJ case */
5700 token = read32 (ip + 2);
5701 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5702 CHECK_TYPELOAD (klass);
/* reference types: initobj is equivalent to storing NULL */
5703 if (mini_type_is_reference (cfg, &klass->byval_arg)) {
5704 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
/* value types: zero the whole struct in place */
5705 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5706 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/*
 * is_exception_class:
 *
 *   Return whether CLASS is System.Exception or derives from it, by walking
 *   the parent chain.  (The loop structure is elided from this listing.)
 */
5719 is_exception_class (MonoClass *class)
5722 if (class == mono_defaults.exception_class)
5724 class = class->parent;
5730 * is_jit_optimizer_disabled:
5732 * Determine whenever M's assembly has a DebuggableAttribute with the
5733 * IsJITOptimizerDisabled flag set.
/* The result is cached per-assembly in ass->jit_optimizer_disabled, guarded
 * by memory barriers: the flag is published before the _inited bit so a
 * racing reader never sees an uninitialized value. */
5736 is_jit_optimizer_disabled (MonoMethod *m)
5738 MonoAssembly *ass = m->klass->image->assembly;
5739 MonoCustomAttrInfo* attrs;
/* cached lazily; DebuggableAttribute's identity never changes */
5740 static MonoClass *klass;
5742 gboolean val = FALSE;
/* fast path: another thread already computed the answer */
5745 if (ass->jit_optimizer_disabled_inited)
5746 return ass->jit_optimizer_disabled;
5749 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
/* no attribute class available: publish FALSE and cache it */
5752 ass->jit_optimizer_disabled = FALSE;
5753 mono_memory_barrier ();
5754 ass->jit_optimizer_disabled_inited = TRUE;
5758 attrs = mono_custom_attrs_from_assembly (ass);
5760 for (i = 0; i < attrs->num_attrs; ++i) {
5761 MonoCustomAttrEntry *attr = &attrs->attrs [i];
5764 MonoMethodSignature *sig;
/* only DebuggableAttribute instances are of interest */
5766 if (!attr->ctor || attr->ctor->klass != klass)
5768 /* Decode the attribute. See reflection.c */
5769 len = attr->data_size;
5770 p = (const char*)attr->data;
/* custom attribute blobs start with the 0x0001 prolog */
5771 g_assert (read16 (p) == 0x0001);
5774 // FIXME: Support named parameters
5775 sig = mono_method_signature (attr->ctor);
/* only the (bool, bool) ctor overload carries the flag positionally */
5776 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
5778 /* Two boolean arguments */
5782 mono_custom_attrs_free (attrs);
/* publish the result, then the inited flag (ordering matters for readers) */
5785 ass->jit_optimizer_disabled = val;
5786 mono_memory_barrier ();
5787 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Decide whether the call METHOD -> CMETHOD (with call signature FSIG)
 *   may be compiled as a real tail call on this platform.  Starts from an
 *   arch-specific or signature-equality check, then vetoes the tail call
 *   for every case where the callee could observe the caller's (about to
 *   be destroyed) stack frame or where the calling convention differs.
 */
5793 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig)
5795 gboolean supported_tail_call;
5798 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
5799 supported_tail_call = MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
/* fallback: signatures must match exactly and the return must not be a struct */
5801 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
5804 for (i = 0; i < fsig->param_count; ++i) {
5805 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
5806 /* These can point to the current method's stack */
5807 supported_tail_call = FALSE;
/* valuetype "this" is a pointer into the caller's frame */
5809 if (fsig->hasthis && cmethod->klass->valuetype)
5810 /* this might point to the current method's stack */
5811 supported_tail_call = FALSE;
/* p/invokes need marshalling wrappers; cannot jump into them directly */
5812 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
5813 supported_tail_call = FALSE;
/* methods that save an LMF must unwind it on return */
5814 if (cfg->method->save_lmf)
5815 supported_tail_call = FALSE;
5816 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
5817 supported_tail_call = FALSE;
5819 /* Debugging support */
/* mono_debug_count () lets developers bisect tail-call-related bugs */
5821 if (supported_tail_call) {
5822 if (!mono_debug_count ())
5823 supported_tail_call = FALSE;
5827 return supported_tail_call;
5830 /* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
5831 * it to the thread local value based on the tls_offset field. Every other kind of access to
5832 * the field causes an assert.
/* Return whether FIELD is corlib's ThreadLocal`1.tlsdata — the one field
 * whose address-of gets the special TLS treatment described above.
 * Note: strcmp() returns 0 on match, so each test below *rejects* on
 * mismatch; only a corlib field passing all three name checks qualifies. */
5835 is_magic_tls_access (MonoClassField *field)
5837 if (strcmp (field->name, "tlsdata"))
5839 if (strcmp (field->parent->name, "ThreadLocal`1"))
5841 return field->parent->image == mono_defaults.corlib;
5844 /* emits the code needed to access a managed tls var (like ThreadStatic)
5845 * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
5846 * pointer for the current thread.
5847 * Returns the MonoInst* representing the address of the tls var.
/* The encoded offset packs a slot index in the top byte (biased by 1) and a
 * byte offset into that slot's block in the low 24 bits; the emitted IR
 * below decodes it exactly as the pseudo-code comments describe. */
5850 emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
5853 int static_data_reg, array_reg, dreg;
5854 int offset2_reg, idx_reg;
5855 // inlined access to the tls data
5856 // idx = (offset >> 24) - 1;
5857 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
/* load thread->static_data (array of per-slot data blocks) */
5858 static_data_reg = alloc_ireg (cfg);
5859 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
/* idx = (offset >> 24) - 1 */
5860 idx_reg = alloc_ireg (cfg);
5861 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
5862 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
/* scale idx by sizeof(gpointer) to index the pointer array */
5863 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
5864 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
/* array_reg = static_data [idx] */
5865 array_reg = alloc_ireg (cfg);
5866 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
/* offset2 = offset & 0xffffff (byte offset within the block) */
5867 offset2_reg = alloc_ireg (cfg);
5868 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
/* final address = block base + in-block offset */
5869 dreg = alloc_ireg (cfg);
5870 EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
5875 * redirect access to the tlsdata field to the tls var given by the tls_offset field.
5876 * this address is cached per-method in cached_tls_addr.
/* Builds the address of the thread-local value backing TLS_FIELD
 * (ThreadLocal<T>.tlsdata): reads tls_offset from the ThreadLocal instance,
 * obtains the current MonoInternalThread (via intrinsic when available,
 * otherwise an icall), computes the address with
 * emit_managed_static_data_access (), and caches it in a temp local so
 * repeated accesses in the same method reuse it. */
5879 create_magic_tls_access (MonoCompile *cfg, MonoClassField *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
5881 MonoInst *load, *addr, *temp, *store, *thread_ins;
5882 MonoClassField *offset_field;
/* fast path: the address was already computed for this method */
5884 if (*cached_tls_addr) {
5885 EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
5888 thread_ins = mono_get_thread_intrinsic (cfg);
5889 offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");
/* load the encoded tls offset from the ThreadLocal<T> instance */
5891 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
5893 MONO_ADD_INS (cfg->cbb, thread_ins);
/* no arch intrinsic: fall back to calling CurrentInternalThread_internal */
5895 MonoMethod *thread_method;
5896 thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
5897 thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
5899 addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
/* the address is a managed pointer to a value of the field's type */
5900 addr->klass = mono_class_from_mono_type (tls_field->type);
5901 addr->type = STACK_MP;
/* cache the computed address in a temp for later reuse */
5902 *cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
5903 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);
5905 EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
5910 * mono_method_to_ir:
5912 * Translate the .net IL into linear IR.
5915 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5916 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5917 guint inline_offset, gboolean is_virtual_call)
5920 MonoInst *ins, **sp, **stack_start;
5921 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5922 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
5923 MonoMethod *cmethod, *method_definition;
5924 MonoInst **arg_array;
5925 MonoMethodHeader *header;
5927 guint32 token, ins_flag;
5929 MonoClass *constrained_call = NULL;
5930 unsigned char *ip, *end, *target, *err_pos;
5931 static double r8_0 = 0.0;
5932 MonoMethodSignature *sig;
5933 MonoGenericContext *generic_context = NULL;
5934 MonoGenericContainer *generic_container = NULL;
5935 MonoType **param_types;
5936 int i, n, start_new_bblock, dreg;
5937 int num_calls = 0, inline_costs = 0;
5938 int breakpoint_id = 0;
5940 MonoBoolean security, pinvoke;
5941 MonoSecurityManager* secman = NULL;
5942 MonoDeclSecurityActions actions;
5943 GSList *class_inits = NULL;
5944 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5946 gboolean init_locals, seq_points, skip_dead_blocks;
5947 gboolean disable_inline, sym_seq_points = FALSE;
5948 MonoInst *cached_tls_addr = NULL;
5949 MonoDebugMethodInfo *minfo;
5950 MonoBitSet *seq_point_locs = NULL;
5952 disable_inline = is_jit_optimizer_disabled (method);
5954 /* serialization and xdomain stuff may need access to private fields and methods */
5955 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5956 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5957 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5958 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5959 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5960 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5962 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5964 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5965 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5966 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5967 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5968 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
5970 image = method->klass->image;
5971 header = mono_method_get_header (method);
5973 MonoLoaderError *error;
5975 if ((error = mono_loader_get_last_error ())) {
5976 mono_cfg_set_exception (cfg, error->exception_type);
5978 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
5979 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
5981 goto exception_exit;
5983 generic_container = mono_method_get_generic_container (method);
5984 sig = mono_method_signature (method);
5985 num_args = sig->hasthis + sig->param_count;
5986 ip = (unsigned char*)header->code;
5987 cfg->cil_start = ip;
5988 end = ip + header->code_size;
5989 cfg->stat_cil_code_size += header->code_size;
5990 init_locals = header->init_locals;
5992 seq_points = cfg->gen_seq_points && cfg->method == method;
5994 if (cfg->gen_seq_points && cfg->method == method) {
5995 minfo = mono_debug_lookup_method (method);
5997 int i, n_il_offsets;
6001 mono_debug_symfile_get_line_numbers_full (minfo, NULL, NULL, &n_il_offsets, &il_offsets, &line_numbers, NULL, NULL);
6002 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6003 sym_seq_points = TRUE;
6004 for (i = 0; i < n_il_offsets; ++i) {
6005 if (il_offsets [i] < header->code_size)
6006 mono_bitset_set_fast (seq_point_locs, il_offsets [i]);
6012 * Methods without init_locals set could cause asserts in various passes
6017 method_definition = method;
6018 while (method_definition->is_inflated) {
6019 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
6020 method_definition = imethod->declaring;
6023 /* SkipVerification is not allowed if core-clr is enabled */
6024 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
6026 dont_verify_stloc = TRUE;
6029 if (mono_debug_using_mono_debugger ())
6030 cfg->keep_cil_nops = TRUE;
6032 if (sig->is_inflated)
6033 generic_context = mono_method_get_context (method);
6034 else if (generic_container)
6035 generic_context = &generic_container->context;
6036 cfg->generic_context = generic_context;
6038 if (!cfg->generic_sharing_context)
6039 g_assert (!sig->has_type_parameters);
6041 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
6042 g_assert (method->is_inflated);
6043 g_assert (mono_method_get_context (method)->method_inst);
6045 if (method->is_inflated && mono_method_get_context (method)->method_inst)
6046 g_assert (sig->generic_param_count);
6048 if (cfg->method == method) {
6049 cfg->real_offset = 0;
6051 cfg->real_offset = inline_offset;
6054 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
6055 cfg->cil_offset_to_bb_len = header->code_size;
6057 cfg->current_method = method;
6059 if (cfg->verbose_level > 2)
6060 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
6062 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
6064 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
6065 for (n = 0; n < sig->param_count; ++n)
6066 param_types [n + sig->hasthis] = sig->params [n];
6067 cfg->arg_types = param_types;
6069 dont_inline = g_list_prepend (dont_inline, method);
6070 if (cfg->method == method) {
6072 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
6073 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
6076 NEW_BBLOCK (cfg, start_bblock);
6077 cfg->bb_entry = start_bblock;
6078 start_bblock->cil_code = NULL;
6079 start_bblock->cil_length = 0;
6080 #if defined(__native_client_codegen__)
6081 MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
6082 ins->dreg = alloc_dreg (cfg, STACK_I4);
6083 MONO_ADD_INS (start_bblock, ins);
6087 NEW_BBLOCK (cfg, end_bblock);
6088 cfg->bb_exit = end_bblock;
6089 end_bblock->cil_code = NULL;
6090 end_bblock->cil_length = 0;
6091 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
6092 g_assert (cfg->num_bblocks == 2);
6094 arg_array = cfg->args;
6096 if (header->num_clauses) {
6097 cfg->spvars = g_hash_table_new (NULL, NULL);
6098 cfg->exvars = g_hash_table_new (NULL, NULL);
6100 /* handle exception clauses */
6101 for (i = 0; i < header->num_clauses; ++i) {
6102 MonoBasicBlock *try_bb;
6103 MonoExceptionClause *clause = &header->clauses [i];
6104 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
6105 try_bb->real_offset = clause->try_offset;
6106 try_bb->try_start = TRUE;
6107 try_bb->region = ((i + 1) << 8) | clause->flags;
6108 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
6109 tblock->real_offset = clause->handler_offset;
6110 tblock->flags |= BB_EXCEPTION_HANDLER;
6112 link_bblock (cfg, try_bb, tblock);
6114 if (*(ip + clause->handler_offset) == CEE_POP)
6115 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
6117 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
6118 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
6119 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
6120 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
6121 MONO_ADD_INS (tblock, ins);
6123 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) {
6124 /* finally clauses already have a seq point */
6125 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
6126 MONO_ADD_INS (tblock, ins);
6129 /* todo: is a fault block unsafe to optimize? */
6130 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
6131 tblock->flags |= BB_EXCEPTION_UNSAFE;
6135 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
6137 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
6139 /* catch and filter blocks get the exception object on the stack */
6140 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
6141 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
6142 MonoInst *dummy_use;
6144 /* mostly like handle_stack_args (), but just sets the input args */
6145 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
6146 tblock->in_scount = 1;
6147 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
6148 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
6151 * Add a dummy use for the exvar so its liveness info will be
6155 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
6157 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
6158 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
6159 tblock->flags |= BB_EXCEPTION_HANDLER;
6160 tblock->real_offset = clause->data.filter_offset;
6161 tblock->in_scount = 1;
6162 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
6163 /* The filter block shares the exvar with the handler block */
6164 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
6165 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
6166 MONO_ADD_INS (tblock, ins);
6170 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
6171 clause->data.catch_class &&
6172 cfg->generic_sharing_context &&
6173 mono_class_check_context_used (clause->data.catch_class)) {
6175 * In shared generic code with catch
6176 * clauses containing type variables
6177 * the exception handling code has to
6178 * be able to get to the rgctx.
6179 * Therefore we have to make sure that
6180 * the vtable/mrgctx argument (for
6181 * static or generic methods) or the
6182 * "this" argument (for non-static
6183 * methods) are live.
6185 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
6186 mini_method_get_context (method)->method_inst ||
6187 method->klass->valuetype) {
6188 mono_get_vtable_var (cfg);
6190 MonoInst *dummy_use;
6192 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
6197 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
6198 cfg->cbb = start_bblock;
6199 cfg->args = arg_array;
6200 mono_save_args (cfg, sig, inline_args);
6203 /* FIRST CODE BLOCK */
6204 NEW_BBLOCK (cfg, bblock);
6205 bblock->cil_code = ip;
6209 ADD_BBLOCK (cfg, bblock);
6211 if (cfg->method == method) {
6212 breakpoint_id = mono_debugger_method_has_breakpoint (method);
6213 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
6214 MONO_INST_NEW (cfg, ins, OP_BREAK);
6215 MONO_ADD_INS (bblock, ins);
6219 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
6220 secman = mono_security_manager_get_methods ();
6222 security = (secman && mono_method_has_declsec (method));
6223 /* at this point having security doesn't mean we have any code to generate */
6224 if (security && (cfg->method == method)) {
6225 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
6226 * And we do not want to enter the next section (with allocation) if we
6227 * have nothing to generate */
6228 security = mono_declsec_get_demands (method, &actions);
6231 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
6232 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
6234 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
6235 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6236 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
6238 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
6239 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
6243 mono_custom_attrs_free (custom);
6246 custom = mono_custom_attrs_from_class (wrapped->klass);
6247 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
6251 mono_custom_attrs_free (custom);
6254 /* not a P/Invoke after all */
6259 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
6260 /* we use a separate basic block for the initialization code */
6261 NEW_BBLOCK (cfg, init_localsbb);
6262 cfg->bb_init = init_localsbb;
6263 init_localsbb->real_offset = cfg->real_offset;
6264 start_bblock->next_bb = init_localsbb;
6265 init_localsbb->next_bb = bblock;
6266 link_bblock (cfg, start_bblock, init_localsbb);
6267 link_bblock (cfg, init_localsbb, bblock);
6269 cfg->cbb = init_localsbb;
6271 start_bblock->next_bb = bblock;
6272 link_bblock (cfg, start_bblock, bblock);
6275 /* at this point we know, if security is TRUE, that some code needs to be generated */
6276 if (security && (cfg->method == method)) {
6279 cfg->stat_cas_demand_generation++;
6281 if (actions.demand.blob) {
6282 /* Add code for SecurityAction.Demand */
6283 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
6284 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
6285 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
6286 mono_emit_method_call (cfg, secman->demand, args, NULL);
6288 if (actions.noncasdemand.blob) {
6289 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
6290 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
6291 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
6292 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
6293 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
6294 mono_emit_method_call (cfg, secman->demand, args, NULL);
6296 if (actions.demandchoice.blob) {
6297 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
6298 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
6299 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
6300 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
6301 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
6305 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
6307 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
6310 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
6311 /* check if this is native code, e.g. an icall or a p/invoke */
6312 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
6313 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
6315 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
6316 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
6318 /* if this ia a native call then it can only be JITted from platform code */
6319 if ((icall || pinvk) && method->klass && method->klass->image) {
6320 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
6321 MonoException *ex = icall ? mono_get_exception_security () :
6322 mono_get_exception_method_access ();
6323 emit_throw_exception (cfg, ex);
6330 if (header->code_size == 0)
6333 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
6338 if (cfg->method == method)
6339 mono_debug_init_method (cfg, bblock, breakpoint_id);
6341 for (n = 0; n < header->num_locals; ++n) {
6342 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
6347 /* We force the vtable variable here for all shared methods
6348 for the possibility that they might show up in a stack
6349 trace where their exact instantiation is needed. */
6350 if (cfg->generic_sharing_context && method == cfg->method) {
6351 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
6352 mini_method_get_context (method)->method_inst ||
6353 method->klass->valuetype) {
6354 mono_get_vtable_var (cfg);
6356 /* FIXME: Is there a better way to do this?
6357 We need the variable live for the duration
6358 of the whole method. */
6359 cfg->args [0]->flags |= MONO_INST_INDIRECT;
6363 /* add a check for this != NULL to inlined methods */
6364 if (is_virtual_call) {
6367 NEW_ARGLOAD (cfg, arg_ins, 0);
6368 MONO_ADD_INS (cfg->cbb, arg_ins);
6369 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
6372 skip_dead_blocks = !dont_verify;
6373 if (skip_dead_blocks) {
6374 original_bb = bb = mono_basic_block_split (method, &error);
6375 if (!mono_error_ok (&error)) {
6376 mono_error_cleanup (&error);
6382 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
6383 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
6386 start_new_bblock = 0;
6389 if (cfg->method == method)
6390 cfg->real_offset = ip - header->code;
6392 cfg->real_offset = inline_offset;
6397 if (start_new_bblock) {
6398 bblock->cil_length = ip - bblock->cil_code;
6399 if (start_new_bblock == 2) {
6400 g_assert (ip == tblock->cil_code);
6402 GET_BBLOCK (cfg, tblock, ip);
6404 bblock->next_bb = tblock;
6407 start_new_bblock = 0;
6408 for (i = 0; i < bblock->in_scount; ++i) {
6409 if (cfg->verbose_level > 3)
6410 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
6411 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
6415 g_slist_free (class_inits);
6418 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
6419 link_bblock (cfg, bblock, tblock);
6420 if (sp != stack_start) {
6421 handle_stack_args (cfg, stack_start, sp - stack_start);
6423 CHECK_UNVERIFIABLE (cfg);
6425 bblock->next_bb = tblock;
6428 for (i = 0; i < bblock->in_scount; ++i) {
6429 if (cfg->verbose_level > 3)
6430 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
6431 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
6434 g_slist_free (class_inits);
6439 if (skip_dead_blocks) {
6440 int ip_offset = ip - header->code;
6442 if (ip_offset == bb->end)
6446 int op_size = mono_opcode_size (ip, end);
6447 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
6449 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
6451 if (ip_offset + op_size == bb->end) {
6452 MONO_INST_NEW (cfg, ins, OP_NOP);
6453 MONO_ADD_INS (bblock, ins);
6454 start_new_bblock = 1;
6462 * Sequence points are points where the debugger can place a breakpoint.
6463 * Currently, we generate these automatically at points where the IL
6466 if (seq_points && ((sp == stack_start) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
6468 * Make methods interruptable at the beginning, and at the targets of
6469 * backward branches.
6470 * Also, do this at the start of every bblock in methods with clauses too,
6471 * to be able to handle instructions with inprecise control flow like
6473 * Backward branches are handled at the end of method-to-ir ().
6475 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
6477 /* Avoid sequence points on empty IL like .volatile */
6478 // FIXME: Enable this
6479 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
6480 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
6481 MONO_ADD_INS (cfg->cbb, ins);
6484 bblock->real_offset = cfg->real_offset;
6486 if ((cfg->method == method) && cfg->coverage_info) {
6487 guint32 cil_offset = ip - header->code;
6488 cfg->coverage_info->data [cil_offset].cil_code = ip;
6490 /* TODO: Use an increment here */
6491 #if defined(TARGET_X86)
6492 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
6493 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
6495 MONO_ADD_INS (cfg->cbb, ins);
6497 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
6498 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
6502 if (cfg->verbose_level > 3)
6503 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
6507 if (seq_points && !sym_seq_points && sp != stack_start) {
6509 * The C# compiler uses these nops to notify the JIT that it should
6510 * insert seq points.
6512 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
6513 MONO_ADD_INS (cfg->cbb, ins);
6515 if (cfg->keep_cil_nops)
6516 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
6518 MONO_INST_NEW (cfg, ins, OP_NOP);
6520 MONO_ADD_INS (bblock, ins);
6523 if (should_insert_brekpoint (cfg->method)) {
6524 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6526 MONO_INST_NEW (cfg, ins, OP_NOP);
6529 MONO_ADD_INS (bblock, ins);
6535 CHECK_STACK_OVF (1);
6536 n = (*ip)-CEE_LDARG_0;
6538 EMIT_NEW_ARGLOAD (cfg, ins, n);
6546 CHECK_STACK_OVF (1);
6547 n = (*ip)-CEE_LDLOC_0;
6549 EMIT_NEW_LOCLOAD (cfg, ins, n);
6558 n = (*ip)-CEE_STLOC_0;
6561 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
6563 emit_stloc_ir (cfg, sp, header, n);
6570 CHECK_STACK_OVF (1);
6573 EMIT_NEW_ARGLOAD (cfg, ins, n);
6579 CHECK_STACK_OVF (1);
6582 NEW_ARGLOADA (cfg, ins, n);
6583 MONO_ADD_INS (cfg->cbb, ins);
6593 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
6595 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
6600 CHECK_STACK_OVF (1);
6603 EMIT_NEW_LOCLOAD (cfg, ins, n);
6607 case CEE_LDLOCA_S: {
6608 unsigned char *tmp_ip;
6610 CHECK_STACK_OVF (1);
6611 CHECK_LOCAL (ip [1]);
6613 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
6619 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
6628 CHECK_LOCAL (ip [1]);
6629 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
6631 emit_stloc_ir (cfg, sp, header, ip [1]);
6636 CHECK_STACK_OVF (1);
6637 EMIT_NEW_PCONST (cfg, ins, NULL);
6638 ins->type = STACK_OBJ;
6643 CHECK_STACK_OVF (1);
6644 EMIT_NEW_ICONST (cfg, ins, -1);
6657 CHECK_STACK_OVF (1);
6658 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
6664 CHECK_STACK_OVF (1);
6666 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
6672 CHECK_STACK_OVF (1);
6673 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
6679 CHECK_STACK_OVF (1);
6680 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6681 ins->type = STACK_I8;
6682 ins->dreg = alloc_dreg (cfg, STACK_I8);
6684 ins->inst_l = (gint64)read64 (ip);
6685 MONO_ADD_INS (bblock, ins);
6691 gboolean use_aotconst = FALSE;
6693 #ifdef TARGET_POWERPC
6694 /* FIXME: Clean this up */
6695 if (cfg->compile_aot)
6696 use_aotconst = TRUE;
6699 /* FIXME: we should really allocate this only late in the compilation process */
6700 f = mono_domain_alloc (cfg->domain, sizeof (float));
6702 CHECK_STACK_OVF (1);
6708 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
6710 dreg = alloc_freg (cfg);
6711 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
6712 ins->type = STACK_R8;
6714 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6715 ins->type = STACK_R8;
6716 ins->dreg = alloc_dreg (cfg, STACK_R8);
6718 MONO_ADD_INS (bblock, ins);
6728 gboolean use_aotconst = FALSE;
6730 #ifdef TARGET_POWERPC
6731 /* FIXME: Clean this up */
6732 if (cfg->compile_aot)
6733 use_aotconst = TRUE;
6736 /* FIXME: we should really allocate this only late in the compilation process */
6737 d = mono_domain_alloc (cfg->domain, sizeof (double));
6739 CHECK_STACK_OVF (1);
6745 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
6747 dreg = alloc_freg (cfg);
6748 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
6749 ins->type = STACK_R8;
6751 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6752 ins->type = STACK_R8;
6753 ins->dreg = alloc_dreg (cfg, STACK_R8);
6755 MONO_ADD_INS (bblock, ins);
6764 MonoInst *temp, *store;
6766 CHECK_STACK_OVF (1);
6770 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
6771 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
6773 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6776 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6789 if (sp [0]->type == STACK_R8)
6790 /* we need to pop the value from the x86 FP stack */
6791 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
6797 INLINE_FAILURE ("jmp");
6800 if (stack_start != sp)
6802 token = read32 (ip + 1);
6803 /* FIXME: check the signature matches */
6804 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6806 if (!cmethod || mono_loader_get_last_error ())
6809 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
6810 GENERIC_SHARING_FAILURE (CEE_JMP);
6812 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
6813 CHECK_CFG_EXCEPTION;
6815 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6817 MonoMethodSignature *fsig = mono_method_signature (cmethod);
6820 /* Handle tail calls similarly to calls */
6821 n = fsig->param_count + fsig->hasthis;
6823 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
6824 call->method = cmethod;
6825 call->tail_call = TRUE;
6826 call->signature = mono_method_signature (cmethod);
6827 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6828 call->inst.inst_p0 = cmethod;
6829 for (i = 0; i < n; ++i)
6830 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
6832 mono_arch_emit_call (cfg, call);
6833 MONO_ADD_INS (bblock, (MonoInst*)call);
6836 for (i = 0; i < num_args; ++i)
6837 /* Prevent arguments from being optimized away */
6838 arg_array [i]->flags |= MONO_INST_VOLATILE;
6840 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6841 ins = (MonoInst*)call;
6842 ins->inst_p0 = cmethod;
6843 MONO_ADD_INS (bblock, ins);
6847 start_new_bblock = 1;
6852 case CEE_CALLVIRT: {
6853 MonoInst *addr = NULL;
6854 MonoMethodSignature *fsig = NULL;
6856 int virtual = *ip == CEE_CALLVIRT;
6857 int calli = *ip == CEE_CALLI;
6858 gboolean pass_imt_from_rgctx = FALSE;
6859 MonoInst *imt_arg = NULL;
6860 gboolean pass_vtable = FALSE;
6861 gboolean pass_mrgctx = FALSE;
6862 MonoInst *vtable_arg = NULL;
6863 gboolean check_this = FALSE;
6864 gboolean supported_tail_call = FALSE;
6865 gboolean need_seq_point = FALSE;
6868 token = read32 (ip + 1);
6875 if (method->wrapper_type != MONO_WRAPPER_NONE)
6876 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6878 fsig = mono_metadata_parse_signature (image, token);
6880 n = fsig->param_count + fsig->hasthis;
6882 if (method->dynamic && fsig->pinvoke) {
6886 * This is a call through a function pointer using a pinvoke
6887 * signature. Have to create a wrapper and call that instead.
6888 * FIXME: This is very slow, need to create a wrapper at JIT time
6889 * instead based on the signature.
6891 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
6892 EMIT_NEW_PCONST (cfg, args [1], fsig);
6894 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6897 MonoMethod *cil_method;
6899 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6900 if (constrained_call && cfg->verbose_level > 2)
6901 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_call));
6902 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6903 cil_method = cmethod;
6904 if (constrained_call && !((constrained_call->byval_arg.type == MONO_TYPE_VAR ||
6905 constrained_call->byval_arg.type == MONO_TYPE_MVAR) &&
6906 cfg->generic_sharing_context)) {
6907 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_call, generic_context);
6909 } else if (constrained_call) {
6910 if (cfg->verbose_level > 2)
6911 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_call));
6913 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6915 * This is needed since get_method_constrained can't find
6916 * the method in klass representing a type var.
6917 * The type var is guaranteed to be a reference type in this
6920 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6921 cil_method = cmethod;
6922 g_assert (!cmethod->klass->valuetype);
6924 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6927 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6928 cil_method = cmethod;
6931 if (!cmethod || mono_loader_get_last_error ())
6933 if (!dont_verify && !cfg->skip_visibility) {
6934 MonoMethod *target_method = cil_method;
6935 if (method->is_inflated) {
6936 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6938 if (!mono_method_can_access_method (method_definition, target_method) &&
6939 !mono_method_can_access_method (method, cil_method))
6940 METHOD_ACCESS_FAILURE;
6943 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6944 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6946 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6947 /* MS.NET seems to silently convert this to a callvirt */
6952 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
6953 * converts to a callvirt.
6955 * tests/bug-515884.il is an example of this behavior
6957 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
6958 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
6959 if (!virtual && cmethod->klass->marshalbyref && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
6963 if (!cmethod->klass->inited)
6964 if (!mono_class_init (cmethod->klass))
6965 TYPE_LOAD_ERROR (cmethod->klass);
6967 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6968 mini_class_is_system_array (cmethod->klass)) {
6969 array_rank = cmethod->klass->rank;
6970 fsig = mono_method_signature (cmethod);
6972 fsig = mono_method_signature (cmethod);
6977 if (fsig->pinvoke) {
6978 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6979 check_for_pending_exc, FALSE);
6980 fsig = mono_method_signature (wrapper);
6981 } else if (constrained_call) {
6982 fsig = mono_method_signature (cmethod);
6984 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6988 mono_save_token_info (cfg, image, token, cil_method);
6990 if (!MONO_TYPE_IS_VOID (fsig->ret) && !sym_seq_points) {
6992 * Need to emit an implicit seq point after every non-void call so single stepping through nested calls like
6993 * foo (bar (), baz ())
6994 * works correctly. MS does this also:
6995 * http://stackoverflow.com/questions/6937198/making-your-net-language-step-correctly-in-the-debugger
6996 * The problem with this approach is that the debugger will stop after all calls returning a value,
6997 * even for simple cases, like:
7000 /* Special case a few common successor opcodes */
7001 if (!(ip + 5 < end && ip [5] == CEE_POP))
7002 need_seq_point = TRUE;
7005 n = fsig->param_count + fsig->hasthis;
7007 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7008 if (check_linkdemand (cfg, method, cmethod))
7009 INLINE_FAILURE ("linkdemand");
7010 CHECK_CFG_EXCEPTION;
7013 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
7014 g_assert_not_reached ();
7017 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
7020 if (!cfg->generic_sharing_context && cmethod)
7021 g_assert (!mono_method_check_context_used (cmethod));
7025 //g_assert (!virtual || fsig->hasthis);
7029 if (constrained_call) {
7031 * We have the `constrained.' prefix opcode.
7033 if (constrained_call->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
7035 * The type parameter is instantiated as a valuetype,
7036 * but that type doesn't override the method we're
7037 * calling, so we need to box `this'.
7039 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
7040 ins->klass = constrained_call;
7041 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
7042 CHECK_CFG_EXCEPTION;
7043 } else if (!constrained_call->valuetype) {
7044 int dreg = alloc_ireg_ref (cfg);
7047 * The type parameter is instantiated as a reference
7048 * type. We have a managed pointer on the stack, so
7049 * we need to dereference it here.
7051 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
7052 ins->type = STACK_OBJ;
7055 if (cmethod->klass->valuetype) {
7058 /* Interface method */
7061 mono_class_setup_vtable (constrained_call);
7062 CHECK_TYPELOAD (constrained_call);
7063 ioffset = mono_class_interface_offset (constrained_call, cmethod->klass);
7065 TYPE_LOAD_ERROR (constrained_call);
7066 slot = mono_method_get_vtable_slot (cmethod);
7068 TYPE_LOAD_ERROR (cmethod->klass);
7069 cmethod = constrained_call->vtable [ioffset + slot];
7071 if (cmethod->klass == mono_defaults.enum_class) {
7072 /* Enum implements some interfaces, so treat this as the first case */
7073 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
7074 ins->klass = constrained_call;
7075 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
7076 CHECK_CFG_EXCEPTION;
7081 constrained_call = NULL;
7084 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
7087 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
7089 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7090 type_to_eval_stack_type ((cfg), fsig->ret, ins);
7095 CHECK_CFG_EXCEPTION;
7100 emit_seq_point (cfg, method, ip, FALSE);
7105 * If the callee is a shared method, then its static cctor
7106 * might not get called after the call was patched.
7108 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
7109 emit_generic_class_init (cfg, cmethod->klass);
7110 CHECK_TYPELOAD (cmethod->klass);
7113 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
7114 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
7115 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
7116 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
7117 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
7120 * Pass vtable iff target method might
7121 * be shared, which means that sharing
7122 * is enabled for its class and its
7123 * context is sharable (and it's not a
7126 if (sharing_enabled && context_sharable &&
7127 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
7131 if (cmethod && mini_method_get_context (cmethod) &&
7132 mini_method_get_context (cmethod)->method_inst) {
7133 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
7134 MonoGenericContext *context = mini_method_get_context (cmethod);
7135 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
7137 g_assert (!pass_vtable);
7139 if (sharing_enabled && context_sharable)
7143 if (cfg->generic_sharing_context && cmethod) {
7144 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
7146 context_used = mono_method_check_context_used (cmethod);
7148 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7149 /* Generic method interface
7150 calls are resolved via a
7151 helper function and don't
7153 if (!cmethod_context || !cmethod_context->method_inst)
7154 pass_imt_from_rgctx = TRUE;
7158 * If a shared method calls another
7159 * shared method then the caller must
7160 * have a generic sharing context
7161 * because the magic trampoline
7162 * requires it. FIXME: We shouldn't
7163 * have to force the vtable/mrgctx
7164 * variable here. Instead there
7165 * should be a flag in the cfg to
7166 * request a generic sharing context.
7169 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
7170 mono_get_vtable_var (cfg);
7175 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7177 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7179 CHECK_TYPELOAD (cmethod->klass);
7180 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7185 g_assert (!vtable_arg);
7187 if (!cfg->compile_aot) {
7189 * emit_get_rgctx_method () calls mono_class_vtable () so check
7190 * for type load errors before.
7192 mono_class_setup_vtable (cmethod->klass);
7193 CHECK_TYPELOAD (cmethod->klass);
7196 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7198 /* !marshalbyref is needed to properly handle generic methods + remoting */
7199 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
7200 MONO_METHOD_IS_FINAL (cmethod)) &&
7201 !cmethod->klass->marshalbyref) {
7208 if (pass_imt_from_rgctx) {
7209 g_assert (!pass_vtable);
7212 imt_arg = emit_get_rgctx_method (cfg, context_used,
7213 cmethod, MONO_RGCTX_INFO_METHOD);
7217 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
7219 /* Calling virtual generic methods */
7220 if (cmethod && virtual &&
7221 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
7222 !(MONO_METHOD_IS_FINAL (cmethod) &&
7223 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
7224 mono_method_signature (cmethod)->generic_param_count) {
7225 MonoInst *this_temp, *this_arg_temp, *store;
7226 MonoInst *iargs [4];
7228 g_assert (mono_method_signature (cmethod)->is_inflated);
7230 /* Prevent inlining of methods that contain indirect calls */
7231 INLINE_FAILURE ("virtual generic call");
7233 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
7234 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt) {
7235 g_assert (!imt_arg);
7237 g_assert (cmethod->is_inflated);
7238 imt_arg = emit_get_rgctx_method (cfg, context_used,
7239 cmethod, MONO_RGCTX_INFO_METHOD);
7240 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg, NULL);
7244 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
7245 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
7246 MONO_ADD_INS (bblock, store);
7248 /* FIXME: This should be a managed pointer */
7249 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7251 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
7252 iargs [1] = emit_get_rgctx_method (cfg, context_used,
7253 cmethod, MONO_RGCTX_INFO_METHOD);
7254 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
7255 addr = mono_emit_jit_icall (cfg,
7256 mono_helper_compile_generic_method, iargs);
7258 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
7260 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL);
7263 if (!MONO_TYPE_IS_VOID (fsig->ret))
7264 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7266 CHECK_CFG_EXCEPTION;
7271 emit_seq_point (cfg, method, ip, FALSE);
7276 * Implement a workaround for the inherent races involved in locking:
7282 * If a thread abort happens between the call to Monitor.Enter () and the start of the
7283 * try block, the Exit () won't be executed, see:
7284 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
7285 * To work around this, we extend such try blocks to include the last x bytes
7286 * of the Monitor.Enter () call.
7288 if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
7289 MonoBasicBlock *tbb;
7291 GET_BBLOCK (cfg, tbb, ip + 5);
7293 * Only extend try blocks with a finally, to avoid catching exceptions thrown
7294 * from Monitor.Enter like ArgumentNullException.
7296 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
7297 /* Mark this bblock as needing to be extended */
7298 tbb->extend_try_block = TRUE;
7302 /* Conversion to a JIT intrinsic */
7303 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
7305 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7306 type_to_eval_stack_type ((cfg), fsig->ret, ins);
7311 CHECK_CFG_EXCEPTION;
7316 emit_seq_point (cfg, method, ip, FALSE);
7321 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
7322 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
7323 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
7324 !g_list_find (dont_inline, cmethod)) {
7326 gboolean always = FALSE;
7328 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
7329 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
7330 /* Prevent inlining of methods that call wrappers */
7331 INLINE_FAILURE ("wrapper call");
7332 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
7336 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, always))) {
7338 cfg->real_offset += 5;
7341 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7342 /* *sp is already set by inline_method */
7346 inline_costs += costs;
7349 emit_seq_point (cfg, method, ip, FALSE);
7354 inline_costs += 10 * num_calls++;
7356 /* Tail recursion elimination */
7357 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
7358 gboolean has_vtargs = FALSE;
7361 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
7362 INLINE_FAILURE ("tail call");
7364 /* keep it simple */
7365 for (i = fsig->param_count - 1; i >= 0; i--) {
7366 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
7371 for (i = 0; i < n; ++i)
7372 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
7373 MONO_INST_NEW (cfg, ins, OP_BR);
7374 MONO_ADD_INS (bblock, ins);
7375 tblock = start_bblock->out_bb [0];
7376 link_bblock (cfg, bblock, tblock);
7377 ins->inst_target_bb = tblock;
7378 start_new_bblock = 1;
7380 /* skip the CEE_RET, too */
7381 if (ip_in_bb (cfg, bblock, ip + 5))
7391 /* Generic sharing */
7392 /* FIXME: only do this for generic methods if
7393 they are not shared! */
7394 if (context_used && !imt_arg && !array_rank &&
7395 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7396 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
7397 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
7398 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
7399 INLINE_FAILURE ("gshared");
7401 g_assert (cfg->generic_sharing_context && cmethod);
7405 * We are compiling a call to a
7406 * generic method from shared code,
7407 * which means that we have to look up
7408 * the method in the rgctx and do an
7411 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7414 /* Indirect calls */
7416 g_assert (!imt_arg);
7418 if (*ip == CEE_CALL)
7419 g_assert (context_used);
7420 else if (*ip == CEE_CALLI)
7421 g_assert (!vtable_arg);
7423 /* FIXME: what the hell is this??? */
7424 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
7425 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
7427 /* Prevent inlining of methods with indirect calls */
7428 INLINE_FAILURE ("indirect call");
7431 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, vtable_arg);
7433 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
7435 * Instead of emitting an indirect call, emit a direct call
7436 * with the contents of the aotconst as the patch info.
7438 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
7440 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
7441 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
7444 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL);
7447 if (!MONO_TYPE_IS_VOID (fsig->ret))
7448 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7450 CHECK_CFG_EXCEPTION;
7455 emit_seq_point (cfg, method, ip, FALSE);
7463 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
7464 MonoInst *val = sp [fsig->param_count];
7466 if (val->type == STACK_OBJ) {
7467 MonoInst *iargs [2];
7472 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
7475 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
7476 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
7477 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
7478 emit_write_barrier (cfg, addr, val, 0);
7479 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
7480 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
7482 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
7485 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
7486 if (!cmethod->klass->element_class->valuetype && !readonly)
7487 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
7488 CHECK_TYPELOAD (cmethod->klass);
7491 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
7494 g_assert_not_reached ();
7497 CHECK_CFG_EXCEPTION;
7501 emit_seq_point (cfg, method, ip, FALSE);
7505 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
7507 if (!MONO_TYPE_IS_VOID (fsig->ret))
7508 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7510 CHECK_CFG_EXCEPTION;
7515 emit_seq_point (cfg, method, ip, FALSE);
7519 /* Tail prefix / tail call optimization */
7521 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
7522 /* FIXME: runtime generic context pointer for jumps? */
7523 /* FIXME: handle this for generic sharing eventually */
7524 supported_tail_call = cmethod &&
7525 ((((ins_flag & MONO_INST_TAILCALL) && (*ip == CEE_CALL))
7526 ))//|| ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && ip [5] == CEE_RET))
7527 && !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig);
7529 if (supported_tail_call) {
7532 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
7533 INLINE_FAILURE ("tail call");
7535 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
7537 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
7538 /* Handle tail calls similarly to calls */
7539 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE, FALSE, FALSE);
7541 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
7542 call->tail_call = TRUE;
7543 call->method = cmethod;
7544 call->signature = mono_method_signature (cmethod);
7547 * We implement tail calls by storing the actual arguments into the
7548 * argument variables, then emitting a CEE_JMP.
7550 for (i = 0; i < n; ++i) {
7551 /* Prevent argument from being register allocated */
7552 arg_array [i]->flags |= MONO_INST_VOLATILE;
7553 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
7557 ins = (MonoInst*)call;
7558 ins->inst_p0 = cmethod;
7559 ins->inst_p1 = arg_array [0];
7560 MONO_ADD_INS (bblock, ins);
7561 link_bblock (cfg, bblock, end_bblock);
7562 start_new_bblock = 1;
7564 CHECK_CFG_EXCEPTION;
7569 // FIXME: Eliminate unreachable epilogs
7572 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
7573 * only reachable from this call.
7575 GET_BBLOCK (cfg, tblock, ip);
7576 if (tblock == bblock || tblock->in_count == 0)
7582 * Synchronized wrappers.
7583 * It's hard to determine where to replace a method with its synchronized
7584 * wrapper without causing an infinite recursion. The current solution is
7585 * to add the synchronized wrapper in the trampolines, and to
7586 * change the called method to a dummy wrapper, and resolve that wrapper
7587 * to the real method in mono_jit_compile_method ().
7589 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED && mono_marshal_method_from_wrapper (cfg->method) == cmethod) {
7590 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
7594 INLINE_FAILURE ("call");
7595 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
7596 imt_arg, vtable_arg);
7598 if (!MONO_TYPE_IS_VOID (fsig->ret))
7599 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7601 CHECK_CFG_EXCEPTION;
7606 emit_seq_point (cfg, method, ip, FALSE);
7610 if (cfg->method != method) {
7611 /* return from inlined method */
7613 * If in_count == 0, that means the ret is unreachable due to
7614 * being preceded by a throw. In that case, inline_method () will
7615 * handle setting the return value
7616 * (test case: test_0_inline_throw ()).
7618 if (return_var && cfg->cbb->in_count) {
7619 MonoType *ret_type = mono_method_signature (method)->ret;
7625 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
7628 //g_assert (returnvar != -1);
7629 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
7630 cfg->ret_var_set = TRUE;
7634 MonoType *ret_type = mono_method_signature (method)->ret;
7636 if (seq_points && !sym_seq_points) {
6638 * Place a seq point here too even though the IL stack is not
7639 * empty, so a step over on
7642 * will work correctly.
7644 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
7645 MONO_ADD_INS (cfg->cbb, ins);
7648 g_assert (!return_var);
7652 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
7655 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
7658 if (!cfg->vret_addr) {
7661 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
7663 EMIT_NEW_RETLOADA (cfg, ret_addr);
7665 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
7666 ins->klass = mono_class_from_mono_type (ret_type);
7669 #ifdef MONO_ARCH_SOFT_FLOAT
7670 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
7671 MonoInst *iargs [1];
7675 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
7676 mono_arch_emit_setret (cfg, method, conv);
7678 mono_arch_emit_setret (cfg, method, *sp);
7681 mono_arch_emit_setret (cfg, method, *sp);
7686 if (sp != stack_start)
7688 MONO_INST_NEW (cfg, ins, OP_BR);
7690 ins->inst_target_bb = end_bblock;
7691 MONO_ADD_INS (bblock, ins);
7692 link_bblock (cfg, bblock, end_bblock);
7693 start_new_bblock = 1;
7697 MONO_INST_NEW (cfg, ins, OP_BR);
7699 target = ip + 1 + (signed char)(*ip);
7701 GET_BBLOCK (cfg, tblock, target);
7702 link_bblock (cfg, bblock, tblock);
7703 ins->inst_target_bb = tblock;
7704 if (sp != stack_start) {
7705 handle_stack_args (cfg, stack_start, sp - stack_start);
7707 CHECK_UNVERIFIABLE (cfg);
7709 MONO_ADD_INS (bblock, ins);
7710 start_new_bblock = 1;
7711 inline_costs += BRANCH_COST;
7725 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
7727 target = ip + 1 + *(signed char*)ip;
7733 inline_costs += BRANCH_COST;
7737 MONO_INST_NEW (cfg, ins, OP_BR);
7740 target = ip + 4 + (gint32)read32(ip);
7742 GET_BBLOCK (cfg, tblock, target);
7743 link_bblock (cfg, bblock, tblock);
7744 ins->inst_target_bb = tblock;
7745 if (sp != stack_start) {
7746 handle_stack_args (cfg, stack_start, sp - stack_start);
7748 CHECK_UNVERIFIABLE (cfg);
7751 MONO_ADD_INS (bblock, ins);
7753 start_new_bblock = 1;
7754 inline_costs += BRANCH_COST;
7761 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
7762 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
7763 guint32 opsize = is_short ? 1 : 4;
7765 CHECK_OPSIZE (opsize);
7767 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
7770 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
7775 GET_BBLOCK (cfg, tblock, target);
7776 link_bblock (cfg, bblock, tblock);
7777 GET_BBLOCK (cfg, tblock, ip);
7778 link_bblock (cfg, bblock, tblock);
7780 if (sp != stack_start) {
7781 handle_stack_args (cfg, stack_start, sp - stack_start);
7782 CHECK_UNVERIFIABLE (cfg);
7785 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
7786 cmp->sreg1 = sp [0]->dreg;
7787 type_from_op (cmp, sp [0], NULL);
7790 #if SIZEOF_REGISTER == 4
7791 if (cmp->opcode == OP_LCOMPARE_IMM) {
7792 /* Convert it to OP_LCOMPARE */
7793 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7794 ins->type = STACK_I8;
7795 ins->dreg = alloc_dreg (cfg, STACK_I8);
7797 MONO_ADD_INS (bblock, ins);
7798 cmp->opcode = OP_LCOMPARE;
7799 cmp->sreg2 = ins->dreg;
7802 MONO_ADD_INS (bblock, cmp);
7804 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
7805 type_from_op (ins, sp [0], NULL);
7806 MONO_ADD_INS (bblock, ins);
7807 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
7808 GET_BBLOCK (cfg, tblock, target);
7809 ins->inst_true_bb = tblock;
7810 GET_BBLOCK (cfg, tblock, ip);
7811 ins->inst_false_bb = tblock;
7812 start_new_bblock = 2;
7815 inline_costs += BRANCH_COST;
7830 MONO_INST_NEW (cfg, ins, *ip);
7832 target = ip + 4 + (gint32)read32(ip);
7838 inline_costs += BRANCH_COST;
7842 MonoBasicBlock **targets;
7843 MonoBasicBlock *default_bblock;
7844 MonoJumpInfoBBTable *table;
7845 int offset_reg = alloc_preg (cfg);
7846 int target_reg = alloc_preg (cfg);
7847 int table_reg = alloc_preg (cfg);
7848 int sum_reg = alloc_preg (cfg);
7849 gboolean use_op_switch;
7853 n = read32 (ip + 1);
7856 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
7860 CHECK_OPSIZE (n * sizeof (guint32));
7861 target = ip + n * sizeof (guint32);
7863 GET_BBLOCK (cfg, default_bblock, target);
7864 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7866 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
7867 for (i = 0; i < n; ++i) {
7868 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
7869 targets [i] = tblock;
7870 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
7874 if (sp != stack_start) {
7876 * Link the current bb with the targets as well, so handle_stack_args
7877 * will set their in_stack correctly.
7879 link_bblock (cfg, bblock, default_bblock);
7880 for (i = 0; i < n; ++i)
7881 link_bblock (cfg, bblock, targets [i]);
7883 handle_stack_args (cfg, stack_start, sp - stack_start);
7885 CHECK_UNVERIFIABLE (cfg);
7888 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
7889 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
7892 for (i = 0; i < n; ++i)
7893 link_bblock (cfg, bblock, targets [i]);
7895 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
7896 table->table = targets;
7897 table->table_size = n;
7899 use_op_switch = FALSE;
7901 /* ARM implements SWITCH statements differently */
7902 /* FIXME: Make it use the generic implementation */
7903 if (!cfg->compile_aot)
7904 use_op_switch = TRUE;
7907 if (COMPILE_LLVM (cfg))
7908 use_op_switch = TRUE;
7910 cfg->cbb->has_jump_table = 1;
7912 if (use_op_switch) {
7913 MONO_INST_NEW (cfg, ins, OP_SWITCH);
7914 ins->sreg1 = src1->dreg;
7915 ins->inst_p0 = table;
7916 ins->inst_many_bb = targets;
7917 ins->klass = GUINT_TO_POINTER (n);
7918 MONO_ADD_INS (cfg->cbb, ins);
7920 if (sizeof (gpointer) == 8)
7921 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
7923 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
7925 #if SIZEOF_REGISTER == 8
7926 /* The upper word might not be zero, and we add it to a 64 bit address later */
7927 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
7930 if (cfg->compile_aot) {
7931 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
7933 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
7934 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
7935 ins->inst_p0 = table;
7936 ins->dreg = table_reg;
7937 MONO_ADD_INS (cfg->cbb, ins);
7940 /* FIXME: Use load_memindex */
7941 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
7942 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
7943 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
7945 start_new_bblock = 1;
7946 inline_costs += (BRANCH_COST * 2);
7966 dreg = alloc_freg (cfg);
7969 dreg = alloc_lreg (cfg);
7972 dreg = alloc_ireg_ref (cfg);
7975 dreg = alloc_preg (cfg);
7978 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
7979 ins->type = ldind_type [*ip - CEE_LDIND_I1];
7980 ins->flags |= ins_flag;
7982 MONO_ADD_INS (bblock, ins);
7984 if (ins->flags & MONO_INST_VOLATILE) {
7985 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
7986 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
7987 emit_memory_barrier (cfg, FullBarrier);
8002 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
8003 ins->flags |= ins_flag;
8006 if (ins->flags & MONO_INST_VOLATILE) {
8007 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
8008 /* FIXME it's questionable if release semantics require a full barrier or just StoreStore*/
8009 emit_memory_barrier (cfg, FullBarrier);
8012 MONO_ADD_INS (bblock, ins);
8014 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
8015 emit_write_barrier (cfg, sp [0], sp [1], -1);
8024 MONO_INST_NEW (cfg, ins, (*ip));
8026 ins->sreg1 = sp [0]->dreg;
8027 ins->sreg2 = sp [1]->dreg;
8028 type_from_op (ins, sp [0], sp [1]);
8030 ins->dreg = alloc_dreg ((cfg), (ins)->type);
8032 /* Use the immediate opcodes if possible */
8033 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
8034 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
8035 if (imm_opcode != -1) {
8036 ins->opcode = imm_opcode;
8037 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
8040 sp [1]->opcode = OP_NOP;
8044 MONO_ADD_INS ((cfg)->cbb, (ins));
8046 *sp++ = mono_decompose_opcode (cfg, ins);
8063 MONO_INST_NEW (cfg, ins, (*ip));
8065 ins->sreg1 = sp [0]->dreg;
8066 ins->sreg2 = sp [1]->dreg;
8067 type_from_op (ins, sp [0], sp [1]);
8069 ADD_WIDEN_OP (ins, sp [0], sp [1]);
8070 ins->dreg = alloc_dreg ((cfg), (ins)->type);
8072 /* FIXME: Pass opcode to is_inst_imm */
8074 /* Use the immediate opcodes if possible */
8075 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
8078 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
8079 if (imm_opcode != -1) {
8080 ins->opcode = imm_opcode;
8081 if (sp [1]->opcode == OP_I8CONST) {
8082 #if SIZEOF_REGISTER == 8
8083 ins->inst_imm = sp [1]->inst_l;
8085 ins->inst_ls_word = sp [1]->inst_ls_word;
8086 ins->inst_ms_word = sp [1]->inst_ms_word;
8090 ins->inst_imm = (gssize)(sp [1]->inst_c0);
8093 /* Might be followed by an instruction added by ADD_WIDEN_OP */
8094 if (sp [1]->next == NULL)
8095 sp [1]->opcode = OP_NOP;
8098 MONO_ADD_INS ((cfg)->cbb, (ins));
8100 *sp++ = mono_decompose_opcode (cfg, ins);
8113 case CEE_CONV_OVF_I8:
8114 case CEE_CONV_OVF_U8:
8118 /* Special case this earlier so we have long constants in the IR */
8119 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
8120 int data = sp [-1]->inst_c0;
8121 sp [-1]->opcode = OP_I8CONST;
8122 sp [-1]->type = STACK_I8;
8123 #if SIZEOF_REGISTER == 8
8124 if ((*ip) == CEE_CONV_U8)
8125 sp [-1]->inst_c0 = (guint32)data;
8127 sp [-1]->inst_c0 = data;
8129 sp [-1]->inst_ls_word = data;
8130 if ((*ip) == CEE_CONV_U8)
8131 sp [-1]->inst_ms_word = 0;
8133 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
8135 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
8142 case CEE_CONV_OVF_I4:
8143 case CEE_CONV_OVF_I1:
8144 case CEE_CONV_OVF_I2:
8145 case CEE_CONV_OVF_I:
8146 case CEE_CONV_OVF_U:
8149 if (sp [-1]->type == STACK_R8) {
8150 ADD_UNOP (CEE_CONV_OVF_I8);
8157 case CEE_CONV_OVF_U1:
8158 case CEE_CONV_OVF_U2:
8159 case CEE_CONV_OVF_U4:
8162 if (sp [-1]->type == STACK_R8) {
8163 ADD_UNOP (CEE_CONV_OVF_U8);
8170 case CEE_CONV_OVF_I1_UN:
8171 case CEE_CONV_OVF_I2_UN:
8172 case CEE_CONV_OVF_I4_UN:
8173 case CEE_CONV_OVF_I8_UN:
8174 case CEE_CONV_OVF_U1_UN:
8175 case CEE_CONV_OVF_U2_UN:
8176 case CEE_CONV_OVF_U4_UN:
8177 case CEE_CONV_OVF_U8_UN:
8178 case CEE_CONV_OVF_I_UN:
8179 case CEE_CONV_OVF_U_UN:
8186 CHECK_CFG_EXCEPTION;
8190 case CEE_ADD_OVF_UN:
8192 case CEE_MUL_OVF_UN:
8194 case CEE_SUB_OVF_UN:
8202 token = read32 (ip + 1);
8203 klass = mini_get_class (method, token, generic_context);
8204 CHECK_TYPELOAD (klass);
8206 if (generic_class_is_reference_type (cfg, klass)) {
8207 MonoInst *store, *load;
8208 int dreg = alloc_ireg_ref (cfg);
8210 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
8211 load->flags |= ins_flag;
8212 MONO_ADD_INS (cfg->cbb, load);
8214 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
8215 store->flags |= ins_flag;
8216 MONO_ADD_INS (cfg->cbb, store);
8218 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
8219 emit_write_barrier (cfg, sp [0], sp [1], -1);
8221 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
8233 token = read32 (ip + 1);
8234 klass = mini_get_class (method, token, generic_context);
8235 CHECK_TYPELOAD (klass);
8237 /* Optimize the common ldobj+stloc combination */
8247 loc_index = ip [5] - CEE_STLOC_0;
8254 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
8255 CHECK_LOCAL (loc_index);
8257 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
8258 ins->dreg = cfg->locals [loc_index]->dreg;
8264 /* Optimize the ldobj+stobj combination */
8265 /* The reference case ends up being a load+store anyway */
8266 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
8271 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
8278 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
8287 CHECK_STACK_OVF (1);
8289 n = read32 (ip + 1);
8291 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
8292 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
8293 ins->type = STACK_OBJ;
8296 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
8297 MonoInst *iargs [1];
8299 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
8300 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
8302 if (cfg->opt & MONO_OPT_SHARED) {
8303 MonoInst *iargs [3];
8305 if (cfg->compile_aot) {
8306 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
8308 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8309 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
8310 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
8311 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
8312 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
8314 if (bblock->out_of_line) {
8315 MonoInst *iargs [2];
8317 if (image == mono_defaults.corlib) {
8319 * Avoid relocations in AOT and save some space by using a
8320 * version of helper_ldstr specialized to mscorlib.
8322 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
8323 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
8325 /* Avoid creating the string object */
8326 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8327 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
8328 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
8332 if (cfg->compile_aot) {
8333 NEW_LDSTRCONST (cfg, ins, image, n);
8335 MONO_ADD_INS (bblock, ins);
8338 NEW_PCONST (cfg, ins, NULL);
8339 ins->type = STACK_OBJ;
8340 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
8342 OUT_OF_MEMORY_FAILURE;
8345 MONO_ADD_INS (bblock, ins);
8354 MonoInst *iargs [2];
8355 MonoMethodSignature *fsig;
8358 MonoInst *vtable_arg = NULL;
8361 token = read32 (ip + 1);
8362 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8363 if (!cmethod || mono_loader_get_last_error ())
8365 fsig = mono_method_get_signature (cmethod, image, token);
8369 mono_save_token_info (cfg, image, token, cmethod);
8371 if (!mono_class_init (cmethod->klass))
8372 TYPE_LOAD_ERROR (cmethod->klass);
8374 if (cfg->generic_sharing_context)
8375 context_used = mono_method_check_context_used (cmethod);
8377 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
8378 if (check_linkdemand (cfg, method, cmethod))
8379 INLINE_FAILURE ("linkdemand");
8380 CHECK_CFG_EXCEPTION;
8381 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
8382 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
8385 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8386 emit_generic_class_init (cfg, cmethod->klass);
8387 CHECK_TYPELOAD (cmethod->klass);
8390 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
8391 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
8392 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
8393 mono_class_vtable (cfg->domain, cmethod->klass);
8394 CHECK_TYPELOAD (cmethod->klass);
8396 vtable_arg = emit_get_rgctx_method (cfg, context_used,
8397 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8400 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
8401 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8403 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8405 CHECK_TYPELOAD (cmethod->klass);
8406 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8411 n = fsig->param_count;
8415 * Generate smaller code for the common newobj <exception> instruction in
8416 * argument checking code.
8418 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
8419 is_exception_class (cmethod->klass) && n <= 2 &&
8420 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
8421 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
8422 MonoInst *iargs [3];
8424 g_assert (!vtable_arg);
8428 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
8431 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
8435 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
8440 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
8443 g_assert_not_reached ();
8451 /* move the args to allow room for 'this' in the first position */
8457 /* check_call_signature () requires sp[0] to be set */
8458 this_ins.type = STACK_OBJ;
8460 if (check_call_signature (cfg, fsig, sp))
8465 if (mini_class_is_system_array (cmethod->klass)) {
8466 g_assert (!vtable_arg);
8468 *sp = emit_get_rgctx_method (cfg, context_used,
8469 cmethod, MONO_RGCTX_INFO_METHOD);
8471 /* Avoid varargs in the common case */
8472 if (fsig->param_count == 1)
8473 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
8474 else if (fsig->param_count == 2)
8475 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
8476 else if (fsig->param_count == 3)
8477 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
8479 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
8480 } else if (cmethod->string_ctor) {
8481 g_assert (!context_used);
8482 g_assert (!vtable_arg);
8483 /* we simply pass a null pointer */
8484 EMIT_NEW_PCONST (cfg, *sp, NULL);
8485 /* now call the string ctor */
8486 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL, NULL);
8488 MonoInst* callvirt_this_arg = NULL;
8490 if (cmethod->klass->valuetype) {
8491 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
8492 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
8493 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
8498 * The code generated by mini_emit_virtual_call () expects
8499 * iargs [0] to be a boxed instance, but luckily the vcall
8500 * will be transformed into a normal call there.
8502 } else if (context_used) {
8503 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
8506 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8508 CHECK_TYPELOAD (cmethod->klass);
8511 * TypeInitializationExceptions thrown from the mono_runtime_class_init
8512 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
8513 * As a workaround, we call class cctors before allocating objects.
8515 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8516 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8517 if (cfg->verbose_level > 2)
8518 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
8519 class_inits = g_slist_prepend (class_inits, vtable);
8522 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
8525 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
8528 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
8530 /* Now call the actual ctor */
8531 /* Avoid virtual calls to ctors if possible */
8532 if (cmethod->klass->marshalbyref)
8533 callvirt_this_arg = sp [0];
8536 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
8537 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8538 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8543 CHECK_CFG_EXCEPTION;
8544 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
8545 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
8546 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
8547 !g_list_find (dont_inline, cmethod)) {
8550 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
8551 cfg->real_offset += 5;
8554 inline_costs += costs - 5;
8556 INLINE_FAILURE ("inline failure");
8557 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL, NULL);
8559 } else if (context_used &&
8560 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
8561 !mono_class_generic_sharing_enabled (cmethod->klass))) {
8562 MonoInst *cmethod_addr;
8564 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
8565 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8567 mono_emit_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
8569 INLINE_FAILURE ("ctor call");
8570 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp,
8571 callvirt_this_arg, NULL, vtable_arg);
8575 if (alloc == NULL) {
8577 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
8578 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
8592 token = read32 (ip + 1);
8593 klass = mini_get_class (method, token, generic_context);
8594 CHECK_TYPELOAD (klass);
8595 if (sp [0]->type != STACK_OBJ)
8598 if (cfg->generic_sharing_context)
8599 context_used = mono_class_check_context_used (klass);
8601 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
8602 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
8609 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8612 if (cfg->compile_aot)
8613 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
8615 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8617 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
8618 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
8621 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8622 MonoMethod *mono_castclass;
8623 MonoInst *iargs [1];
8626 mono_castclass = mono_marshal_get_castclass (klass);
8629 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
8630 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8631 CHECK_CFG_EXCEPTION;
8632 g_assert (costs > 0);
8635 cfg->real_offset += 5;
8640 inline_costs += costs;
8643 ins = handle_castclass (cfg, klass, *sp, context_used);
8644 CHECK_CFG_EXCEPTION;
8654 token = read32 (ip + 1);
8655 klass = mini_get_class (method, token, generic_context);
8656 CHECK_TYPELOAD (klass);
8657 if (sp [0]->type != STACK_OBJ)
8660 if (cfg->generic_sharing_context)
8661 context_used = mono_class_check_context_used (klass);
8663 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
8664 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
8671 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8674 if (cfg->compile_aot)
8675 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
8677 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8679 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
8682 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8683 MonoMethod *mono_isinst;
8684 MonoInst *iargs [1];
8687 mono_isinst = mono_marshal_get_isinst (klass);
8690 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
8691 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8692 CHECK_CFG_EXCEPTION;
8693 g_assert (costs > 0);
8696 cfg->real_offset += 5;
8701 inline_costs += costs;
8704 ins = handle_isinst (cfg, klass, *sp, context_used);
8705 CHECK_CFG_EXCEPTION;
8712 case CEE_UNBOX_ANY: {
8716 token = read32 (ip + 1);
8717 klass = mini_get_class (method, token, generic_context);
8718 CHECK_TYPELOAD (klass);
8720 mono_save_token_info (cfg, image, token, klass);
8722 if (cfg->generic_sharing_context)
8723 context_used = mono_class_check_context_used (klass);
8725 if (generic_class_is_reference_type (cfg, klass)) {
8726 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
8727 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
8728 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
8735 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8738 /*FIXME AOT support*/
8739 if (cfg->compile_aot)
8740 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
8742 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8744 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
8745 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
8748 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8749 MonoMethod *mono_castclass;
8750 MonoInst *iargs [1];
8753 mono_castclass = mono_marshal_get_castclass (klass);
8756 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
8757 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8758 CHECK_CFG_EXCEPTION;
8759 g_assert (costs > 0);
8762 cfg->real_offset += 5;
8766 inline_costs += costs;
8768 ins = handle_castclass (cfg, klass, *sp, context_used);
8769 CHECK_CFG_EXCEPTION;
8777 if (mono_class_is_nullable (klass)) {
8778 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
8785 ins = handle_unbox (cfg, klass, sp, context_used);
8791 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
8804 token = read32 (ip + 1);
8805 klass = mini_get_class (method, token, generic_context);
8806 CHECK_TYPELOAD (klass);
8808 mono_save_token_info (cfg, image, token, klass);
8810 if (cfg->generic_sharing_context)
8811 context_used = mono_class_check_context_used (klass);
8813 if (generic_class_is_reference_type (cfg, klass)) {
8819 if (klass == mono_defaults.void_class)
8821 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
8823 /* frequent check in generic code: box (struct), brtrue */
8825 // FIXME: LLVM can't handle the inconsistent bb linking
8826 if (!mono_class_is_nullable (klass) &&
8827 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
8828 (ip [5] == CEE_BRTRUE ||
8829 ip [5] == CEE_BRTRUE_S ||
8830 ip [5] == CEE_BRFALSE ||
8831 ip [5] == CEE_BRFALSE_S)) {
8832 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
8834 MonoBasicBlock *true_bb, *false_bb;
8838 if (cfg->verbose_level > 3) {
8839 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8840 printf ("<box+brtrue opt>\n");
8848 target = ip + 1 + (signed char)(*ip);
8855 target = ip + 4 + (gint)(read32 (ip));
8859 g_assert_not_reached ();
8863 * We need to link both bblocks, since it is needed for handling stack
8864 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
8865 * Branching to only one of them would lead to inconsistencies, so
8866 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
8868 GET_BBLOCK (cfg, true_bb, target);
8869 GET_BBLOCK (cfg, false_bb, ip);
8871 mono_link_bblock (cfg, cfg->cbb, true_bb);
8872 mono_link_bblock (cfg, cfg->cbb, false_bb);
8874 if (sp != stack_start) {
8875 handle_stack_args (cfg, stack_start, sp - stack_start);
8877 CHECK_UNVERIFIABLE (cfg);
8880 if (COMPILE_LLVM (cfg)) {
8881 dreg = alloc_ireg (cfg);
8882 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
8883 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
8885 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
8887 /* The JIT can't eliminate the iconst+compare */
8888 MONO_INST_NEW (cfg, ins, OP_BR);
8889 ins->inst_target_bb = is_true ? true_bb : false_bb;
8890 MONO_ADD_INS (cfg->cbb, ins);
8893 start_new_bblock = 1;
8897 *sp++ = handle_box (cfg, val, klass, context_used);
8899 CHECK_CFG_EXCEPTION;
8908 token = read32 (ip + 1);
8909 klass = mini_get_class (method, token, generic_context);
8910 CHECK_TYPELOAD (klass);
8912 mono_save_token_info (cfg, image, token, klass);
8914 if (cfg->generic_sharing_context)
8915 context_used = mono_class_check_context_used (klass);
8917 if (mono_class_is_nullable (klass)) {
8920 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
8921 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
8925 ins = handle_unbox (cfg, klass, sp, context_used);
8938 MonoClassField *field;
8941 gboolean is_instance;
8943 gpointer addr = NULL;
8944 gboolean is_special_static;
8946 MonoInst *store_val = NULL;
8949 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
8951 if (op == CEE_STFLD) {
8959 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
8961 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
8964 if (op == CEE_STSFLD) {
8972 token = read32 (ip + 1);
8973 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8974 field = mono_method_get_wrapper_data (method, token);
8975 klass = field->parent;
8978 field = mono_field_from_token (image, token, &klass, generic_context);
8982 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8983 FIELD_ACCESS_FAILURE;
8984 mono_class_init (klass);
8986 if (is_instance && *ip != CEE_LDFLDA && is_magic_tls_access (field))
8989 /* if the class is Critical then transparent code cannot access its fields */
8990 if (!is_instance && mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8991 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8993 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
8994 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
8995 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8996 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
9000 * LDFLD etc. is usable on static fields as well, so convert those cases to
9003 if (is_instance && field->type->attrs & FIELD_ATTRIBUTE_STATIC) {
9015 g_assert_not_reached ();
9017 is_instance = FALSE;
9022 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
9023 if (op == CEE_STFLD) {
9024 if (target_type_is_incompatible (cfg, field->type, sp [1]))
9026 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
9027 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
9028 MonoInst *iargs [5];
9031 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9032 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
9033 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
9037 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
9038 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
9039 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9040 CHECK_CFG_EXCEPTION;
9041 g_assert (costs > 0);
9043 cfg->real_offset += 5;
9046 inline_costs += costs;
9048 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
9053 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
9055 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
9056 if (sp [0]->opcode != OP_LDADDR)
9057 store->flags |= MONO_INST_FAULT;
9059 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
9060 /* insert call to write barrier */
9064 dreg = alloc_ireg_mp (cfg);
9065 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
9066 emit_write_barrier (cfg, ptr, sp [1], -1);
9069 store->flags |= ins_flag;
9076 if (is_instance && ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class)) {
9077 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
9078 MonoInst *iargs [4];
9081 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9082 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
9083 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
9084 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
9085 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
9086 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9087 CHECK_CFG_EXCEPTION;
9089 g_assert (costs > 0);
9091 cfg->real_offset += 5;
9095 inline_costs += costs;
9097 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
9100 } else if (is_instance) {
9101 if (sp [0]->type == STACK_VTYPE) {
9104 /* Have to compute the address of the variable */
9106 var = get_vreg_to_inst (cfg, sp [0]->dreg);
9108 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
9110 g_assert (var->klass == klass);
9112 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
9116 if (op == CEE_LDFLDA) {
9117 if (is_magic_tls_access (field)) {
9119 *sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
9121 if (sp [0]->type == STACK_OBJ) {
9122 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
9123 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
9126 dreg = alloc_ireg_mp (cfg);
9128 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
9129 ins->klass = mono_class_from_mono_type (field->type);
9130 ins->type = STACK_MP;
9136 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
9138 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
9139 load->flags |= ins_flag;
9140 if (sp [0]->opcode != OP_LDADDR)
9141 load->flags |= MONO_INST_FAULT;
9155 * We can only support shared generic static
9156 * field access on architectures where the
9157 * trampoline code has been extended to handle
9158 * the generic class init.
9160 #ifndef MONO_ARCH_VTABLE_REG
9161 GENERIC_SHARING_FAILURE (op);
9164 if (cfg->generic_sharing_context)
9165 context_used = mono_class_check_context_used (klass);
9167 ftype = mono_field_get_type (field);
9169 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
9172 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
9173 * to be called here.
9175 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
9176 mono_class_vtable (cfg->domain, klass);
9177 CHECK_TYPELOAD (klass);
9179 mono_domain_lock (cfg->domain);
9180 if (cfg->domain->special_static_fields)
9181 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
9182 mono_domain_unlock (cfg->domain);
9184 is_special_static = mono_class_field_is_special_static (field);
9186 /* Generate IR to compute the field address */
9187 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
9189 * Fast access to TLS data
9190 * Inline version of get_thread_static_data () in
9194 int idx, static_data_reg, array_reg, dreg;
9195 MonoInst *thread_ins;
9197 // offset &= 0x7fffffff;
9198 // idx = (offset >> 24) - 1;
9199 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
9201 thread_ins = mono_get_thread_intrinsic (cfg);
9202 MONO_ADD_INS (cfg->cbb, thread_ins);
9203 static_data_reg = alloc_ireg (cfg);
9204 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
9206 if (cfg->compile_aot) {
9207 int offset_reg, offset2_reg, idx_reg;
9209 /* For TLS variables, this will return the TLS offset */
9210 EMIT_NEW_SFLDACONST (cfg, ins, field);
9211 offset_reg = ins->dreg;
9212 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
9213 idx_reg = alloc_ireg (cfg);
9214 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
9215 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
9216 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
9217 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
9218 array_reg = alloc_ireg (cfg);
9219 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
9220 offset2_reg = alloc_ireg (cfg);
9221 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
9222 dreg = alloc_ireg (cfg);
9223 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
9225 offset = (gsize)addr & 0x7fffffff;
9226 idx = (offset >> 24) - 1;
9228 array_reg = alloc_ireg (cfg);
9229 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
9230 dreg = alloc_ireg (cfg);
9231 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
9233 } else if ((cfg->opt & MONO_OPT_SHARED) ||
9234 (cfg->compile_aot && is_special_static) ||
9235 (context_used && is_special_static)) {
9236 MonoInst *iargs [2];
9238 g_assert (field->parent);
9239 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9241 iargs [1] = emit_get_rgctx_field (cfg, context_used,
9242 field, MONO_RGCTX_INFO_CLASS_FIELD);
9244 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
9246 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
9247 } else if (context_used) {
9248 MonoInst *static_data;
9251 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
9252 method->klass->name_space, method->klass->name, method->name,
9253 depth, field->offset);
9256 if (mono_class_needs_cctor_run (klass, method))
9257 emit_generic_class_init (cfg, klass);
9260 * The pointer we're computing here is
9262 * super_info.static_data + field->offset
9264 static_data = emit_get_rgctx_klass (cfg, context_used,
9265 klass, MONO_RGCTX_INFO_STATIC_DATA);
9267 if (field->offset == 0) {
9270 int addr_reg = mono_alloc_preg (cfg);
9271 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
9273 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
9274 MonoInst *iargs [2];
9276 g_assert (field->parent);
9277 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9278 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
9279 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
9281 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
9283 CHECK_TYPELOAD (klass);
9285 if (mini_field_access_needs_cctor_run (cfg, method, vtable)) {
9286 if (!(g_slist_find (class_inits, vtable))) {
9287 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
9288 if (cfg->verbose_level > 2)
9289 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
9290 class_inits = g_slist_prepend (class_inits, vtable);
9293 if (cfg->run_cctors) {
9295 /* This ensures that inlining cannot trigger */
9296 /* .cctors: too many apps depend on them */
9297 /* running with a specific order... */
9298 if (! vtable->initialized)
9299 INLINE_FAILURE ("class init");
9300 ex = mono_runtime_class_init_full (vtable, FALSE);
9302 set_exception_object (cfg, ex);
9303 goto exception_exit;
9307 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
9309 if (cfg->compile_aot)
9310 EMIT_NEW_SFLDACONST (cfg, ins, field);
9312 EMIT_NEW_PCONST (cfg, ins, addr);
9314 MonoInst *iargs [1];
9315 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
9316 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
9320 /* Generate IR to do the actual load/store operation */
9322 if (op == CEE_LDSFLDA) {
9323 ins->klass = mono_class_from_mono_type (ftype);
9324 ins->type = STACK_PTR;
9326 } else if (op == CEE_STSFLD) {
9329 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
9330 store->flags |= ins_flag;
9332 gboolean is_const = FALSE;
9333 MonoVTable *vtable = NULL;
9334 gpointer addr = NULL;
9336 if (!context_used) {
9337 vtable = mono_class_vtable (cfg->domain, klass);
9338 CHECK_TYPELOAD (klass);
9340 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
9341 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
9342 int ro_type = ftype->type;
9344 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
9345 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
9346 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
9348 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
9351 case MONO_TYPE_BOOLEAN:
9353 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
9357 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
9360 case MONO_TYPE_CHAR:
9362 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
9366 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
9371 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
9375 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
9381 case MONO_TYPE_FNPTR:
9382 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
9383 type_to_eval_stack_type ((cfg), field->type, *sp);
9386 case MONO_TYPE_STRING:
9387 case MONO_TYPE_OBJECT:
9388 case MONO_TYPE_CLASS:
9389 case MONO_TYPE_SZARRAY:
9390 case MONO_TYPE_ARRAY:
9391 if (!mono_gc_is_moving ()) {
9392 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
9393 type_to_eval_stack_type ((cfg), field->type, *sp);
9401 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
9406 case MONO_TYPE_VALUETYPE:
9416 CHECK_STACK_OVF (1);
9418 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
9419 load->flags |= ins_flag;
9432 token = read32 (ip + 1);
9433 klass = mini_get_class (method, token, generic_context);
9434 CHECK_TYPELOAD (klass);
9435 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
9436 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
9437 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
9438 generic_class_is_reference_type (cfg, klass)) {
9439 /* insert call to write barrier */
9440 emit_write_barrier (cfg, sp [0], sp [1], -1);
9452 const char *data_ptr;
9454 guint32 field_token;
9460 token = read32 (ip + 1);
9462 klass = mini_get_class (method, token, generic_context);
9463 CHECK_TYPELOAD (klass);
9465 if (cfg->generic_sharing_context)
9466 context_used = mono_class_check_context_used (klass);
9468 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
9469 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
9470 ins->sreg1 = sp [0]->dreg;
9471 ins->type = STACK_I4;
9472 ins->dreg = alloc_ireg (cfg);
9473 MONO_ADD_INS (cfg->cbb, ins);
9474 *sp = mono_decompose_opcode (cfg, ins);
9479 MonoClass *array_class = mono_array_class_get (klass, 1);
9480 /* FIXME: we cannot get a managed
9481 allocator because we can't get the
9482 open generic class's vtable. We
9483 have the same problem in
9484 handle_alloc(). This
9485 needs to be solved so that we can
9486 have managed allocs of shared
9489 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
9490 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
9492 MonoMethod *managed_alloc = NULL;
9494 /* FIXME: Decompose later to help abcrem */
9497 args [0] = emit_get_rgctx_klass (cfg, context_used,
9498 array_class, MONO_RGCTX_INFO_VTABLE);
9503 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
9505 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
9507 if (cfg->opt & MONO_OPT_SHARED) {
9508 /* Decompose now to avoid problems with references to the domainvar */
9509 MonoInst *iargs [3];
9511 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9512 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9515 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
9517 /* Decompose later since it is needed by abcrem */
9518 MonoClass *array_type = mono_array_class_get (klass, 1);
9519 mono_class_vtable (cfg->domain, array_type);
9520 CHECK_TYPELOAD (array_type);
9522 MONO_INST_NEW (cfg, ins, OP_NEWARR);
9523 ins->dreg = alloc_ireg_ref (cfg);
9524 ins->sreg1 = sp [0]->dreg;
9525 ins->inst_newa_class = klass;
9526 ins->type = STACK_OBJ;
9527 ins->klass = array_type;
9528 MONO_ADD_INS (cfg->cbb, ins);
9529 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
9530 cfg->cbb->has_array_access = TRUE;
9532 /* Needed so mono_emit_load_get_addr () gets called */
9533 mono_get_got_var (cfg);
9543 * we inline/optimize the initialization sequence if possible.
9544 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
9545 * for small sizes open code the memcpy
9546 * ensure the rva field is big enough
9548 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
9549 MonoMethod *memcpy_method = get_memcpy_method ();
9550 MonoInst *iargs [3];
9551 int add_reg = alloc_ireg_mp (cfg);
9553 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
9554 if (cfg->compile_aot) {
9555 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
9557 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
9559 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
9560 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9569 if (sp [0]->type != STACK_OBJ)
9572 MONO_INST_NEW (cfg, ins, OP_LDLEN);
9573 ins->dreg = alloc_preg (cfg);
9574 ins->sreg1 = sp [0]->dreg;
9575 ins->type = STACK_I4;
9576 /* This flag will be inherited by the decomposition */
9577 ins->flags |= MONO_INST_FAULT;
9578 MONO_ADD_INS (cfg->cbb, ins);
9579 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
9580 cfg->cbb->has_array_access = TRUE;
9588 if (sp [0]->type != STACK_OBJ)
9591 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9593 klass = mini_get_class (method, read32 (ip + 1), generic_context);
9594 CHECK_TYPELOAD (klass);
9595 /* we need to make sure that this array is exactly the type it needs
9596 * to be for correctness. the wrappers are lax with their usage
9597 * so we need to ignore them here
9599 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
9600 MonoClass *array_class = mono_array_class_get (klass, 1);
9601 mini_emit_check_array_type (cfg, sp [0], array_class);
9602 CHECK_TYPELOAD (array_class);
9606 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9621 case CEE_LDELEM_REF: {
9627 if (*ip == CEE_LDELEM) {
9629 token = read32 (ip + 1);
9630 klass = mini_get_class (method, token, generic_context);
9631 CHECK_TYPELOAD (klass);
9632 mono_class_init (klass);
9635 klass = array_access_to_klass (*ip);
9637 if (sp [0]->type != STACK_OBJ)
9640 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9642 if (sp [1]->opcode == OP_ICONST) {
9643 int array_reg = sp [0]->dreg;
9644 int index_reg = sp [1]->dreg;
9645 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
9647 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
9648 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
9650 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9651 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
9654 if (*ip == CEE_LDELEM)
9667 case CEE_STELEM_REF:
9672 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9674 if (*ip == CEE_STELEM) {
9676 token = read32 (ip + 1);
9677 klass = mini_get_class (method, token, generic_context);
9678 CHECK_TYPELOAD (klass);
9679 mono_class_init (klass);
9682 klass = array_access_to_klass (*ip);
9684 if (sp [0]->type != STACK_OBJ)
9687 emit_array_store (cfg, klass, sp, TRUE);
9689 if (*ip == CEE_STELEM)
9696 case CEE_CKFINITE: {
9700 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
9701 ins->sreg1 = sp [0]->dreg;
9702 ins->dreg = alloc_freg (cfg);
9703 ins->type = STACK_R8;
9704 MONO_ADD_INS (bblock, ins);
9706 *sp++ = mono_decompose_opcode (cfg, ins);
9711 case CEE_REFANYVAL: {
9712 MonoInst *src_var, *src;
9714 int klass_reg = alloc_preg (cfg);
9715 int dreg = alloc_preg (cfg);
9718 MONO_INST_NEW (cfg, ins, *ip);
9721 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
9722 CHECK_TYPELOAD (klass);
9723 mono_class_init (klass);
9725 if (cfg->generic_sharing_context)
9726 context_used = mono_class_check_context_used (klass);
9729 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9731 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9732 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9733 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
9736 MonoInst *klass_ins;
9738 klass_ins = emit_get_rgctx_klass (cfg, context_used,
9739 klass, MONO_RGCTX_INFO_KLASS);
9742 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
9743 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
9745 mini_emit_class_check (cfg, klass_reg, klass);
9747 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
9748 ins->type = STACK_MP;
9753 case CEE_MKREFANY: {
9754 MonoInst *loc, *addr;
9757 MONO_INST_NEW (cfg, ins, *ip);
9760 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
9761 CHECK_TYPELOAD (klass);
9762 mono_class_init (klass);
9764 if (cfg->generic_sharing_context)
9765 context_used = mono_class_check_context_used (klass);
9767 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
9768 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
9771 MonoInst *const_ins;
9772 int type_reg = alloc_preg (cfg);
9774 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
9775 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
9776 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
9777 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
9778 } else if (cfg->compile_aot) {
9779 int const_reg = alloc_preg (cfg);
9780 int type_reg = alloc_preg (cfg);
9782 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
9783 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
9784 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
9785 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
9787 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
9788 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
9790 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
9792 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
9793 ins->type = STACK_VTYPE;
9794 ins->klass = mono_defaults.typed_reference_class;
9801 MonoClass *handle_class;
9803 CHECK_STACK_OVF (1);
9806 n = read32 (ip + 1);
9808 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
9809 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9810 handle = mono_method_get_wrapper_data (method, n);
9811 handle_class = mono_method_get_wrapper_data (method, n + 1);
9812 if (handle_class == mono_defaults.typehandle_class)
9813 handle = &((MonoClass*)handle)->byval_arg;
9816 handle = mono_ldtoken (image, n, &handle_class, generic_context);
9820 mono_class_init (handle_class);
9821 if (cfg->generic_sharing_context) {
9822 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
9823 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
9824 /* This case handles ldtoken
9825 of an open type, like for
9828 } else if (handle_class == mono_defaults.typehandle_class) {
9829 /* If we get a MONO_TYPE_CLASS
9830 then we need to provide the
9832 instantiation of it. */
9833 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
9836 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
9837 } else if (handle_class == mono_defaults.fieldhandle_class)
9838 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
9839 else if (handle_class == mono_defaults.methodhandle_class)
9840 context_used = mono_method_check_context_used (handle);
9842 g_assert_not_reached ();
9845 if ((cfg->opt & MONO_OPT_SHARED) &&
9846 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
9847 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
9848 MonoInst *addr, *vtvar, *iargs [3];
9849 int method_context_used;
9851 if (cfg->generic_sharing_context)
9852 method_context_used = mono_method_check_context_used (method);
9854 method_context_used = 0;
9856 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9858 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9859 EMIT_NEW_ICONST (cfg, iargs [1], n);
9860 if (method_context_used) {
9861 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
9862 method, MONO_RGCTX_INFO_METHOD);
9863 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
9865 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
9866 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
9868 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9870 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9872 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9874 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
9875 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
9876 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
9877 (cmethod->klass == mono_defaults.monotype_class->parent) &&
9878 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
9879 MonoClass *tclass = mono_class_from_mono_type (handle);
9881 mono_class_init (tclass);
9883 ins = emit_get_rgctx_klass (cfg, context_used,
9884 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
9885 } else if (cfg->compile_aot) {
9886 if (method->wrapper_type) {
9887 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
9888 /* Special case for static synchronized wrappers */
9889 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
9891 /* FIXME: n is not a normal token */
9892 cfg->disable_aot = TRUE;
9893 EMIT_NEW_PCONST (cfg, ins, NULL);
9896 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
9899 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
9901 ins->type = STACK_OBJ;
9902 ins->klass = cmethod->klass;
9905 MonoInst *addr, *vtvar;
9907 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9910 if (handle_class == mono_defaults.typehandle_class) {
9911 ins = emit_get_rgctx_klass (cfg, context_used,
9912 mono_class_from_mono_type (handle),
9913 MONO_RGCTX_INFO_TYPE);
9914 } else if (handle_class == mono_defaults.methodhandle_class) {
9915 ins = emit_get_rgctx_method (cfg, context_used,
9916 handle, MONO_RGCTX_INFO_METHOD);
9917 } else if (handle_class == mono_defaults.fieldhandle_class) {
9918 ins = emit_get_rgctx_field (cfg, context_used,
9919 handle, MONO_RGCTX_INFO_CLASS_FIELD);
9921 g_assert_not_reached ();
9923 } else if (cfg->compile_aot) {
9924 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
9926 EMIT_NEW_PCONST (cfg, ins, handle);
9928 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9929 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9930 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9940 MONO_INST_NEW (cfg, ins, OP_THROW);
9942 ins->sreg1 = sp [0]->dreg;
9944 bblock->out_of_line = TRUE;
9945 MONO_ADD_INS (bblock, ins);
9946 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
9947 MONO_ADD_INS (bblock, ins);
9950 link_bblock (cfg, bblock, end_bblock);
9951 start_new_bblock = 1;
9953 case CEE_ENDFINALLY:
9954 /* mono_save_seq_point_info () depends on this */
9955 if (sp != stack_start)
9956 emit_seq_point (cfg, method, ip, FALSE);
9957 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
9958 MONO_ADD_INS (bblock, ins);
9960 start_new_bblock = 1;
9963 * Control will leave the method so empty the stack, otherwise
9964 * the next basic block will start with a nonempty stack.
9966 while (sp != stack_start) {
9974 if (*ip == CEE_LEAVE) {
9976 target = ip + 5 + (gint32)read32(ip + 1);
9979 target = ip + 2 + (signed char)(ip [1]);
9982 /* empty the stack */
9983 while (sp != stack_start) {
9988 * If this leave statement is in a catch block, check for a
9989 * pending exception, and rethrow it if necessary.
9990 * We avoid doing this in runtime invoke wrappers, since those are called
9991 * by native code which expects the wrapper to catch all exceptions.
9993 for (i = 0; i < header->num_clauses; ++i) {
9994 MonoExceptionClause *clause = &header->clauses [i];
9997 * Use <= in the final comparison to handle clauses with multiple
9998 * leave statements, like in bug #78024.
9999 * The ordering of the exception clauses guarantees that we find the
10000 * innermost clause.
10002 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
10004 MonoBasicBlock *dont_throw;
10009 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
10012 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
10014 NEW_BBLOCK (cfg, dont_throw);
10017 * Currently, we always rethrow the abort exception, despite the
10018 * fact that this is not correct. See thread6.cs for an example.
10019 * But propagating the abort exception is more important than
10020 * getting the semantics right.
10022 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
10023 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
10024 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
10026 MONO_START_BB (cfg, dont_throw);
10031 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
10033 MonoExceptionClause *clause;
10035 for (tmp = handlers; tmp; tmp = tmp->next) {
10036 clause = tmp->data;
10037 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
10039 link_bblock (cfg, bblock, tblock);
10040 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
10041 ins->inst_target_bb = tblock;
10042 ins->inst_eh_block = clause;
10043 MONO_ADD_INS (bblock, ins);
10044 bblock->has_call_handler = 1;
10045 if (COMPILE_LLVM (cfg)) {
10046 MonoBasicBlock *target_bb;
10049 * Link the finally bblock with the target, since it will
10050 * conceptually branch there.
10051 * FIXME: Have to link the bblock containing the endfinally.
10053 GET_BBLOCK (cfg, target_bb, target);
10054 link_bblock (cfg, tblock, target_bb);
10057 g_list_free (handlers);
10060 MONO_INST_NEW (cfg, ins, OP_BR);
10061 MONO_ADD_INS (bblock, ins);
10062 GET_BBLOCK (cfg, tblock, target);
10063 link_bblock (cfg, bblock, tblock);
10064 ins->inst_target_bb = tblock;
10065 start_new_bblock = 1;
10067 if (*ip == CEE_LEAVE)
10076 * Mono specific opcodes
10078 case MONO_CUSTOM_PREFIX: {
10080 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10084 case CEE_MONO_ICALL: {
10086 MonoJitICallInfo *info;
10088 token = read32 (ip + 2);
10089 func = mono_method_get_wrapper_data (method, token);
10090 info = mono_find_jit_icall_by_addr (func);
10093 CHECK_STACK (info->sig->param_count);
10094 sp -= info->sig->param_count;
10096 ins = mono_emit_jit_icall (cfg, info->func, sp);
10097 if (!MONO_TYPE_IS_VOID (info->sig->ret))
10101 inline_costs += 10 * num_calls++;
10105 case CEE_MONO_LDPTR: {
10108 CHECK_STACK_OVF (1);
10110 token = read32 (ip + 2);
10112 ptr = mono_method_get_wrapper_data (method, token);
10113 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
10114 MonoJitICallInfo *callinfo;
10115 const char *icall_name;
10117 icall_name = method->name + strlen ("__icall_wrapper_");
10118 g_assert (icall_name);
10119 callinfo = mono_find_jit_icall_by_name (icall_name);
10120 g_assert (callinfo);
10122 if (ptr == callinfo->func) {
10123 /* Will be transformed into an AOTCONST later */
10124 EMIT_NEW_PCONST (cfg, ins, ptr);
10130 /* FIXME: Generalize this */
10131 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
10132 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
10137 EMIT_NEW_PCONST (cfg, ins, ptr);
10140 inline_costs += 10 * num_calls++;
10141 /* Can't embed random pointers into AOT code */
10142 cfg->disable_aot = 1;
10145 case CEE_MONO_ICALL_ADDR: {
10146 MonoMethod *cmethod;
10149 CHECK_STACK_OVF (1);
10151 token = read32 (ip + 2);
10153 cmethod = mono_method_get_wrapper_data (method, token);
10155 if (cfg->compile_aot) {
10156 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
10158 ptr = mono_lookup_internal_call (cmethod);
10160 EMIT_NEW_PCONST (cfg, ins, ptr);
10166 case CEE_MONO_VTADDR: {
10167 MonoInst *src_var, *src;
10173 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10174 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
10179 case CEE_MONO_NEWOBJ: {
10180 MonoInst *iargs [2];
10182 CHECK_STACK_OVF (1);
10184 token = read32 (ip + 2);
10185 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
10186 mono_class_init (klass);
10187 NEW_DOMAINCONST (cfg, iargs [0]);
10188 MONO_ADD_INS (cfg->cbb, iargs [0]);
10189 NEW_CLASSCONST (cfg, iargs [1], klass);
10190 MONO_ADD_INS (cfg->cbb, iargs [1]);
10191 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
10193 inline_costs += 10 * num_calls++;
10196 case CEE_MONO_OBJADDR:
10199 MONO_INST_NEW (cfg, ins, OP_MOVE);
10200 ins->dreg = alloc_ireg_mp (cfg);
10201 ins->sreg1 = sp [0]->dreg;
10202 ins->type = STACK_MP;
10203 MONO_ADD_INS (cfg->cbb, ins);
10207 case CEE_MONO_LDNATIVEOBJ:
10209 * Similar to LDOBJ, but instead load the unmanaged
10210 * representation of the vtype to the stack.
10215 token = read32 (ip + 2);
10216 klass = mono_method_get_wrapper_data (method, token);
10217 g_assert (klass->valuetype);
10218 mono_class_init (klass);
10221 MonoInst *src, *dest, *temp;
10224 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
10225 temp->backend.is_pinvoke = 1;
10226 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
10227 mini_emit_stobj (cfg, dest, src, klass, TRUE);
10229 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
10230 dest->type = STACK_VTYPE;
10231 dest->klass = klass;
10237 case CEE_MONO_RETOBJ: {
10239 * Same as RET, but return the native representation of a vtype
10242 g_assert (cfg->ret);
10243 g_assert (mono_method_signature (method)->pinvoke);
10248 token = read32 (ip + 2);
10249 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
10251 if (!cfg->vret_addr) {
10252 g_assert (cfg->ret_var_is_local);
10254 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
10256 EMIT_NEW_RETLOADA (cfg, ins);
10258 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
10260 if (sp != stack_start)
10263 MONO_INST_NEW (cfg, ins, OP_BR);
10264 ins->inst_target_bb = end_bblock;
10265 MONO_ADD_INS (bblock, ins);
10266 link_bblock (cfg, bblock, end_bblock);
10267 start_new_bblock = 1;
10271 case CEE_MONO_CISINST:
10272 case CEE_MONO_CCASTCLASS: {
10277 token = read32 (ip + 2);
10278 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
10279 if (ip [1] == CEE_MONO_CISINST)
10280 ins = handle_cisinst (cfg, klass, sp [0]);
10282 ins = handle_ccastclass (cfg, klass, sp [0]);
10288 case CEE_MONO_SAVE_LMF:
10289 case CEE_MONO_RESTORE_LMF:
10290 #ifdef MONO_ARCH_HAVE_LMF_OPS
10291 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
10292 MONO_ADD_INS (bblock, ins);
10293 cfg->need_lmf_area = TRUE;
10297 case CEE_MONO_CLASSCONST:
10298 CHECK_STACK_OVF (1);
10300 token = read32 (ip + 2);
10301 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
10304 inline_costs += 10 * num_calls++;
10306 case CEE_MONO_NOT_TAKEN:
10307 bblock->out_of_line = TRUE;
10311 CHECK_STACK_OVF (1);
10313 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
10314 ins->dreg = alloc_preg (cfg);
10315 ins->inst_offset = (gint32)read32 (ip + 2);
10316 ins->type = STACK_PTR;
10317 MONO_ADD_INS (bblock, ins);
10321 case CEE_MONO_DYN_CALL: {
10322 MonoCallInst *call;
10324 /* It would be easier to call a trampoline, but that would put an
10325 * extra frame on the stack, confusing exception handling. So
10326 * implement it inline using an opcode for now.
10329 if (!cfg->dyn_call_var) {
10330 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
10331 /* prevent it from being register allocated */
10332 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
10335 /* Has to use a call inst since it local regalloc expects it */
10336 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
10337 ins = (MonoInst*)call;
10339 ins->sreg1 = sp [0]->dreg;
10340 ins->sreg2 = sp [1]->dreg;
10341 MONO_ADD_INS (bblock, ins);
10343 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
10344 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
10348 inline_costs += 10 * num_calls++;
10352 case CEE_MONO_MEMORY_BARRIER: {
10354 emit_memory_barrier (cfg, (int)read32 (ip + 1));
10358 case CEE_MONO_JIT_ATTACH: {
10359 MonoInst *args [16];
10360 MonoInst *ad_ins, *lmf_ins;
10361 MonoBasicBlock *next_bb = NULL;
10363 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
10365 EMIT_NEW_PCONST (cfg, ins, NULL);
10366 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
10372 ad_ins = mono_get_domain_intrinsic (cfg);
10373 lmf_ins = mono_get_lmf_intrinsic (cfg);
10376 #ifdef MONO_ARCH_HAVE_TLS_GET
10377 if (MONO_ARCH_HAVE_TLS_GET && ad_ins && lmf_ins) {
10378 NEW_BBLOCK (cfg, next_bb);
10380 MONO_ADD_INS (cfg->cbb, ad_ins);
10381 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ad_ins->dreg, 0);
10382 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
10384 MONO_ADD_INS (cfg->cbb, lmf_ins);
10385 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, lmf_ins->dreg, 0);
10386 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
10390 if (cfg->compile_aot) {
10391 /* AOT code is only used in the root domain */
10392 EMIT_NEW_PCONST (cfg, args [0], NULL);
10394 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
10396 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
10397 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
10400 MONO_START_BB (cfg, next_bb);
10406 case CEE_MONO_JIT_DETACH: {
10407 MonoInst *args [16];
10409 /* Restore the original domain */
10410 dreg = alloc_ireg (cfg);
10411 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
10412 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
10417 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
10423 case CEE_PREFIX1: {
10426 case CEE_ARGLIST: {
10427 /* somewhat similar to LDTOKEN */
10428 MonoInst *addr, *vtvar;
10429 CHECK_STACK_OVF (1);
10430 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
10432 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
10433 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
10435 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
10436 ins->type = STACK_VTYPE;
10437 ins->klass = mono_defaults.argumenthandle_class;
10450 * The following transforms:
10451 * CEE_CEQ into OP_CEQ
10452 * CEE_CGT into OP_CGT
10453 * CEE_CGT_UN into OP_CGT_UN
10454 * CEE_CLT into OP_CLT
10455 * CEE_CLT_UN into OP_CLT_UN
10457 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
10459 MONO_INST_NEW (cfg, ins, cmp->opcode);
10461 cmp->sreg1 = sp [0]->dreg;
10462 cmp->sreg2 = sp [1]->dreg;
10463 type_from_op (cmp, sp [0], sp [1]);
10465 if ((sp [0]->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
10466 cmp->opcode = OP_LCOMPARE;
10467 else if (sp [0]->type == STACK_R8)
10468 cmp->opcode = OP_FCOMPARE;
10470 cmp->opcode = OP_ICOMPARE;
10471 MONO_ADD_INS (bblock, cmp);
10472 ins->type = STACK_I4;
10473 ins->dreg = alloc_dreg (cfg, ins->type);
10474 type_from_op (ins, sp [0], sp [1]);
10476 if (cmp->opcode == OP_FCOMPARE) {
10478 * The backends expect the fceq opcodes to do the
10481 cmp->opcode = OP_NOP;
10482 ins->sreg1 = cmp->sreg1;
10483 ins->sreg2 = cmp->sreg2;
10485 MONO_ADD_INS (bblock, ins);
10491 MonoInst *argconst;
10492 MonoMethod *cil_method;
10494 CHECK_STACK_OVF (1);
10496 n = read32 (ip + 2);
10497 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
10498 if (!cmethod || mono_loader_get_last_error ())
10500 mono_class_init (cmethod->klass);
10502 mono_save_token_info (cfg, image, n, cmethod);
10504 if (cfg->generic_sharing_context)
10505 context_used = mono_method_check_context_used (cmethod);
10507 cil_method = cmethod;
10508 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
10509 METHOD_ACCESS_FAILURE;
10511 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
10512 if (check_linkdemand (cfg, method, cmethod))
10513 INLINE_FAILURE ("linkdemand");
10514 CHECK_CFG_EXCEPTION;
10515 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
10516 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
10520 * Optimize the common case of ldftn+delegate creation
10522 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
10523 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
10524 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
10525 MonoInst *target_ins;
10526 MonoMethod *invoke;
10527 int invoke_context_used = 0;
10529 invoke = mono_get_delegate_invoke (ctor_method->klass);
10530 if (!invoke || !mono_method_signature (invoke))
10533 if (cfg->generic_sharing_context)
10534 invoke_context_used = mono_method_check_context_used (invoke);
10536 target_ins = sp [-1];
10538 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
10539 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
10541 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
10542 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
10543 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
10544 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
10545 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
10549 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
10550 /* FIXME: SGEN support */
10551 if (invoke_context_used == 0) {
10553 if (cfg->verbose_level > 3)
10554 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10556 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
10557 CHECK_CFG_EXCEPTION;
10566 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
10567 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
10571 inline_costs += 10 * num_calls++;
10574 case CEE_LDVIRTFTN: {
10575 MonoInst *args [2];
10579 n = read32 (ip + 2);
10580 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
10581 if (!cmethod || mono_loader_get_last_error ())
10583 mono_class_init (cmethod->klass);
10585 if (cfg->generic_sharing_context)
10586 context_used = mono_method_check_context_used (cmethod);
10588 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
10589 if (check_linkdemand (cfg, method, cmethod))
10590 INLINE_FAILURE ("linkdemand");
10591 CHECK_CFG_EXCEPTION;
10592 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
10593 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
10599 args [1] = emit_get_rgctx_method (cfg, context_used,
10600 cmethod, MONO_RGCTX_INFO_METHOD);
10603 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
10605 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
10608 inline_costs += 10 * num_calls++;
10612 CHECK_STACK_OVF (1);
10614 n = read16 (ip + 2);
10616 EMIT_NEW_ARGLOAD (cfg, ins, n);
10621 CHECK_STACK_OVF (1);
10623 n = read16 (ip + 2);
10625 NEW_ARGLOADA (cfg, ins, n);
10626 MONO_ADD_INS (cfg->cbb, ins);
10634 n = read16 (ip + 2);
10636 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
10638 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
10642 CHECK_STACK_OVF (1);
10644 n = read16 (ip + 2);
10646 EMIT_NEW_LOCLOAD (cfg, ins, n);
10651 unsigned char *tmp_ip;
10652 CHECK_STACK_OVF (1);
10654 n = read16 (ip + 2);
10657 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
10663 EMIT_NEW_LOCLOADA (cfg, ins, n);
10672 n = read16 (ip + 2);
10674 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
10676 emit_stloc_ir (cfg, sp, header, n);
10683 if (sp != stack_start)
10685 if (cfg->method != method)
10687 * Inlining this into a loop in a parent could lead to
10688 * stack overflows which is different behavior than the
10689 * non-inlined case, thus disable inlining in this case.
10691 goto inline_failure;
10693 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
10694 ins->dreg = alloc_preg (cfg);
10695 ins->sreg1 = sp [0]->dreg;
10696 ins->type = STACK_PTR;
10697 MONO_ADD_INS (cfg->cbb, ins);
10699 cfg->flags |= MONO_CFG_HAS_ALLOCA;
10701 ins->flags |= MONO_INST_INIT;
10706 case CEE_ENDFILTER: {
10707 MonoExceptionClause *clause, *nearest;
10708 int cc, nearest_num;
10712 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
10714 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
10715 ins->sreg1 = (*sp)->dreg;
10716 MONO_ADD_INS (bblock, ins);
10717 start_new_bblock = 1;
10722 for (cc = 0; cc < header->num_clauses; ++cc) {
10723 clause = &header->clauses [cc];
10724 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
10725 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
10726 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
10731 g_assert (nearest);
10732 if ((ip - header->code) != nearest->handler_offset)
10737 case CEE_UNALIGNED_:
10738 ins_flag |= MONO_INST_UNALIGNED;
10739 /* FIXME: record alignment? we can assume 1 for now */
10743 case CEE_VOLATILE_:
10744 ins_flag |= MONO_INST_VOLATILE;
10748 ins_flag |= MONO_INST_TAILCALL;
10749 cfg->flags |= MONO_CFG_HAS_TAIL;
10750 /* Can't inline tail calls at this time */
10751 inline_costs += 100000;
10758 token = read32 (ip + 2);
10759 klass = mini_get_class (method, token, generic_context);
10760 CHECK_TYPELOAD (klass);
10761 if (generic_class_is_reference_type (cfg, klass))
10762 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
10764 mini_emit_initobj (cfg, *sp, NULL, klass);
10768 case CEE_CONSTRAINED_:
10770 token = read32 (ip + 2);
10771 if (method->wrapper_type != MONO_WRAPPER_NONE)
10772 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
10774 constrained_call = mono_class_get_full (image, token, generic_context);
10775 CHECK_TYPELOAD (constrained_call);
10779 case CEE_INITBLK: {
10780 MonoInst *iargs [3];
10784 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
10785 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
10786 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
10787 /* emit_memset only works when val == 0 */
10788 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
10790 iargs [0] = sp [0];
10791 iargs [1] = sp [1];
10792 iargs [2] = sp [2];
10793 if (ip [1] == CEE_CPBLK) {
10794 MonoMethod *memcpy_method = get_memcpy_method ();
10795 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
10797 MonoMethod *memset_method = get_memset_method ();
10798 mono_emit_method_call (cfg, memset_method, iargs, NULL);
10808 ins_flag |= MONO_INST_NOTYPECHECK;
10810 ins_flag |= MONO_INST_NORANGECHECK;
10811 /* we ignore the no-nullcheck for now since we
10812 * really do it explicitly only when doing callvirt->call
10816 case CEE_RETHROW: {
10818 int handler_offset = -1;
10820 for (i = 0; i < header->num_clauses; ++i) {
10821 MonoExceptionClause *clause = &header->clauses [i];
10822 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
10823 handler_offset = clause->handler_offset;
10828 bblock->flags |= BB_EXCEPTION_UNSAFE;
10830 g_assert (handler_offset != -1);
10832 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
10833 MONO_INST_NEW (cfg, ins, OP_RETHROW);
10834 ins->sreg1 = load->dreg;
10835 MONO_ADD_INS (bblock, ins);
10837 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
10838 MONO_ADD_INS (bblock, ins);
10841 link_bblock (cfg, bblock, end_bblock);
10842 start_new_bblock = 1;
10850 CHECK_STACK_OVF (1);
10852 token = read32 (ip + 2);
10853 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic && !generic_context) {
10854 MonoType *type = mono_type_create_from_typespec (image, token);
10855 token = mono_type_size (type, &ialign);
10857 MonoClass *klass = mono_class_get_full (image, token, generic_context);
10858 CHECK_TYPELOAD (klass);
10859 mono_class_init (klass);
10860 token = mono_class_value_size (klass, &align);
10862 EMIT_NEW_ICONST (cfg, ins, token);
10867 case CEE_REFANYTYPE: {
10868 MonoInst *src_var, *src;
10874 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10876 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
10877 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
10878 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
10883 case CEE_READONLY_:
10896 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
10906 g_warning ("opcode 0x%02x not handled", *ip);
10910 if (start_new_bblock != 1)
10913 bblock->cil_length = ip - bblock->cil_code;
10914 if (bblock->next_bb) {
10915 /* This could already be set because of inlining, #693905 */
10916 MonoBasicBlock *bb = bblock;
10918 while (bb->next_bb)
10920 bb->next_bb = end_bblock;
10922 bblock->next_bb = end_bblock;
10925 if (cfg->method == method && cfg->domainvar) {
10927 MonoInst *get_domain;
10929 cfg->cbb = init_localsbb;
10931 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
10932 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
10935 get_domain->dreg = alloc_preg (cfg);
10936 MONO_ADD_INS (cfg->cbb, get_domain);
10938 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
10939 MONO_ADD_INS (cfg->cbb, store);
10942 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
10943 if (cfg->compile_aot)
10944 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
10945 mono_get_got_var (cfg);
10948 if (cfg->method == method && cfg->got_var)
10949 mono_emit_load_got_addr (cfg);
10954 cfg->cbb = init_localsbb;
10956 for (i = 0; i < header->num_locals; ++i) {
10957 MonoType *ptype = header->locals [i];
10958 int t = ptype->type;
10959 dreg = cfg->locals [i]->dreg;
10961 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
10962 t = mono_class_enum_basetype (ptype->data.klass)->type;
10963 if (ptype->byref) {
10964 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10965 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
10966 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
10967 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
10968 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
10969 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
10970 MONO_INST_NEW (cfg, ins, OP_R8CONST);
10971 ins->type = STACK_R8;
10972 ins->inst_p0 = (void*)&r8_0;
10973 ins->dreg = alloc_dreg (cfg, STACK_R8);
10974 MONO_ADD_INS (init_localsbb, ins);
10975 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
10976 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
10977 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
10978 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
10980 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10985 if (cfg->init_ref_vars && cfg->method == method) {
10986 /* Emit initialization for ref vars */
10987 // FIXME: Avoid duplication initialization for IL locals.
10988 for (i = 0; i < cfg->num_varinfo; ++i) {
10989 MonoInst *ins = cfg->varinfo [i];
10991 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
10992 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
10997 MonoBasicBlock *bb;
11000 * Make seq points at backward branch targets interruptable.
11002 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
11003 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
11004 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
11007 /* Add a sequence point for method entry/exit events */
11009 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
11010 MONO_ADD_INS (init_localsbb, ins);
11011 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
11012 MONO_ADD_INS (cfg->bb_exit, ins);
11017 if (cfg->method == method) {
11018 MonoBasicBlock *bb;
11019 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
11020 bb->region = mono_find_block_region (cfg, bb->real_offset);
11022 mono_create_spvar_for_region (cfg, bb->region);
11023 if (cfg->verbose_level > 2)
11024 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
11028 g_slist_free (class_inits);
11029 dont_inline = g_list_remove (dont_inline, method);
11031 if (inline_costs < 0) {
11034 /* Method is too large */
11035 mname = mono_method_full_name (method, TRUE);
11036 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
11037 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
11039 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
11040 mono_basic_block_free (original_bb);
11044 if ((cfg->verbose_level > 2) && (cfg->method == method))
11045 mono_print_code (cfg, "AFTER METHOD-TO-IR");
11047 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
11048 mono_basic_block_free (original_bb);
11049 return inline_costs;
11052 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
11059 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
11063 set_exception_type_from_invalid_il (cfg, method, ip);
11067 g_slist_free (class_inits);
11068 mono_basic_block_free (original_bb);
11069 dont_inline = g_list_remove (dont_inline, method);
11070 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Return the OP_STORE*_MEMBASE_IMM opcode corresponding to the given
 * OP_STORE*_MEMBASE_REG opcode, i.e. the store variant whose source is an
 * inline immediate instead of a register.  Asserts if OPCODE has no
 * immediate form.
 */
11075 store_membase_reg_to_store_membase_imm (int opcode)
11078 case OP_STORE_MEMBASE_REG:
11079 return OP_STORE_MEMBASE_IMM;
11080 case OP_STOREI1_MEMBASE_REG:
11081 return OP_STOREI1_MEMBASE_IMM;
11082 case OP_STOREI2_MEMBASE_REG:
11083 return OP_STOREI2_MEMBASE_IMM;
11084 case OP_STOREI4_MEMBASE_REG:
11085 return OP_STOREI4_MEMBASE_IMM;
11086 case OP_STOREI8_MEMBASE_REG:
11087 return OP_STOREI8_MEMBASE_IMM;
/* no immediate form exists for any other store opcode */
11089 g_assert_not_reached ();
11095 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 *
 *   Return the _IMM variant of OPCODE, i.e. the form which takes an inline
 * immediate constant instead of a second source register.  Used by the
 * local copy/constant propagation passes to fold constants into
 * instructions.
 */
11098 mono_op_to_op_imm (int opcode)
/* 32 bit integer ALU and shift opcodes */
11102 return OP_IADD_IMM;
11104 return OP_ISUB_IMM;
11106 return OP_IDIV_IMM;
11108 return OP_IDIV_UN_IMM;
11110 return OP_IREM_IMM;
11112 return OP_IREM_UN_IMM;
11114 return OP_IMUL_IMM;
11116 return OP_IAND_IMM;
11120 return OP_IXOR_IMM;
11122 return OP_ISHL_IMM;
11124 return OP_ISHR_IMM;
11126 return OP_ISHR_UN_IMM;
/* 64 bit integer ALU and shift opcodes */
11129 return OP_LADD_IMM;
11131 return OP_LSUB_IMM;
11133 return OP_LAND_IMM;
11137 return OP_LXOR_IMM;
11139 return OP_LSHL_IMM;
11141 return OP_LSHR_IMM;
11143 return OP_LSHR_UN_IMM;
/* compares */
11146 return OP_COMPARE_IMM;
11148 return OP_ICOMPARE_IMM;
11150 return OP_LCOMPARE_IMM;
/* stores: fold the constant into the stored value */
11152 case OP_STORE_MEMBASE_REG:
11153 return OP_STORE_MEMBASE_IMM;
11154 case OP_STOREI1_MEMBASE_REG:
11155 return OP_STOREI1_MEMBASE_IMM;
11156 case OP_STOREI2_MEMBASE_REG:
11157 return OP_STOREI2_MEMBASE_IMM;
11158 case OP_STOREI4_MEMBASE_REG:
11159 return OP_STOREI4_MEMBASE_IMM;
/* architecture specific opcodes */
11161 #if defined(TARGET_X86) || defined (TARGET_AMD64)
11163 return OP_X86_PUSH_IMM;
11164 case OP_X86_COMPARE_MEMBASE_REG:
11165 return OP_X86_COMPARE_MEMBASE_IMM;
11167 #if defined(TARGET_AMD64)
11168 case OP_AMD64_ICOMPARE_MEMBASE_REG:
11169 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* an indirect call through a constant address becomes a direct call */
11171 case OP_VOIDCALL_REG:
11172 return OP_VOIDCALL;
11180 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CEE_LDIND_* IL opcode to the corresponding OP_LOAD*_MEMBASE
 * linear IR load opcode (load from [basereg + offset]).  Asserts on
 * opcodes which are not indirect loads.
 */
11187 ldind_to_load_membase (int opcode)
11191 return OP_LOADI1_MEMBASE;
11193 return OP_LOADU1_MEMBASE;
11195 return OP_LOADI2_MEMBASE;
11197 return OP_LOADU2_MEMBASE;
11199 return OP_LOADI4_MEMBASE;
11201 return OP_LOADU4_MEMBASE;
11203 return OP_LOAD_MEMBASE;
/* object references use the native-pointer-sized load */
11204 case CEE_LDIND_REF:
11205 return OP_LOAD_MEMBASE;
11207 return OP_LOADI8_MEMBASE;
11209 return OP_LOADR4_MEMBASE;
11211 return OP_LOADR8_MEMBASE;
11213 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CEE_STIND_* IL opcode to the corresponding OP_STORE*_MEMBASE_REG
 * linear IR store opcode (store a register to [basereg + offset]).
 * Asserts on opcodes which are not indirect stores.
 */
11220 stind_to_store_membase (int opcode)
11224 return OP_STOREI1_MEMBASE_REG;
11226 return OP_STOREI2_MEMBASE_REG;
11228 return OP_STOREI4_MEMBASE_REG;
/* object references use the native-pointer-sized store */
11230 case CEE_STIND_REF:
11231 return OP_STORE_MEMBASE_REG;
11233 return OP_STOREI8_MEMBASE_REG;
11235 return OP_STORER4_MEMBASE_REG;
11237 return OP_STORER8_MEMBASE_REG;
11239 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map an OP_LOAD*_MEMBASE opcode to the matching OP_LOAD*_MEM opcode,
 * i.e. a load from a constant absolute address, on architectures which
 * support such addressing (currently x86/amd64 only).
 */
11246 mono_load_membase_to_load_mem (int opcode)
11248 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
11249 #if defined(TARGET_X86) || defined(TARGET_AMD64)
11251 case OP_LOAD_MEMBASE:
11252 return OP_LOAD_MEM;
11253 case OP_LOADU1_MEMBASE:
11254 return OP_LOADU1_MEM;
11255 case OP_LOADU2_MEMBASE:
11256 return OP_LOADU2_MEM;
11257 case OP_LOADI4_MEMBASE:
11258 return OP_LOADI4_MEM;
11259 case OP_LOADU4_MEMBASE:
11260 return OP_LOADU4_MEM;
/* 8 byte absolute loads only exist on 64 bit targets */
11261 #if SIZEOF_REGISTER == 8
11262 case OP_LOADI8_MEMBASE:
11263 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Return an architecture specific opcode which performs OPCODE directly
 * on a memory destination, folding away the following store described by
 * STORE_OPCODE (read-modify-write fusion on x86/amd64).
 */
11272 op_to_op_dest_membase (int store_opcode, int opcode)
11274 #if defined(TARGET_X86)
/* only full-word stores can be fused on x86 */
11275 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
11280 return OP_X86_ADD_MEMBASE_REG;
11282 return OP_X86_SUB_MEMBASE_REG;
11284 return OP_X86_AND_MEMBASE_REG;
11286 return OP_X86_OR_MEMBASE_REG;
11288 return OP_X86_XOR_MEMBASE_REG;
11291 return OP_X86_ADD_MEMBASE_IMM;
11294 return OP_X86_SUB_MEMBASE_IMM;
11297 return OP_X86_AND_MEMBASE_IMM;
11300 return OP_X86_OR_MEMBASE_IMM;
11303 return OP_X86_XOR_MEMBASE_IMM;
11309 #if defined(TARGET_AMD64)
/* amd64 additionally allows fusing 8 byte stores */
11310 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
11315 return OP_X86_ADD_MEMBASE_REG;
11317 return OP_X86_SUB_MEMBASE_REG;
11319 return OP_X86_AND_MEMBASE_REG;
11321 return OP_X86_OR_MEMBASE_REG;
11323 return OP_X86_XOR_MEMBASE_REG;
11325 return OP_X86_ADD_MEMBASE_IMM;
11327 return OP_X86_SUB_MEMBASE_IMM;
11329 return OP_X86_AND_MEMBASE_IMM;
11331 return OP_X86_OR_MEMBASE_IMM;
11333 return OP_X86_XOR_MEMBASE_IMM;
/* 64 bit variants */
11335 return OP_AMD64_ADD_MEMBASE_REG;
11337 return OP_AMD64_SUB_MEMBASE_REG;
11339 return OP_AMD64_AND_MEMBASE_REG;
11341 return OP_AMD64_OR_MEMBASE_REG;
11343 return OP_AMD64_XOR_MEMBASE_REG;
11346 return OP_AMD64_ADD_MEMBASE_IMM;
11349 return OP_AMD64_SUB_MEMBASE_IMM;
11352 return OP_AMD64_AND_MEMBASE_IMM;
11355 return OP_AMD64_OR_MEMBASE_IMM;
11358 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Return an opcode which stores the result of OPCODE directly to memory,
 * folding away the following byte store described by STORE_OPCODE
 * (SETcc-to-memory fusion on x86/amd64).
 */
11368 op_to_op_store_membase (int store_opcode, int opcode)
11370 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* SETcc writes a single byte, so only a 1 byte store can be fused */
11373 if (store_opcode == OP_STOREI1_MEMBASE_REG)
11374 return OP_X86_SETEQ_MEMBASE;
11376 if (store_opcode == OP_STOREI1_MEMBASE_REG)
11377 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Return an opcode which folds the preceding load described by
 * LOAD_OPCODE into OPCODE's first source operand, turning a load+op pair
 * into a single op-with-memory-operand instruction on x86/amd64.
 */
11385 op_to_op_src1_membase (int load_opcode, int opcode)
11388 /* FIXME: This has sign extension issues */
11390 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
11391 return OP_X86_COMPARE_MEMBASE8_IMM;
/* only pointer-sized loads can be folded into the remaining opcodes */
11394 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
11399 return OP_X86_PUSH_MEMBASE;
11400 case OP_COMPARE_IMM:
11401 case OP_ICOMPARE_IMM:
11402 return OP_X86_COMPARE_MEMBASE_IMM;
11405 return OP_X86_COMPARE_MEMBASE_REG;
11409 #ifdef TARGET_AMD64
11410 /* FIXME: This has sign extension issues */
11412 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
11413 return OP_X86_COMPARE_MEMBASE8_IMM;
/* under ILP32 (x32), OP_LOAD_MEMBASE is 4 bytes and OP_LOADI8_MEMBASE must be kept apart */
11418 #ifdef __mono_ilp32__
11419 if (load_opcode == OP_LOADI8_MEMBASE)
11421 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
11423 return OP_X86_PUSH_MEMBASE;
11425 /* FIXME: This only works for 32 bit immediates
11426 case OP_COMPARE_IMM:
11427 case OP_LCOMPARE_IMM:
11428 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
11429 return OP_AMD64_COMPARE_MEMBASE_IMM;
11431 case OP_ICOMPARE_IMM:
11432 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
11433 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
11437 #ifdef __mono_ilp32__
11438 if (load_opcode == OP_LOAD_MEMBASE)
11439 return OP_AMD64_ICOMPARE_MEMBASE_REG;
11440 if (load_opcode == OP_LOADI8_MEMBASE)
11442 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
11444 return OP_AMD64_COMPARE_MEMBASE_REG;
11447 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
11448 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Return an opcode which folds the preceding load described by
 * LOAD_OPCODE into OPCODE's second source operand (reg OP [mem] fusion
 * on x86/amd64).
 */
11457 op_to_op_src2_membase (int load_opcode, int opcode)
/* only pointer-sized loads can be folded */
11460 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
11466 return OP_X86_COMPARE_REG_MEMBASE;
11468 return OP_X86_ADD_REG_MEMBASE;
11470 return OP_X86_SUB_REG_MEMBASE;
11472 return OP_X86_AND_REG_MEMBASE;
11474 return OP_X86_OR_REG_MEMBASE;
11476 return OP_X86_XOR_REG_MEMBASE;
11480 #ifdef TARGET_AMD64
/* under ILP32 (x32), OP_LOAD_MEMBASE loads 4 bytes, so it belongs with the 32 bit variants */
11481 #ifdef __mono_ilp32__
11482 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
11484 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
11488 return OP_AMD64_ICOMPARE_REG_MEMBASE;
11490 return OP_X86_ADD_REG_MEMBASE;
11492 return OP_X86_SUB_REG_MEMBASE;
11494 return OP_X86_AND_REG_MEMBASE;
11496 return OP_X86_OR_REG_MEMBASE;
11498 return OP_X86_XOR_REG_MEMBASE;
11500 #ifdef __mono_ilp32__
11501 } else if (load_opcode == OP_LOADI8_MEMBASE) {
11503 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
/* 64 bit variants */
11508 return OP_AMD64_COMPARE_REG_MEMBASE;
11510 return OP_AMD64_ADD_REG_MEMBASE;
11512 return OP_AMD64_SUB_REG_MEMBASE;
11514 return OP_AMD64_AND_REG_MEMBASE;
11516 return OP_AMD64_OR_REG_MEMBASE;
11518 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuse the conversion for opcodes which
 * are emulated in software on the current architecture (long shifts,
 * mul/div), since their _IMM forms would have no hardware implementation.
 */
11527 mono_op_to_op_imm_noemul (int opcode)
11530 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
11536 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
11543 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
/* everything else can be converted normally */
11548 return mono_op_to_op_imm (opcode);
11552 #ifndef DISABLE_JIT
11555 * mono_handle_global_vregs:
11557 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
11561 mono_handle_global_vregs (MonoCompile *cfg)
11563 gint32 *vreg_to_bb;
11564 MonoBasicBlock *bb;
/* vreg_to_bb [vreg]: 0 = unseen, (block_num + 1) = seen in exactly that
 * bblock, -1 = seen in more than one bblock.  Element type is gint32, so
 * size by sizeof (gint32) and allocate one extra ELEMENT (the previous
 * sizeof (gint32*) ... + 1 expression sized by pointer width and added a
 * single byte). */
11567 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32) * (cfg->next_vreg + 1));
11569 #ifdef MONO_ARCH_SIMD_INTRINSICS
11570 if (cfg->uses_simd_intrinsics)
11571 mono_simd_simplify_indirection (cfg);
11574 /* Find local vregs used in more than one bb */
11575 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
11576 MonoInst *ins = bb->code;
11577 int block_num = bb->block_num;
11579 if (cfg->verbose_level > 2)
11580 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
11583 for (; ins; ins = ins->next) {
11584 const char *spec = INS_INFO (ins->opcode);
11585 int regtype = 0, regindex;
11588 if (G_UNLIKELY (cfg->verbose_level > 2))
11589 mono_print_ins (ins);
/* by this point all IL-level opcodes must have been lowered to machine IR */
11591 g_assert (ins->opcode >= MONO_CEE_LAST);
/* visit dreg, sreg1, sreg2, sreg3 in turn */
11593 for (regindex = 0; regindex < 4; regindex ++) {
11596 if (regindex == 0) {
11597 regtype = spec [MONO_INST_DEST];
11598 if (regtype == ' ')
11601 } else if (regindex == 1) {
11602 regtype = spec [MONO_INST_SRC1];
11603 if (regtype == ' ')
11606 } else if (regindex == 2) {
11607 regtype = spec [MONO_INST_SRC2];
11608 if (regtype == ' ')
11611 } else if (regindex == 3) {
11612 regtype = spec [MONO_INST_SRC3];
11613 if (regtype == ' ')
11618 #if SIZEOF_REGISTER == 4
11619 /* In the LLVM case, the long opcodes are not decomposed */
11620 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
11622 * Since some instructions reference the original long vreg,
11623 * and some reference the two component vregs, it is quite hard
11624 * to determine when it needs to be global. So be conservative.
11626 if (!get_vreg_to_inst (cfg, vreg)) {
11627 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
11629 if (cfg->verbose_level > 2)
11630 printf ("LONG VREG R%d made global.\n", vreg);
11634 * Make the component vregs volatile since the optimizations can
11635 * get confused otherwise.
11637 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
11638 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
11642 g_assert (vreg != -1);
11644 prev_bb = vreg_to_bb [vreg];
11645 if (prev_bb == 0) {
11646 /* 0 is a valid block num */
11647 vreg_to_bb [vreg] = block_num + 1;
11648 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* hardware registers are never turned into variables */
11649 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
11652 if (!get_vreg_to_inst (cfg, vreg)) {
11653 if (G_UNLIKELY (cfg->verbose_level > 2))
11654 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* create a variable of the appropriate type for the vreg */
11658 if (vreg_is_ref (cfg, vreg))
11659 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
11661 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
11664 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
11667 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
11670 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
11673 g_assert_not_reached ();
11677 /* Flag as having been used in more than one bb */
11678 vreg_to_bb [vreg] = -1;
11684 /* If a variable is used in only one bblock, convert it into a local vreg */
11685 for (i = 0; i < cfg->num_varinfo; i++) {
11686 MonoInst *var = cfg->varinfo [i];
11687 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
11689 switch (var->type) {
11695 #if SIZEOF_REGISTER == 8
11698 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
11699 /* Enabling this screws up the fp stack on x86 */
11702 /* Arguments are implicitly global */
11703 /* Putting R4 vars into registers doesn't work currently */
11704 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
11706 * Make that the variable's liveness interval doesn't contain a call, since
11707 * that would cause the lvreg to be spilled, making the whole optimization
11710 /* This is too slow for JIT compilation */
11712 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
11714 int def_index, call_index, ins_index;
11715 gboolean spilled = FALSE;
11720 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
11721 const char *spec = INS_INFO (ins->opcode);
11723 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
11724 def_index = ins_index;
/* a use through EITHER source register counts; the second clause used to
 * re-test SRC1/sreg1, so uses through sreg2 were silently missed */
11726 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
11727 ((spec [MONO_INST_SRC2] != ' ') && (ins->sreg2 == var->dreg))) {
11728 if (call_index > def_index) {
11734 if (MONO_IS_CALL (ins))
11735 call_index = ins_index;
11745 if (G_UNLIKELY (cfg->verbose_level > 2))
11746 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* mark the variable dead; the vreg stays purely local to its bblock */
11747 var->flags |= MONO_INST_IS_DEAD;
11748 cfg->vreg_to_inst [var->dreg] = NULL;
11755 * Compress the varinfo and vars tables so the liveness computation is faster and
11756 * takes up less space.
11759 for (i = 0; i < cfg->num_varinfo; ++i) {
11760 MonoInst *var = cfg->varinfo [i];
11761 if (pos < i && cfg->locals_start == i)
11762 cfg->locals_start = pos;
11763 if (!(var->flags & MONO_INST_IS_DEAD)) {
11765 cfg->varinfo [pos] = cfg->varinfo [i];
11766 cfg->varinfo [pos]->inst_c0 = pos;
11767 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
11768 cfg->vars [pos].idx = pos;
11769 #if SIZEOF_REGISTER == 4
11770 if (cfg->varinfo [pos]->type == STACK_I8) {
11771 /* Modify the two component vars too */
11774 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
11775 var1->inst_c0 = pos;
11776 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
11777 var1->inst_c0 = pos;
11784 cfg->num_varinfo = pos;
11785 if (cfg->locals_start > cfg->num_varinfo)
11786 cfg->locals_start = cfg->num_varinfo;
11790 * mono_spill_global_vars:
11792 * Generate spill code for variables which are not allocated to registers,
11793 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
11794 * code is generated which could be optimized by the local optimization passes.
11797 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
11799 MonoBasicBlock *bb;
11801 int orig_next_vreg;
11802 guint32 *vreg_to_lvreg;
11804 guint32 i, lvregs_len;
11805 gboolean dest_has_lvreg = FALSE;
11806 guint32 stacktypes [128];
11807 MonoInst **live_range_start, **live_range_end;
11808 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
11810 *need_local_opts = FALSE;
11812 memset (spec2, 0, sizeof (spec2));
11814 /* FIXME: Move this function to mini.c */
11815 stacktypes ['i'] = STACK_PTR;
11816 stacktypes ['l'] = STACK_I8;
11817 stacktypes ['f'] = STACK_R8;
11818 #ifdef MONO_ARCH_SIMD_INTRINSICS
11819 stacktypes ['x'] = STACK_VTYPE;
11822 #if SIZEOF_REGISTER == 4
11823 /* Create MonoInsts for longs */
11824 for (i = 0; i < cfg->num_varinfo; i++) {
11825 MonoInst *ins = cfg->varinfo [i];
11827 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
11828 switch (ins->type) {
11833 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
11836 g_assert (ins->opcode == OP_REGOFFSET);
11838 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
11840 tree->opcode = OP_REGOFFSET;
11841 tree->inst_basereg = ins->inst_basereg;
11842 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
11844 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
11846 tree->opcode = OP_REGOFFSET;
11847 tree->inst_basereg = ins->inst_basereg;
11848 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
11858 if (cfg->compute_gc_maps) {
11859 /* registers need liveness info even for !non refs */
11860 for (i = 0; i < cfg->num_varinfo; i++) {
11861 MonoInst *ins = cfg->varinfo [i];
11863 if (ins->opcode == OP_REGVAR)
11864 ins->flags |= MONO_INST_GC_TRACK;
11868 /* FIXME: widening and truncation */
11871 * As an optimization, when a variable allocated to the stack is first loaded into
11872 * an lvreg, we will remember the lvreg and use it the next time instead of loading
11873 * the variable again.
11875 orig_next_vreg = cfg->next_vreg;
11876 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
11877 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
11881 * These arrays contain the first and last instructions accessing a given
11883 * Since we emit bblocks in the same order we process them here, and we
11884 * don't split live ranges, these will precisely describe the live range of
11885 * the variable, i.e. the instruction range where a valid value can be found
11886 * in the variables location.
11887 * The live range is computed using the liveness info computed by the liveness pass.
11888 * We can't use vmv->range, since that is an abstract live range, and we need
11889 * one which is instruction precise.
11890 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
11892 /* FIXME: Only do this if debugging info is requested */
11893 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
11894 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
11895 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
11896 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
11898 /* Add spill loads/stores */
11899 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
11902 if (cfg->verbose_level > 2)
11903 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
11905 /* Clear vreg_to_lvreg array */
11906 for (i = 0; i < lvregs_len; i++)
11907 vreg_to_lvreg [lvregs [i]] = 0;
11911 MONO_BB_FOR_EACH_INS (bb, ins) {
11912 const char *spec = INS_INFO (ins->opcode);
11913 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
11914 gboolean store, no_lvreg;
11915 int sregs [MONO_MAX_SRC_REGS];
11917 if (G_UNLIKELY (cfg->verbose_level > 2))
11918 mono_print_ins (ins);
11920 if (ins->opcode == OP_NOP)
11924 * We handle LDADDR here as well, since it can only be decomposed
11925 * when variable addresses are known.
11927 if (ins->opcode == OP_LDADDR) {
11928 MonoInst *var = ins->inst_p0;
11930 if (var->opcode == OP_VTARG_ADDR) {
11931 /* Happens on SPARC/S390 where vtypes are passed by reference */
11932 MonoInst *vtaddr = var->inst_left;
11933 if (vtaddr->opcode == OP_REGVAR) {
11934 ins->opcode = OP_MOVE;
11935 ins->sreg1 = vtaddr->dreg;
11937 else if (var->inst_left->opcode == OP_REGOFFSET) {
11938 ins->opcode = OP_LOAD_MEMBASE;
11939 ins->inst_basereg = vtaddr->inst_basereg;
11940 ins->inst_offset = vtaddr->inst_offset;
11944 g_assert (var->opcode == OP_REGOFFSET);
11946 ins->opcode = OP_ADD_IMM;
11947 ins->sreg1 = var->inst_basereg;
11948 ins->inst_imm = var->inst_offset;
11951 *need_local_opts = TRUE;
11952 spec = INS_INFO (ins->opcode);
11955 if (ins->opcode < MONO_CEE_LAST) {
11956 mono_print_ins (ins);
11957 g_assert_not_reached ();
11961 * Store opcodes have destbasereg in the dreg, but in reality, it is an
11965 if (MONO_IS_STORE_MEMBASE (ins)) {
11966 tmp_reg = ins->dreg;
11967 ins->dreg = ins->sreg2;
11968 ins->sreg2 = tmp_reg;
11971 spec2 [MONO_INST_DEST] = ' ';
11972 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
11973 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
11974 spec2 [MONO_INST_SRC3] = ' ';
11976 } else if (MONO_IS_STORE_MEMINDEX (ins))
11977 g_assert_not_reached ();
11982 if (G_UNLIKELY (cfg->verbose_level > 2)) {
11983 printf ("\t %.3s %d", spec, ins->dreg);
11984 num_sregs = mono_inst_get_src_registers (ins, sregs);
11985 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
11986 printf (" %d", sregs [srcindex]);
11993 regtype = spec [MONO_INST_DEST];
11994 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
11997 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
11998 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
11999 MonoInst *store_ins;
12001 MonoInst *def_ins = ins;
12002 int dreg = ins->dreg; /* The original vreg */
12004 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
12006 if (var->opcode == OP_REGVAR) {
12007 ins->dreg = var->dreg;
12008 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
12010 * Instead of emitting a load+store, use a _membase opcode.
12012 g_assert (var->opcode == OP_REGOFFSET);
12013 if (ins->opcode == OP_MOVE) {
12017 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
12018 ins->inst_basereg = var->inst_basereg;
12019 ins->inst_offset = var->inst_offset;
12022 spec = INS_INFO (ins->opcode);
12026 g_assert (var->opcode == OP_REGOFFSET);
12028 prev_dreg = ins->dreg;
12030 /* Invalidate any previous lvreg for this vreg */
12031 vreg_to_lvreg [ins->dreg] = 0;
12035 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
12037 store_opcode = OP_STOREI8_MEMBASE_REG;
12040 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
12042 if (regtype == 'l') {
12043 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
12044 mono_bblock_insert_after_ins (bb, ins, store_ins);
12045 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
12046 mono_bblock_insert_after_ins (bb, ins, store_ins);
12047 def_ins = store_ins;
12050 g_assert (store_opcode != OP_STOREV_MEMBASE);
12052 /* Try to fuse the store into the instruction itself */
12053 /* FIXME: Add more instructions */
12054 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
12055 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
12056 ins->inst_imm = ins->inst_c0;
12057 ins->inst_destbasereg = var->inst_basereg;
12058 ins->inst_offset = var->inst_offset;
12059 spec = INS_INFO (ins->opcode);
12060 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
12061 ins->opcode = store_opcode;
12062 ins->inst_destbasereg = var->inst_basereg;
12063 ins->inst_offset = var->inst_offset;
12067 tmp_reg = ins->dreg;
12068 ins->dreg = ins->sreg2;
12069 ins->sreg2 = tmp_reg;
12072 spec2 [MONO_INST_DEST] = ' ';
12073 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
12074 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
12075 spec2 [MONO_INST_SRC3] = ' ';
12077 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
12078 // FIXME: The backends expect the base reg to be in inst_basereg
12079 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
12081 ins->inst_basereg = var->inst_basereg;
12082 ins->inst_offset = var->inst_offset;
12083 spec = INS_INFO (ins->opcode);
12085 /* printf ("INS: "); mono_print_ins (ins); */
12086 /* Create a store instruction */
12087 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
12089 /* Insert it after the instruction */
12090 mono_bblock_insert_after_ins (bb, ins, store_ins);
12092 def_ins = store_ins;
12095 * We can't assign ins->dreg to var->dreg here, since the
12096 * sregs could use it. So set a flag, and do it after
12099 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
12100 dest_has_lvreg = TRUE;
12105 if (def_ins && !live_range_start [dreg]) {
12106 live_range_start [dreg] = def_ins;
12107 live_range_start_bb [dreg] = bb;
12110 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
12113 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
12114 tmp->inst_c1 = dreg;
12115 mono_bblock_insert_after_ins (bb, def_ins, tmp);
12122 num_sregs = mono_inst_get_src_registers (ins, sregs);
12123 for (srcindex = 0; srcindex < 3; ++srcindex) {
12124 regtype = spec [MONO_INST_SRC1 + srcindex];
12125 sreg = sregs [srcindex];
12127 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
12128 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
12129 MonoInst *var = get_vreg_to_inst (cfg, sreg);
12130 MonoInst *use_ins = ins;
12131 MonoInst *load_ins;
12132 guint32 load_opcode;
12134 if (var->opcode == OP_REGVAR) {
12135 sregs [srcindex] = var->dreg;
12136 //mono_inst_set_src_registers (ins, sregs);
12137 live_range_end [sreg] = use_ins;
12138 live_range_end_bb [sreg] = bb;
12140 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
12143 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
12144 /* var->dreg is a hreg */
12145 tmp->inst_c1 = sreg;
12146 mono_bblock_insert_after_ins (bb, ins, tmp);
12152 g_assert (var->opcode == OP_REGOFFSET);
12154 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
12156 g_assert (load_opcode != OP_LOADV_MEMBASE);
12158 if (vreg_to_lvreg [sreg]) {
12159 g_assert (vreg_to_lvreg [sreg] != -1);
12161 /* The variable is already loaded to an lvreg */
12162 if (G_UNLIKELY (cfg->verbose_level > 2))
12163 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
12164 sregs [srcindex] = vreg_to_lvreg [sreg];
12165 //mono_inst_set_src_registers (ins, sregs);
12169 /* Try to fuse the load into the instruction */
12170 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
12171 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
12172 sregs [0] = var->inst_basereg;
12173 //mono_inst_set_src_registers (ins, sregs);
12174 ins->inst_offset = var->inst_offset;
12175 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
12176 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
12177 sregs [1] = var->inst_basereg;
12178 //mono_inst_set_src_registers (ins, sregs);
12179 ins->inst_offset = var->inst_offset;
12181 if (MONO_IS_REAL_MOVE (ins)) {
12182 ins->opcode = OP_NOP;
12185 //printf ("%d ", srcindex); mono_print_ins (ins);
12187 sreg = alloc_dreg (cfg, stacktypes [regtype]);
12189 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
12190 if (var->dreg == prev_dreg) {
12192 * sreg refers to the value loaded by the load
12193 * emitted below, but we need to use ins->dreg
12194 * since it refers to the store emitted earlier.
12198 g_assert (sreg != -1);
12199 vreg_to_lvreg [var->dreg] = sreg;
12200 g_assert (lvregs_len < 1024);
12201 lvregs [lvregs_len ++] = var->dreg;
12205 sregs [srcindex] = sreg;
12206 //mono_inst_set_src_registers (ins, sregs);
12208 if (regtype == 'l') {
12209 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
12210 mono_bblock_insert_before_ins (bb, ins, load_ins);
12211 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
12212 mono_bblock_insert_before_ins (bb, ins, load_ins);
12213 use_ins = load_ins;
12216 #if SIZEOF_REGISTER == 4
12217 g_assert (load_opcode != OP_LOADI8_MEMBASE);
12219 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
12220 mono_bblock_insert_before_ins (bb, ins, load_ins);
12221 use_ins = load_ins;
12225 if (var->dreg < orig_next_vreg) {
12226 live_range_end [var->dreg] = use_ins;
12227 live_range_end_bb [var->dreg] = bb;
12230 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
12233 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
12234 tmp->inst_c1 = var->dreg;
12235 mono_bblock_insert_after_ins (bb, ins, tmp);
12239 mono_inst_set_src_registers (ins, sregs);
12241 if (dest_has_lvreg) {
12242 g_assert (ins->dreg != -1);
12243 vreg_to_lvreg [prev_dreg] = ins->dreg;
12244 g_assert (lvregs_len < 1024);
12245 lvregs [lvregs_len ++] = prev_dreg;
12246 dest_has_lvreg = FALSE;
12250 tmp_reg = ins->dreg;
12251 ins->dreg = ins->sreg2;
12252 ins->sreg2 = tmp_reg;
12255 if (MONO_IS_CALL (ins)) {
12256 /* Clear vreg_to_lvreg array */
12257 for (i = 0; i < lvregs_len; i++)
12258 vreg_to_lvreg [lvregs [i]] = 0;
12260 } else if (ins->opcode == OP_NOP) {
12262 MONO_INST_NULLIFY_SREGS (ins);
12265 if (cfg->verbose_level > 2)
12266 mono_print_ins_index (1, ins);
12269 /* Extend the live range based on the liveness info */
12270 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
12271 for (i = 0; i < cfg->num_varinfo; i ++) {
12272 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
12274 if (vreg_is_volatile (cfg, vi->vreg))
12275 /* The liveness info is incomplete */
12278 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
12279 /* Live from at least the first ins of this bb */
12280 live_range_start [vi->vreg] = bb->code;
12281 live_range_start_bb [vi->vreg] = bb;
12284 if (mono_bitset_test_fast (bb->live_out_set, i)) {
12285 /* Live at least until the last ins of this bb */
12286 live_range_end [vi->vreg] = bb->last_ins;
12287 live_range_end_bb [vi->vreg] = bb;
12293 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
12295 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
12296 * by storing the current native offset into MonoMethodVar->live_range_start/end.
12298 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
12299 for (i = 0; i < cfg->num_varinfo; ++i) {
12300 int vreg = MONO_VARINFO (cfg, i)->vreg;
12303 if (live_range_start [vreg]) {
12304 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
12306 ins->inst_c1 = vreg;
12307 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
12309 if (live_range_end [vreg]) {
12310 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
12312 ins->inst_c1 = vreg;
12313 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
12314 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
12316 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
12322 g_free (live_range_start);
12323 g_free (live_range_end);
12324 g_free (live_range_start_bb);
12325 g_free (live_range_end_bb);
12330 * - use 'iadd' instead of 'int_add'
12331 * - handling ovf opcodes: decompose in method_to_ir.
12332 * - unify iregs/fregs
12333 * -> partly done, the missing parts are:
12334 * - a more complete unification would involve unifying the hregs as well, so
12335 * code wouldn't need if (fp) all over the place. but that would mean the hregs
12336 * would no longer map to the machine hregs, so the code generators would need to
12337 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
12338 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
12339 * fp/non-fp branches speeds it up by about 15%.
12340 * - use sext/zext opcodes instead of shifts
12342 * - get rid of TEMPLOADs if possible and use vregs instead
12343 * - clean up usage of OP_P/OP_ opcodes
12344 * - cleanup usage of DUMMY_USE
12345 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
12347 * - set the stack type and allocate a dreg in the EMIT_NEW macros
12348 * - get rid of all the <foo>2 stuff when the new JIT is ready.
12349 * - make sure handle_stack_args () is called before the branch is emitted
12350 * - when the new IR is done, get rid of all unused stuff
12351 * - COMPARE/BEQ as separate instructions or unify them ?
12352 * - keeping them separate allows specialized compare instructions like
12353 * compare_imm, compare_membase
12354 * - most back ends unify fp compare+branch, fp compare+ceq
12355 * - integrate mono_save_args into inline_method
12356 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
12357 * - handle long shift opts on 32 bit platforms somehow: they require
12358 * 3 sregs (2 for arg1 and 1 for arg2)
12359 * - make byref a 'normal' type.
12360 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
12361 * variable if needed.
12362 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
12363 * like inline_method.
12364 * - remove inlining restrictions
12365 * - fix LNEG and enable cfold of INEG
12366 * - generalize x86 optimizations like ldelema as a peephole optimization
12367 * - add store_mem_imm for amd64
12368 * - optimize the loading of the interruption flag in the managed->native wrappers
12369 * - avoid special handling of OP_NOP in passes
12370 * - move code inserting instructions into one function/macro.
12371 * - try a coalescing phase after liveness analysis
12372 * - add float -> vreg conversion + local optimizations on !x86
12373 * - figure out how to handle decomposed branches during optimizations, ie.
12374 * compare+branch, op_jump_table+op_br etc.
12375 * - promote RuntimeXHandles to vregs
12376 * - vtype cleanups:
12377 * - add a NEW_VARLOADA_VREG macro
12378 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
12379 * accessing vtype fields.
12380 * - get rid of I8CONST on 64 bit platforms
12381 * - dealing with the increase in code size due to branches created during opcode
12383 * - use extended basic blocks
12384 * - all parts of the JIT
12385 * - handle_global_vregs () && local regalloc
12386 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
12387 * - sources of increase in code size:
12390 * - isinst and castclass
12391 * - lvregs not allocated to global registers even if used multiple times
12392 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
12394 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
12395 * - add all micro optimizations from the old JIT
12396 * - put tree optimizations into the deadce pass
12397 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
12398 * specific function.
12399 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
12400 * fcompare + branchCC.
12401 * - create a helper function for allocating a stack slot, taking into account
12402 * MONO_CFG_HAS_SPILLUP.
12404 * - merge the ia64 switch changes.
12405 * - optimize mono_regstate2_alloc_int/float.
12406 * - fix the pessimistic handling of variables accessed in exception handler blocks.
12407 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
12408 * parts of the tree could be separated by other instructions, killing the tree
12409 * arguments, or stores killing loads etc. Also, should we fold loads into other
12410 * instructions if the result of the load is used multiple times ?
12411 * - make the REM_IMM optimization in mini-x86.c arch-independent.
12412 * - LAST MERGE: 108395.
12413 * - when returning vtypes in registers, generate IR and append it to the end of the
12414 * last bb instead of doing it in the epilog.
12415 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
12423 - When to decompose opcodes:
12424 - earlier: this makes some optimizations hard to implement, since the low level IR
12425 no longer contains the necessary information. But it is easier to do.
12426 - later: harder to implement, enables more optimizations.
12427 - Branches inside bblocks:
12428 - created when decomposing complex opcodes.
12429 - branches to another bblock: harmless, but not tracked by the branch
12430 optimizations, so need to branch to a label at the start of the bblock.
12431 - branches to inside the same bblock: very problematic, trips up the local
12432 reg allocator. Can be fixed by splitting the current bblock, but that is a
12433 complex operation, since some local vregs can become global vregs etc.
12434 - Local/global vregs:
12435 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
12436 local register allocator.
12437 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
12438 structure, created by mono_create_var (). Assigned to hregs or the stack by
12439 the global register allocator.
12440 - When to do optimizations like alu->alu_imm:
12441 - earlier -> saves work later on since the IR will be smaller/simpler
12442 - later -> can work on more instructions
12443 - Handling of valuetypes:
12444 - When a vtype is pushed on the stack, a new temporary is created, an
12445 instruction computing its address (LDADDR) is emitted and pushed on
12446 the stack. Need to optimize cases when the vtype is used immediately as in
12447 argument passing, stloc etc.
12448 - Instead of the to_end stuff in the old JIT, simply call the function handling
12449 the values on the stack before emitting the last instruction of the bb.
12452 #endif /* DISABLE_JIT */