2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
24 #ifdef HAVE_SYS_TIME_H
32 #include <mono/utils/memcheck.h>
34 #include <mono/metadata/assembly.h>
35 #include <mono/metadata/attrdefs.h>
36 #include <mono/metadata/loader.h>
37 #include <mono/metadata/tabledefs.h>
38 #include <mono/metadata/class.h>
39 #include <mono/metadata/object.h>
40 #include <mono/metadata/exception.h>
41 #include <mono/metadata/opcodes.h>
42 #include <mono/metadata/mono-endian.h>
43 #include <mono/metadata/tokentype.h>
44 #include <mono/metadata/tabledefs.h>
45 #include <mono/metadata/marshal.h>
46 #include <mono/metadata/debug-helpers.h>
47 #include <mono/metadata/mono-debug.h>
48 #include <mono/metadata/gc-internal.h>
49 #include <mono/metadata/security-manager.h>
50 #include <mono/metadata/threads-types.h>
51 #include <mono/metadata/security-core-clr.h>
52 #include <mono/metadata/monitor.h>
53 #include <mono/metadata/profiler-private.h>
54 #include <mono/metadata/profiler.h>
55 #include <mono/metadata/debug-mono-symfile.h>
56 #include <mono/utils/mono-compiler.h>
57 #include <mono/utils/mono-memory-model.h>
58 #include <mono/metadata/mono-basic-block.h>
65 #include "jit-icalls.h"
67 #include "debugger-agent.h"
/*
 * Inlining/branch cost constants and error-exit helper macros for the
 * IL-to-IR converter. Each *_FAILURE macro records an exception kind (and,
 * where applicable, a human-readable message) on the MonoCompile and jumps
 * to the function-local `exception_exit` label.
 * NOTE(review): this text is an elided extraction — several macro body and
 * terminator lines are missing, so no comments are inserted inside the
 * backslash-continued bodies.
 */
69 #define BRANCH_COST 10
70 #define INLINE_LENGTH_LIMIT 20
71 #define INLINE_FAILURE do {\
72 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
75 #define CHECK_CFG_EXCEPTION do {\
76 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
79 #define METHOD_ACCESS_FAILURE do { \
80 char *method_fname = mono_method_full_name (method, TRUE); \
81 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
82 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS); \
83 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
84 g_free (method_fname); \
85 g_free (cil_method_fname); \
86 goto exception_exit; \
88 #define FIELD_ACCESS_FAILURE do { \
89 char *method_fname = mono_method_full_name (method, TRUE); \
90 char *field_fname = mono_field_full_name (field); \
91 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS); \
92 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
93 g_free (method_fname); \
94 g_free (field_fname); \
95 goto exception_exit; \
97 #define GENERIC_SHARING_FAILURE(opcode) do { \
98 if (cfg->generic_sharing_context) { \
99 if (cfg->verbose_level > 2) \
100 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
101 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
102 goto exception_exit; \
105 #define OUT_OF_MEMORY_FAILURE do { \
106 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
107 goto exception_exit; \
109 /* Determine whenever 'ins' represents a load of the 'this' argument */
110 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for the opcode-translation helpers defined later in this file. */
112 static int ldind_to_load_membase (int opcode);
113 static int stind_to_store_membase (int opcode);
115 int mono_op_to_op_imm (int opcode);
116 int mono_op_to_op_imm_noemul (int opcode);
118 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
119 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
120 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
/* Cached signatures for runtime helper trampolines; filled in by mono_create_helper_signatures (). */
122 /* helper methods signatures */
123 static MonoMethodSignature *helper_sig_class_init_trampoline = NULL;
124 static MonoMethodSignature *helper_sig_domain_get = NULL;
125 static MonoMethodSignature *helper_sig_generic_class_init_trampoline = NULL;
126 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm = NULL;
127 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline = NULL;
128 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline = NULL;
129 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm = NULL;
/*
 * Per-opcode metadata tables, generated by including mini-ops.h with the
 * MINI_OP/MINI_OP3 macros redefined: first as dest/src register-kind
 * characters, then as source-register counts (ins_sreg_counts).
 */
132 * Instruction metadata
140 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
141 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
147 #if SIZEOF_REGISTER == 8
152 /* keep in sync with the enum in mini.h */
155 #include "mini-ops.h"
/* Second expansion: number of source registers per opcode. */
160 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
161 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
163 * This should contain the index of the last sreg + 1. This is not the same
164 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
166 const gint8 ins_sreg_counts[] = {
167 #include "mini-ops.h"
/* MONO_INIT_VARINFO: initialize a MonoMethodVar's liveness bookkeeping. */
172 #define MONO_INIT_VARINFO(vi,id) do { \
173 (vi)->range.first_use.pos.bid = 0xffff; \
179 mono_inst_set_src_registers (MonoInst *ins, int *regs)
181 ins->sreg1 = regs [0];
182 ins->sreg2 = regs [1];
183 ins->sreg3 = regs [2];
187 mono_alloc_ireg (MonoCompile *cfg)
189 return alloc_ireg (cfg);
193 mono_alloc_freg (MonoCompile *cfg)
195 return alloc_freg (cfg);
199 mono_alloc_preg (MonoCompile *cfg)
201 return alloc_preg (cfg);
205 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
207 return alloc_dreg (cfg, stack_type);
211 * mono_alloc_ireg_ref:
213 * Allocate an IREG, and mark it as holding a GC ref.
216 mono_alloc_ireg_ref (MonoCompile *cfg)
218 return alloc_ireg_ref (cfg);
222 * mono_alloc_ireg_mp:
224 * Allocate an IREG, and mark it as holding a managed pointer.
227 mono_alloc_ireg_mp (MonoCompile *cfg)
229 return alloc_ireg_mp (cfg);
233 * mono_alloc_ireg_copy:
235 * Allocate an IREG with the same GC type as VREG.
238 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
240 if (vreg_is_ref (cfg, vreg))
241 return alloc_ireg_ref (cfg);
242 else if (vreg_is_mp (cfg, vreg))
243 return alloc_ireg_mp (cfg);
245 return alloc_ireg (cfg);
/*
 * mono_type_to_regmove:
 *
 *   NOTE(review): maps a MonoType to the move opcode used when copying a
 * value of that type between vregs. The return statements are missing from
 * this extraction, so the exact opcode per case cannot be confirmed from the
 * visible text. Enums and generic instances are unwrapped to their
 * underlying/container type before dispatch.
 */
249 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
255 switch (type->type) {
258 case MONO_TYPE_BOOLEAN:
270 case MONO_TYPE_FNPTR:
272 case MONO_TYPE_CLASS:
273 case MONO_TYPE_STRING:
274 case MONO_TYPE_OBJECT:
275 case MONO_TYPE_SZARRAY:
276 case MONO_TYPE_ARRAY:
280 #if SIZEOF_REGISTER == 8
289 case MONO_TYPE_VALUETYPE:
/* enum value types behave as their underlying base type */
290 if (type->data.klass->enumtype) {
291 type = mono_class_enum_basetype (type->data.klass);
/* SIMD-capable value types presumably get a vector move — TODO confirm, target line missing */
294 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
297 case MONO_TYPE_TYPEDBYREF:
299 case MONO_TYPE_GENERICINST:
300 type = &type->data.generic_class->container_class->byval_arg;
/* type variables are only legal here under generic sharing */
304 g_assert (cfg->generic_sharing_context);
307 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb:
 *
 *   Debug helper: print a basic block's number, its incoming and outgoing
 * edges (block number plus depth-first number), and every instruction in
 * the block, prefixed with MSG.
 */
313 mono_print_bb (MonoBasicBlock *bb, const char *msg)
318 printf ("\n%s %d: [IN: ", msg, bb->block_num);
319 for (i = 0; i < bb->in_count; ++i)
320 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
322 for (i = 0; i < bb->out_count; ++i)
323 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
/* dump the instruction list */
325 for (tree = bb->code; tree; tree = tree->next)
326 mono_print_ins_index (-1, tree);
330 mono_create_helper_signatures (void)
332 helper_sig_domain_get = mono_create_icall_signature ("ptr");
333 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
334 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
335 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
336 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
337 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
338 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
342 * Can't put this at the beginning, since other files reference stuff from this
/* Bail-out helpers: break into the debugger when requested, otherwise jump to the matching error label. */
347 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
349 #define LOAD_ERROR do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto load_error; } while (0)
351 #define TYPE_LOAD_ERROR(klass) do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else { cfg->exception_ptr = klass; goto load_error; } } while (0)
/* GET_BBLOCK: look up (or lazily create and register) the basic block starting at IL offset IP. */
353 #define GET_BBLOCK(cfg,tblock,ip) do { \
354 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
356 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
357 NEW_BBLOCK (cfg, (tblock)); \
358 (tblock)->cil_code = (ip); \
359 ADD_BBLOCK (cfg, (tblock)); \
363 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* EMIT_NEW_X86_LEA: emit an x86 LEA computing sr1 + (sr2 << shift) + imm into a fresh mp register. */
364 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
365 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
366 (dest)->dreg = alloc_ireg_mp ((cfg)); \
367 (dest)->sreg1 = (sr1); \
368 (dest)->sreg2 = (sr2); \
369 (dest)->inst_imm = (imm); \
370 (dest)->backend.shift_amount = (shift); \
371 MONO_ADD_INS ((cfg)->cbb, (dest)); \
375 #if SIZEOF_REGISTER == 8
/* ADD_WIDEN_OP: on 64-bit targets, sign-extend an I4 operand that is mixed with a PTR operand. */
376 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
377 /* FIXME: Need to add many more cases */ \
378 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
380 int dr = alloc_preg (cfg); \
381 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
382 (ins)->sreg2 = widen->dreg; \
386 #define ADD_WIDEN_OP(ins, arg1, arg2)
/* ADD_BINOP / ADD_UNOP / ADD_BINCOND: pop operand(s) from the eval stack, type-check via type_from_op () and emit the typed opcode. */
389 #define ADD_BINOP(op) do { \
390 MONO_INST_NEW (cfg, ins, (op)); \
392 ins->sreg1 = sp [0]->dreg; \
393 ins->sreg2 = sp [1]->dreg; \
394 type_from_op (ins, sp [0], sp [1]); \
396 /* Have to insert a widening op */ \
397 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
398 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
399 MONO_ADD_INS ((cfg)->cbb, (ins)); \
400 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
403 #define ADD_UNOP(op) do { \
404 MONO_INST_NEW (cfg, ins, (op)); \
406 ins->sreg1 = sp [0]->dreg; \
407 type_from_op (ins, sp [0], NULL); \
409 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
410 MONO_ADD_INS ((cfg)->cbb, (ins)); \
411 *sp++ = mono_decompose_opcode (cfg, ins); \
414 #define ADD_BINCOND(next_block) do { \
417 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
418 cmp->sreg1 = sp [0]->dreg; \
419 cmp->sreg2 = sp [1]->dreg; \
420 type_from_op (cmp, sp [0], sp [1]); \
422 type_from_op (ins, sp [0], sp [1]); \
423 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
424 GET_BBLOCK (cfg, tblock, target); \
425 link_bblock (cfg, bblock, tblock); \
426 ins->inst_true_bb = tblock; \
427 if ((next_block)) { \
428 link_bblock (cfg, bblock, (next_block)); \
429 ins->inst_false_bb = (next_block); \
430 start_new_bblock = 1; \
432 GET_BBLOCK (cfg, tblock, ip); \
433 link_bblock (cfg, bblock, tblock); \
434 ins->inst_false_bb = tblock; \
435 start_new_bblock = 2; \
437 if (sp != stack_start) { \
438 handle_stack_args (cfg, stack_start, sp - stack_start); \
439 CHECK_UNVERIFIABLE (cfg); \
441 MONO_ADD_INS (bblock, cmp); \
442 MONO_ADD_INS (bblock, ins); \
446 * link_bblock: Links two basic blocks
448 * links two basic blocks in the control flow graph, the 'from'
449 * argument is the starting block and the 'to' argument is the block
450 * the control flow ends to after 'from'.
/* NOTE(review): this extraction is missing several lines (verbose-level guard,
 * duplicate-edge early return, array growth/assignment) — read with care. */
453 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
455 MonoBasicBlock **newa;
/* optional debug tracing of the edge being added */
459 if (from->cil_code) {
461 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
463 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
466 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
468 printf ("edge from entry to exit\n");
/* skip if the edge already exists */
473 for (i = 0; i < from->out_count; ++i) {
474 if (to == from->out_bb [i]) {
/* grow from->out_bb by one entry (mempool-allocated, so no free needed) */
480 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
481 for (i = 0; i < from->out_count; ++i) {
482 newa [i] = from->out_bb [i];
/* same dedup + grow for the reverse (incoming) edge list */
490 for (i = 0; i < to->in_count; ++i) {
491 if (from == to->in_bb [i]) {
497 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
498 for (i = 0; i < to->in_count; ++i) {
499 newa [i] = to->in_bb [i];
508 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
510 link_bblock (cfg, from, to);
514 * mono_find_block_region:
516 * We mark each basic block with a region ID. We use that to avoid BB
517 * optimizations when blocks are in different regions.
520 * A region token that encodes where this region is, and information
521 * about the clause owner for this block.
523 * The region encodes the try/catch/filter clause that owns this block
524 * as well as the type. -1 is a special value that represents a block
525 * that is in none of try/catch/filter.
/* NOTE(review): the final `return -1` for offsets in no clause is missing
 * from this extraction. Token layout: (clause index + 1) << 8 | region kind
 * | clause flags. */
528 mono_find_block_region (MonoCompile *cfg, int offset)
530 MonoMethodHeader *header = cfg->header;
531 MonoExceptionClause *clause;
534 for (i = 0; i < header->num_clauses; ++i) {
535 clause = &header->clauses [i];
/* filter blocks: offset inside [filter_offset, handler_offset) */
536 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
537 (offset < (clause->handler_offset)))
538 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
/* handler body: finally / fault / catch, in that priority */
540 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
541 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
542 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
543 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
544 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
546 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* otherwise the offset may be inside the protected (try) range */
549 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
550 return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block:
 *
 *   Collect (as a GList) the exception clauses of kind TYPE whose protected
 * range contains IP but not TARGET — i.e. the handlers that must run when
 * control leaves the clause via a branch from IP to TARGET.
 * NOTE(review): the declaration/initialization of `res` and the final
 * `return res` are missing from this extraction.
 */
557 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
559 MonoMethodHeader *header = cfg->header;
560 MonoExceptionClause *clause;
564 for (i = 0; i < header->num_clauses; ++i) {
565 clause = &header->clauses [i];
566 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
567 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
568 if (clause->flags == type)
569 res = g_list_append (res, clause);
576 mono_create_spvar_for_region (MonoCompile *cfg, int region)
580 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
584 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
585 /* prevent it from being register allocated */
586 var->flags |= MONO_INST_INDIRECT;
588 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
592 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
594 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
598 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
602 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
606 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
607 /* prevent it from being register allocated */
608 var->flags |= MONO_INST_INDIRECT;
610 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
616 * Returns the type used in the eval stack when @type is loaded.
617 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/* NOTE(review): several case labels and `return`/`goto handle_enum` lines are
 * missing from this extraction; the assignments below show the STACK_* kind
 * chosen per MonoType, but the exact case grouping cannot be confirmed. */
620 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
624 inst->klass = klass = mono_class_from_mono_type (type);
/* byref arguments are managed pointers on the eval stack */
626 inst->type = STACK_MP;
631 switch (type->type) {
633 inst->type = STACK_INV;
637 case MONO_TYPE_BOOLEAN:
643 inst->type = STACK_I4;
648 case MONO_TYPE_FNPTR:
649 inst->type = STACK_PTR;
651 case MONO_TYPE_CLASS:
652 case MONO_TYPE_STRING:
653 case MONO_TYPE_OBJECT:
654 case MONO_TYPE_SZARRAY:
655 case MONO_TYPE_ARRAY:
656 inst->type = STACK_OBJ;
660 inst->type = STACK_I8;
664 inst->type = STACK_R8;
666 case MONO_TYPE_VALUETYPE:
/* enums are treated as their underlying base type */
667 if (type->data.klass->enumtype) {
668 type = mono_class_enum_basetype (type->data.klass);
672 inst->type = STACK_VTYPE;
675 case MONO_TYPE_TYPEDBYREF:
676 inst->klass = mono_defaults.typed_reference_class;
677 inst->type = STACK_VTYPE;
679 case MONO_TYPE_GENERICINST:
680 type = &type->data.generic_class->container_class->byval_arg;
683 case MONO_TYPE_MVAR :
684 /* FIXME: all the arguments must be references for now,
685 * later look inside cfg and see if the arg num is
688 g_assert (cfg->generic_sharing_context);
689 inst->type = STACK_OBJ;
692 g_error ("unknown type 0x%02x in eval stack type", type->type);
697 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result stack-type of numeric binops, indexed [src1->type][src2->type];
 * STACK_INV marks an invalid operand combination. */
700 bin_num_table [STACK_MAX] [STACK_MAX] = {
701 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
702 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
703 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
704 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
705 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
706 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
707 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
708 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Result type of unary negation per operand stack type. */
713 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
716 /* reduce the size of this table */
/* Result type of integer-only binops (and/or/xor/...); no R8/MP rows. */
718 bin_int_table [STACK_MAX] [STACK_MAX] = {
719 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
720 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
721 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
722 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
723 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
724 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
725 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
726 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparison validity: 0 = invalid, non-zero = allowed (values >1 flag
 * ECMA-335 "not strictly verifiable" combinations). */
730 bin_comp_table [STACK_MAX] [STACK_MAX] = {
731 /* Inv i L p F & O vt */
733 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
734 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
735 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
736 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
737 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
738 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
739 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
742 /* reduce the size of this table */
/* Result type of shift ops: value type indexed by [value][shift amount]. */
744 shift_table [STACK_MAX] [STACK_MAX] = {
745 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
746 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
747 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
748 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
749 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
750 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
751 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
752 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
756 * Tables to map from the non-specific opcode to the matching
757 * type-specific opcode.
/* Each entry is an opcode delta added to the generic CIL opcode, indexed by
 * the operand's STACK_* kind. */
759 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
761 binops_op_map [STACK_MAX] = {
762 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
765 /* handles from CEE_NEG to CEE_CONV_U8 */
767 unops_op_map [STACK_MAX] = {
768 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
771 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
773 ovfops_op_map [STACK_MAX] = {
774 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
777 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
779 ovf2ops_op_map [STACK_MAX] = {
780 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
783 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
785 ovf3ops_op_map [STACK_MAX] = {
786 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
789 /* handles from CEE_BEQ to CEE_BLT_UN */
791 beqops_op_map [STACK_MAX] = {
792 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
795 /* handles from CEE_CEQ to CEE_CLT_UN */
797 ceqops_op_map [STACK_MAX] = {
798 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
802 * Sets ins->type (the type on the eval stack) according to the
803 * type of the opcode and the arguments to it.
804 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
806 * FIXME: this function sets ins->type unconditionally in some cases, but
807 * it should set it to invalid for some types (a conv.x on an object)
/* NOTE(review): many case labels and break statements are missing from this
 * extraction, so the grouping of opcodes per case below cannot be fully
 * confirmed. The pattern throughout: compute the result STACK_* kind from a
 * validation table, then specialize the generic opcode by adding the delta
 * from the matching *_op_map table. */
810 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
812 switch (ins->opcode) {
/* arithmetic binops */
819 /* FIXME: check unverifiable args for STACK_MP */
820 ins->type = bin_num_table [src1->type] [src2->type];
821 ins->opcode += binops_op_map [ins->type];
/* bitwise binops */
828 ins->type = bin_int_table [src1->type] [src2->type];
829 ins->opcode += binops_op_map [ins->type];
/* shifts */
834 ins->type = shift_table [src1->type] [src2->type];
835 ins->opcode += binops_op_map [ins->type];
/* compares: pick L/F/I variant based on the operand kind */
840 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
841 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
842 ins->opcode = OP_LCOMPARE;
843 else if (src1->type == STACK_R8)
844 ins->opcode = OP_FCOMPARE;
846 ins->opcode = OP_ICOMPARE;
848 case OP_ICOMPARE_IMM:
849 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
850 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
851 ins->opcode = OP_LCOMPARE_IMM;
/* conditional branches and set-on-compare opcodes */
863 ins->opcode += beqops_op_map [src1->type];
866 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
867 ins->opcode += ceqops_op_map [src1->type];
873 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
874 ins->opcode += ceqops_op_map [src1->type];
/* unops */
878 ins->type = neg_table [src1->type];
879 ins->opcode += unops_op_map [ins->type];
882 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
883 ins->type = src1->type;
885 ins->type = STACK_INV;
886 ins->opcode += unops_op_map [ins->type];
/* conversions to small int kinds */
892 ins->type = STACK_I4;
893 ins->opcode += unops_op_map [src1->type];
/* conv.r.un */
896 ins->type = STACK_R8;
897 switch (src1->type) {
900 ins->opcode = OP_ICONV_TO_R_UN;
903 ins->opcode = OP_LCONV_TO_R_UN;
/* overflow-checked conversions */
907 case CEE_CONV_OVF_I1:
908 case CEE_CONV_OVF_U1:
909 case CEE_CONV_OVF_I2:
910 case CEE_CONV_OVF_U2:
911 case CEE_CONV_OVF_I4:
912 case CEE_CONV_OVF_U4:
913 ins->type = STACK_I4;
914 ins->opcode += ovf3ops_op_map [src1->type];
916 case CEE_CONV_OVF_I_UN:
917 case CEE_CONV_OVF_U_UN:
918 ins->type = STACK_PTR;
919 ins->opcode += ovf2ops_op_map [src1->type];
921 case CEE_CONV_OVF_I1_UN:
922 case CEE_CONV_OVF_I2_UN:
923 case CEE_CONV_OVF_I4_UN:
924 case CEE_CONV_OVF_U1_UN:
925 case CEE_CONV_OVF_U2_UN:
926 case CEE_CONV_OVF_U4_UN:
927 ins->type = STACK_I4;
928 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: pointer-sized unsigned conversion, word-size dependent */
931 ins->type = STACK_PTR;
932 switch (src1->type) {
934 ins->opcode = OP_ICONV_TO_U;
938 #if SIZEOF_VOID_P == 8
939 ins->opcode = OP_LCONV_TO_U;
941 ins->opcode = OP_MOVE;
945 ins->opcode = OP_LCONV_TO_U;
948 ins->opcode = OP_FCONV_TO_U;
/* conversions to 64-bit */
954 ins->type = STACK_I8;
955 ins->opcode += unops_op_map [src1->type];
957 case CEE_CONV_OVF_I8:
958 case CEE_CONV_OVF_U8:
959 ins->type = STACK_I8;
960 ins->opcode += ovf3ops_op_map [src1->type];
962 case CEE_CONV_OVF_U8_UN:
963 case CEE_CONV_OVF_I8_UN:
964 ins->type = STACK_I8;
965 ins->opcode += ovf2ops_op_map [src1->type];
/* conversions to floating point */
969 ins->type = STACK_R8;
970 ins->opcode += unops_op_map [src1->type];
973 ins->type = STACK_R8;
977 ins->type = STACK_I4;
978 ins->opcode += ovfops_op_map [src1->type];
983 ins->type = STACK_PTR;
984 ins->opcode += ovfops_op_map [src1->type];
/* overflow-checked arithmetic: R8 results are invalid IL */
992 ins->type = bin_num_table [src1->type] [src2->type];
993 ins->opcode += ovfops_op_map [src1->type];
994 if (ins->type == STACK_R8)
995 ins->type = STACK_INV;
/* memory loads: result kind is fixed by the load opcode */
997 case OP_LOAD_MEMBASE:
998 ins->type = STACK_PTR;
1000 case OP_LOADI1_MEMBASE:
1001 case OP_LOADU1_MEMBASE:
1002 case OP_LOADI2_MEMBASE:
1003 case OP_LOADU2_MEMBASE:
1004 case OP_LOADI4_MEMBASE:
1005 case OP_LOADU4_MEMBASE:
1006 ins->type = STACK_PTR;
1008 case OP_LOADI8_MEMBASE:
1009 ins->type = STACK_I8;
1011 case OP_LOADR4_MEMBASE:
1012 case OP_LOADR8_MEMBASE:
1013 ins->type = STACK_R8;
1016 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* managed pointers carry object_class as their klass by default */
1020 if (ins->type == STACK_MP)
1021 ins->klass = mono_defaults.object_class;
/* Map from MONO_TYPE_* (row fragment below) to the STACK_* kind; and the
 * parameter-compatibility table used by check_values_to_signature ().
 * NOTE(review): the declarations of these tables are missing from this
 * extraction. */
1026 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1032 param_table [STACK_MAX] [STACK_MAX] = {
/*
 * check_values_to_signature:
 *   NOTE(review): validates that the eval-stack kinds of ARGS are compatible
 * with the parameter types of SIG. Several return statements and case labels
 * are missing from this extraction, so per-branch results cannot be
 * confirmed from the visible text.
 */
1037 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1041 switch (args->type) {
1051 for (i = 0; i < sig->param_count; ++i) {
1052 switch (args [i].type) {
1056 if (!sig->params [i]->byref)
1060 if (sig->params [i]->byref)
1062 switch (sig->params [i]->type) {
1063 case MONO_TYPE_CLASS:
1064 case MONO_TYPE_STRING:
1065 case MONO_TYPE_OBJECT:
1066 case MONO_TYPE_SZARRAY:
1067 case MONO_TYPE_ARRAY:
/* R8 on the stack must match a floating point parameter */
1074 if (sig->params [i]->byref)
1076 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1085 /*if (!param_table [args [i].type] [sig->params [i]->type])
1093 * When we need a pointer to the current domain many times in a method, we
1094 * call mono_domain_get() once and we store the result in a local variable.
1095 * This function returns the variable that represents the MonoDomain*.
1097 inline static MonoInst *
1098 mono_get_domainvar (MonoCompile *cfg)
1100 if (!cfg->domainvar)
1101 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1102 return cfg->domainvar;
1106 * The got_var contains the address of the Global Offset Table when AOT
/* NOTE(review): the #else branch (targets without MONO_ARCH_NEED_GOT_VAR),
 * the early `return NULL` body, and the var-creation block tail are missing
 * from this extraction. */
1110 mono_get_got_var (MonoCompile *cfg)
1112 #ifdef MONO_ARCH_NEED_GOT_VAR
/* the GOT var is only needed when compiling ahead-of-time */
1113 if (!cfg->compile_aot)
1115 if (!cfg->got_var) {
1116 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1118 return cfg->got_var;
1125 mono_get_vtable_var (MonoCompile *cfg)
1127 g_assert (cfg->generic_sharing_context);
1129 if (!cfg->rgctx_var) {
1130 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1131 /* force the var to be stack allocated */
1132 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1135 return cfg->rgctx_var;
/*
 * type_from_stack_type:
 *
 *   Map an instruction's eval-stack kind (STACK_*) back to a MonoType*.
 * NOTE(review): the return type and the STACK_MP/default case labels are
 * missing from this extraction — line 1146 presumably belongs to STACK_MP,
 * which yields the klass's this_arg (a managed pointer); verify against the
 * full source.
 */
1139 type_from_stack_type (MonoInst *ins) {
1140 switch (ins->type) {
1141 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1142 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1143 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1144 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1146 return &ins->klass->this_arg;
1147 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1148 case STACK_VTYPE: return &ins->klass->byval_arg;
1150 g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * type_to_stack_type:
 *
 *   Map a MonoType to its STACK_* eval-stack kind after stripping enum
 * wrappers via mono_type_get_underlying_type ().
 * NOTE(review): every `return STACK_*` line and several case labels are
 * missing from this extraction, so the mapping per case cannot be confirmed
 * from the visible text.
 */
1155 static G_GNUC_UNUSED int
1156 type_to_stack_type (MonoType *t)
1158 t = mono_type_get_underlying_type (t);
1162 case MONO_TYPE_BOOLEAN:
1165 case MONO_TYPE_CHAR:
1172 case MONO_TYPE_FNPTR:
1174 case MONO_TYPE_CLASS:
1175 case MONO_TYPE_STRING:
1176 case MONO_TYPE_OBJECT:
1177 case MONO_TYPE_SZARRAY:
1178 case MONO_TYPE_ARRAY:
1186 case MONO_TYPE_VALUETYPE:
1187 case MONO_TYPE_TYPEDBYREF:
1189 case MONO_TYPE_GENERICINST:
/* generic instances over value types are VTYPEs, otherwise objects */
1190 if (mono_type_generic_inst_is_valuetype (t))
1196 g_assert_not_reached ();
/*
 * array_access_to_klass:
 *
 *   Return the element MonoClass implied by a CEE_LDELEM_*/CEE_STELEM_*
 * opcode.
 * NOTE(review): the case labels preceding most returns are missing from this
 * extraction; only the LDELEM_REF/STELEM_REF pair is visible, so the exact
 * opcode→class pairing cannot be confirmed from the visible text.
 */
1203 array_access_to_klass (int opcode)
1207 return mono_defaults.byte_class;
1209 return mono_defaults.uint16_class;
1212 return mono_defaults.int_class;
1215 return mono_defaults.sbyte_class;
1218 return mono_defaults.int16_class;
1221 return mono_defaults.int32_class;
1223 return mono_defaults.uint32_class;
1226 return mono_defaults.int64_class;
1229 return mono_defaults.single_class;
1232 return mono_defaults.double_class;
1233 case CEE_LDELEM_REF:
1234 case CEE_STELEM_REF:
1235 return mono_defaults.object_class;
1237 g_assert_not_reached ();
1243 * We try to share variables when possible
/*
 * mono_compile_get_interface_var:
 *
 *   Return a local variable to carry the eval-stack item INS across basic
 * block boundaries at stack SLOT, reusing a cached variable from
 * cfg->intvars when one of the same slot and stack kind already exists.
 * NOTE(review): the switch case labels and the final return are missing
 * from this extraction.
 */
1246 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1251 /* inlining can result in deeper stacks */
1252 if (slot >= cfg->header->max_stack)
1253 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1255 pos = ins->type - 1 + slot * STACK_MAX;
1257 switch (ins->type) {
/* reuse the cached variable for this (slot, kind) when present */
1264 if ((vnum = cfg->intvars [pos]))
1265 return cfg->varinfo [vnum];
1266 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1267 cfg->intvars [pos] = res->inst_c0;
1270 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1276 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1279 * Don't use this if a generic_context is set, since that means AOT can't
1280 * look up the method using just the image+token.
1281 * table == 0 means this is a reference made from a wrapper.
1283 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1284 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1285 jump_info_token->image = image;
1286 jump_info_token->token = token;
1287 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1292 * This function is called to handle items that are left on the evaluation stack
1293 * at basic block boundaries. What happens is that we save the values to local variables
1294 * and we reload them later when first entering the target basic block (with the
1295 * handle_loaded_temps () function).
1296 * A single join point will use the same variables (stored in the array bb->out_stack or
1297 * bb->in_stack, if the basic block is before or after the join point).
1299 * This function needs to be called _before_ emitting the last instruction of
1300 * the bb (i.e. before emitting a branch).
1301 * If the stack merge fails at a join point, cfg->unverifiable is set.
1304 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1307 MonoBasicBlock *bb = cfg->cbb;
1308 MonoBasicBlock *outb;
1309 MonoInst *inst, **locals;
1314 if (cfg->verbose_level > 3)
1315 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First time this bb's exit stack is processed: allocate/share out_stack. */
1316 if (!bb->out_scount) {
1317 bb->out_scount = count;
1318 //printf ("bblock %d has out:", bb->block_num);
/* Prefer sharing the in_stack of a successor so both sides use the same temps. */
1320 for (i = 0; i < bb->out_count; ++i) {
1321 outb = bb->out_bb [i];
1322 /* exception handlers are linked, but they should not be considered for stack args */
1323 if (outb->flags & BB_EXCEPTION_HANDLER)
1325 //printf (" %d", outb->block_num);
1326 if (outb->in_stack) {
1328 bb->out_stack = outb->in_stack;
/* No successor had an in_stack: allocate a fresh array of temps. */
1334 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1335 for (i = 0; i < count; ++i) {
1337 * try to reuse temps already allocated for this purpose, if they occupy the same
1338 * stack slot and if they are of the same type.
1339 * This won't cause conflicts since if 'local' is used to
1340 * store one of the values in the in_stack of a bblock, then
1341 * the same variable will be used for the same outgoing stack
1343 * This doesn't work when inlining methods, since the bblocks
1344 * in the inlined methods do not inherit their in_stack from
1345 * the bblock they are inlined to. See bug #58863 for an
1348 if (cfg->inlined_method)
1349 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1351 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate this bb's out_stack to each non-handler successor's in_stack. */
1356 for (i = 0; i < bb->out_count; ++i) {
1357 outb = bb->out_bb [i];
1358 /* exception handlers are linked, but they should not be considered for stack args */
1359 if (outb->flags & BB_EXCEPTION_HANDLER)
1361 if (outb->in_scount) {
1362 if (outb->in_scount != bb->out_scount) {
/* Stack depth mismatch at a join point: mark the method unverifiable. */
1363 cfg->unverifiable = TRUE;
1366 continue; /* check they are the same locals */
1368 outb->in_scount = count;
1369 outb->in_stack = bb->out_stack;
1372 locals = bb->out_stack;
/* Spill each evaluation-stack item into its temp and replace it on the stack. */
1374 for (i = 0; i < count; ++i) {
1375 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1376 inst->cil_code = sp [i]->cil_code;
1377 sp [i] = locals [i];
1378 if (cfg->verbose_level > 3)
1379 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1383 * It is possible that the out bblocks already have in_stack assigned, and
1384 * the in_stacks differ. In this case, we will store to all the different
1391 /* Find a bblock which has a different in_stack */
1393 while (bindex < bb->out_count) {
1394 outb = bb->out_bb [bindex];
1395 /* exception handlers are linked, but they should not be considered for stack args */
1396 if (outb->flags & BB_EXCEPTION_HANDLER) {
1400 if (outb->in_stack != locals) {
/* Store the same values again into this successor's distinct temp set. */
1401 for (i = 0; i < count; ++i) {
1402 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1403 inst->cil_code = sp [i]->cil_code;
1404 sp [i] = locals [i];
1405 if (cfg->verbose_level > 3)
1406 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1408 locals = outb->in_stack;
1417 /* Emit code which loads interface_offsets [klass->interface_id]
1418 * The array is stored in memory before vtable.
1421 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1423 if (cfg->compile_aot) {
/* AOT: the interface id is not known at compile time, so load an
 * "adjusted" iid constant via a patch and index relative to the vtable. */
1424 int ioffset_reg = alloc_preg (cfg);
1425 int iid_reg = alloc_preg (cfg);
1427 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1428 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1429 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT: the iid is a known constant, so use a negative fixed offset
 * (the offsets array lives immediately before the vtable). */
1432 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * Emit code which sets "intf_bit_reg" to a nonzero value if the interface
 * bitmap found at base_reg+offset has the bit for klass's interface id set.
 */
1437 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1439 int ibitmap_reg = alloc_preg (cfg);
1440 #ifdef COMPRESSED_INTERFACE_BITMAP
/* Compressed bitmap: delegate the bit test to the
 * mono_class_interface_match icall instead of open-coding it. */
1442 MonoInst *res, *ins;
1443 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1444 MONO_ADD_INS (cfg->cbb, ins);
1446 if (cfg->compile_aot)
1447 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1449 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1450 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1451 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1453 int ibitmap_byte_reg = alloc_preg (cfg);
1455 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1457 if (cfg->compile_aot) {
/* AOT: compute byte index (iid >> 3) and bit mask (1 << (iid & 7))
 * at run time from the patched iid constant. */
1458 int iid_reg = alloc_preg (cfg);
1459 int shifted_iid_reg = alloc_preg (cfg);
1460 int ibitmap_byte_address_reg = alloc_preg (cfg);
1461 int masked_iid_reg = alloc_preg (cfg);
1462 int iid_one_bit_reg = alloc_preg (cfg);
1463 int iid_bit_reg = alloc_preg (cfg);
1464 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1465 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1466 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1467 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1468 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1469 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1470 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1471 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: iid is a compile-time constant, so both the byte offset and the
 * bit mask fold into immediates. */
1473 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1474 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1480 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1481 * stored in "klass_reg" implements the interface "klass".
1484 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
/* Thin wrapper: test against the bitmap embedded in MonoClass. */
1486 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1490 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1491 * stored in "vtable_reg" implements the interface "klass".
1494 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
/* Thin wrapper: test against the bitmap embedded in MonoVTable. */
1496 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1500 * Emit code which checks whether the interface id of @klass is smaller
1501 * than the value given by max_iid_reg.
1504 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1505 MonoBasicBlock *false_target)
1507 if (cfg->compile_aot) {
/* AOT: the iid must be materialized through a patch constant. */
1508 int iid_reg = alloc_preg (cfg);
1509 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1510 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1513 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
/* With a false_target we branch; otherwise fail hard with a cast exception. */
1515 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1517 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1520 /* Same as above, but obtains max_iid from a vtable */
1522 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1523 MonoBasicBlock *false_target)
1525 int max_iid_reg = alloc_preg (cfg);
/* Load MonoVTable.max_interface_id (16-bit unsigned) then do the range check. */
1527 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1528 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1531 /* Same as above, but obtains max_iid from a klass */
1533 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1534 MonoBasicBlock *false_target)
1536 int max_iid_reg = alloc_preg (cfg);
/* Load MonoClass.max_interface_id (16-bit unsigned) then do the range check. */
1538 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1539 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * Emit an isinst-style subclass test: walk the supertypes table of the class in
 * klass_reg and compare the entry at klass's depth against klass (given either
 * as a runtime MonoInst, an AOT class constant, or an immediate pointer).
 * Branches to true_target on match, false_target when the depth check fails.
 */
1543 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1545 int idepth_reg = alloc_preg (cfg);
1546 int stypes_reg = alloc_preg (cfg);
1547 int stype = alloc_preg (cfg);
1549 mono_class_setup_supertypes (klass);
/* Only classes deeper than the default supertable need an explicit idepth check. */
1551 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1552 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1553 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1554 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
/* Load supertypes [klass->idepth - 1] and compare it against klass. */
1556 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1557 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1559 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1560 } else if (cfg->compile_aot) {
1561 int const_reg = alloc_preg (cfg);
1562 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1563 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1565 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1567 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper for the common case of no runtime class instruction. */
1571 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1573 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * Emit an interface cast test against the vtable in vtable_reg:
 * range-check the interface id, then test the interface bitmap bit.
 * With true_target set, branch on success; otherwise throw on failure.
 */
1577 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1579 int intf_reg = alloc_preg (cfg);
1581 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1582 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1583 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1585 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1587 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1591 * Variant of the above that takes a register to the class, not the vtable.
1594 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1596 int intf_bit_reg = alloc_preg (cfg);
1598 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1599 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1600 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
/* Nonzero bit => implements interface; branch or throw accordingly. */
1602 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1604 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * Emit an exact class-equality check: compare klass_reg against klass
 * (runtime inst, AOT constant, or immediate) and throw
 * InvalidCastException on mismatch.
 */
1608 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1611 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1612 } else if (cfg->compile_aot) {
1613 int const_reg = alloc_preg (cfg);
1614 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1615 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1617 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1619 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact class check with no runtime class instruction. */
1623 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1625 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * Compare klass_reg against klass and branch to target using branch_op
 * (instead of throwing like mini_emit_class_check).
 */
1629 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1631 if (cfg->compile_aot) {
1632 int const_reg = alloc_preg (cfg);
1633 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1634 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1636 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1638 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1642 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * Emit a castclass check for the object in obj_reg whose class is in klass_reg.
 * For array classes the rank and element class are checked (recursing via
 * mini_emit_castclass for arrays of arrays); otherwise the supertypes table
 * is consulted. Throws InvalidCastException on failure; object_is_null is
 * the block to branch to when a check can be satisfied trivially.
 */
1645 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1648 int rank_reg = alloc_preg (cfg);
1649 int eclass_reg = alloc_preg (cfg);
1651 g_assert (!klass_inst);
/* Array case: rank must match exactly, then check the element class. */
1652 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1653 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1654 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1655 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1656 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
1657 if (klass->cast_class == mono_defaults.object_class) {
1658 int parent_reg = alloc_preg (cfg);
1659 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1660 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1661 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1662 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1663 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1664 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1665 } else if (klass->cast_class == mono_defaults.enum_class) {
1666 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1667 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1668 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1670 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1671 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1674 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1675 /* Check that the object is a vector too */
1676 int bounds_reg = alloc_preg (cfg);
/* SZARRAYs have a NULL bounds pointer; a non-NULL one means a multi-dim array. */
1677 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1678 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1679 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array case: depth check plus supertypes-table comparison. */
1682 int idepth_reg = alloc_preg (cfg);
1683 int stypes_reg = alloc_preg (cfg);
1684 int stype = alloc_preg (cfg);
1686 mono_class_setup_supertypes (klass);
1688 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1689 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1690 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1691 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1693 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1694 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1695 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass with no runtime class instruction. */
1700 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1702 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * Emit inline code which stores "size" bytes of value "val" (only val == 0 is
 * supported for the register path) at destreg+offset, using the widest stores
 * the alignment allows.
 */
1706 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1710 g_assert (val == 0);
/* Small aligned sizes collapse into a single store-immediate. */
1715 if ((size <= 4) && (size <= align)) {
1718 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1721 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1724 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1726 #if SIZEOF_REGISTER == 8
1728 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General path: materialize val in a register once, then store repeatedly. */
1734 val_reg = alloc_preg (cfg);
1736 if (SIZEOF_REGISTER == 8)
1737 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1739 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned destination: fall back to byte stores. */
1742 /* This could be optimized further if necessary */
1744 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1751 #if !NO_UNALIGNED_ACCESS
1752 if (SIZEOF_REGISTER == 8) {
1754 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1759 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* Drain the remainder with progressively narrower stores. */
1767 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1772 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1777 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * Emit inline code which copies "size" bytes from srcreg+soffset to
 * destreg+doffset, using the widest load/store pairs the alignment allows.
 */
1784 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1791 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1792 g_assert (size < 10000);
/* Unaligned copy: byte-by-byte load/store pairs. */
1795 /* This could be optimized further if necessary */
1797 cur_reg = alloc_preg (cfg);
1798 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1799 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1806 #if !NO_UNALIGNED_ACCESS
/* 64-bit targets: copy 8 bytes at a time while possible. */
1807 if (SIZEOF_REGISTER == 8) {
1809 cur_reg = alloc_preg (cfg);
1810 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1811 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Drain the remainder with 4-, 2- and 1-byte copies. */
1820 cur_reg = alloc_preg (cfg);
1821 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1822 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1828 cur_reg = alloc_preg (cfg);
1829 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1830 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1836 cur_reg = alloc_preg (cfg);
1837 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1838 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * Map a method return type to the matching call opcode, selecting the
 * _REG (indirect) or VIRT variants when "calli"/"virt" are set. Enums and
 * generic instances are resolved to their underlying type first.
 */
1846 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
1849 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1852 type = mini_get_basic_type_from_generic (gsctx, type);
1853 switch (type->type) {
1854 case MONO_TYPE_VOID:
1855 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1858 case MONO_TYPE_BOOLEAN:
1861 case MONO_TYPE_CHAR:
1864 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1868 case MONO_TYPE_FNPTR:
1869 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1870 case MONO_TYPE_CLASS:
1871 case MONO_TYPE_STRING:
1872 case MONO_TYPE_OBJECT:
1873 case MONO_TYPE_SZARRAY:
1874 case MONO_TYPE_ARRAY:
1875 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1878 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1881 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1882 case MONO_TYPE_VALUETYPE:
/* Enums behave like their underlying integral type. */
1883 if (type->data.klass->enumtype) {
1884 type = mono_class_enum_basetype (type->data.klass);
1887 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1888 case MONO_TYPE_TYPEDBYREF:
1889 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1890 case MONO_TYPE_GENERICINST:
/* Resolve to the generic type definition and re-dispatch. */
1891 type = &type->data.generic_class->container_class->byval_arg;
1894 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1900 * target_type_is_incompatible:
1901 * @cfg: MonoCompile context
1903 * Check that the item @arg on the evaluation stack can be stored
1904 * in the target type (can be a local, or field, etc).
1905 * The cfg arg can be used to check if we need verification or just
1908 * Returns: non-0 value if arg can't be stored on a target.
1911 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1913 MonoType *simple_type;
1916 if (target->byref) {
1917 /* FIXME: check that the pointed to types match */
1918 if (arg->type == STACK_MP)
1919 return arg->klass != mono_class_from_mono_type (target);
1920 if (arg->type == STACK_PTR)
/* Compare against the underlying (enum-resolved) type. */
1925 simple_type = mono_type_get_underlying_type (target);
1926 switch (simple_type->type) {
1927 case MONO_TYPE_VOID:
1931 case MONO_TYPE_BOOLEAN:
1934 case MONO_TYPE_CHAR:
1937 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1941 /* STACK_MP is needed when setting pinned locals */
1942 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1947 case MONO_TYPE_FNPTR:
1949 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
1950 * in native int. (#688008).
1952 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1955 case MONO_TYPE_CLASS:
1956 case MONO_TYPE_STRING:
1957 case MONO_TYPE_OBJECT:
1958 case MONO_TYPE_SZARRAY:
1959 case MONO_TYPE_ARRAY:
1960 if (arg->type != STACK_OBJ)
1962 /* FIXME: check type compatibility */
1966 if (arg->type != STACK_I8)
1971 if (arg->type != STACK_R8)
1974 case MONO_TYPE_VALUETYPE:
/* Value types must match the exact class, not just the stack kind. */
1975 if (arg->type != STACK_VTYPE)
1977 klass = mono_class_from_mono_type (simple_type);
1978 if (klass != arg->klass)
1981 case MONO_TYPE_TYPEDBYREF:
1982 if (arg->type != STACK_VTYPE)
1984 klass = mono_class_from_mono_type (simple_type);
1985 if (klass != arg->klass)
1988 case MONO_TYPE_GENERICINST:
1989 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1990 if (arg->type != STACK_VTYPE)
1992 klass = mono_class_from_mono_type (simple_type);
1993 if (klass != arg->klass)
1997 if (arg->type != STACK_OBJ)
1999 /* FIXME: check type compatibility */
2003 case MONO_TYPE_MVAR:
2004 /* FIXME: all the arguments must be references for now,
2005 * later look inside cfg and see if the arg num is
2006 * really a reference
2008 g_assert (cfg->generic_sharing_context);
2009 if (arg->type != STACK_OBJ)
2013 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2019 * Prepare arguments for passing to a function call.
2020 * Return a non-zero value if the arguments can't be passed to the given
2022 * The type checks are not yet complete and some conversions may need
2023 * casts on 32 or 64 bit architectures.
2025 * FIXME: implement this using target_type_is_incompatible ()
2028 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2030 MonoType *simple_type;
/* "this" (args [0]) must be an object, managed pointer or native pointer. */
2034 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2038 for (i = 0; i < sig->param_count; ++i) {
2039 if (sig->params [i]->byref) {
2040 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2044 simple_type = sig->params [i];
2045 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
2047 switch (simple_type->type) {
2048 case MONO_TYPE_VOID:
2053 case MONO_TYPE_BOOLEAN:
2056 case MONO_TYPE_CHAR:
2059 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2065 case MONO_TYPE_FNPTR:
2066 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2069 case MONO_TYPE_CLASS:
2070 case MONO_TYPE_STRING:
2071 case MONO_TYPE_OBJECT:
2072 case MONO_TYPE_SZARRAY:
2073 case MONO_TYPE_ARRAY:
2074 if (args [i]->type != STACK_OBJ)
2079 if (args [i]->type != STACK_I8)
2084 if (args [i]->type != STACK_R8)
2087 case MONO_TYPE_VALUETYPE:
/* Enums re-dispatch on their underlying integral type. */
2088 if (simple_type->data.klass->enumtype) {
2089 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2092 if (args [i]->type != STACK_VTYPE)
2095 case MONO_TYPE_TYPEDBYREF:
2096 if (args [i]->type != STACK_VTYPE)
2099 case MONO_TYPE_GENERICINST:
2100 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2104 g_error ("unknown type 0x%02x in check_call_signature",
/* Map a *CALLVIRT opcode to the corresponding direct *CALL opcode. */
2112 callvirt_to_call (int opcode)
2117 case OP_VOIDCALLVIRT:
2126 g_assert_not_reached ();
/* Map a *CALLVIRT opcode to the corresponding *CALL_MEMBASE opcode
 * (call through a slot loaded from memory, e.g. a vtable entry). */
2133 callvirt_to_call_membase (int opcode)
2137 return OP_CALL_MEMBASE;
2138 case OP_VOIDCALLVIRT:
2139 return OP_VOIDCALL_MEMBASE;
2141 return OP_FCALL_MEMBASE;
2143 return OP_LCALL_MEMBASE;
2145 return OP_VCALL_MEMBASE;
2147 g_assert_not_reached ();
2153 #ifdef MONO_ARCH_HAVE_IMT
/*
 * Set up the hidden IMT argument for an interface call: either the supplied
 * imt_arg or the method itself (as an AOT patch constant or raw pointer),
 * placed in the architecture's IMT register or handed to the backend.
 */
2155 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2159 if (COMPILE_LLVM (cfg)) {
/* LLVM backend: pass the IMT value through a vreg recorded on the call. */
2160 method_reg = alloc_preg (cfg)
2163 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2164 } else if (cfg->compile_aot) {
2165 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2168 MONO_INST_NEW (cfg, ins, OP_PCONST);
2169 ins->inst_p0 = call->method;
2170 ins->dreg = method_reg;
2171 MONO_ADD_INS (cfg->cbb, ins);
2175 call->imt_arg_reg = method_reg;
2177 #ifdef MONO_ARCH_IMT_REG
2178 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2180 /* Need this to keep the IMT arg alive */
2181 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
2186 #ifdef MONO_ARCH_IMT_REG
/* Non-LLVM path with a dedicated IMT register. */
2187 method_reg = alloc_preg (cfg);
2190 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2191 } else if (cfg->compile_aot) {
2192 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2195 MONO_INST_NEW (cfg, ins, OP_PCONST);
2196 ins->inst_p0 = call->method;
2197 ins->dreg = method_reg;
2198 MONO_ADD_INS (cfg->cbb, ins);
2201 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* No IMT register: let the architecture backend emit the argument. */
2203 mono_arch_emit_imt_argument (cfg, call, imt_arg);
2208 static MonoJumpInfo *
/* Allocate a MonoJumpInfo from mempool mp describing a patch of the given
 * type/target at IL offset ip. */
2209 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2211 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2215 ji->data.target = target;
2220 inline static MonoCallInst *
/*
 * Build a MonoCallInst for a call with signature "sig" and arguments "args".
 * calli selects indirect call opcodes, virtual selects CALLVIRT variants,
 * tail produces OP_TAILCALL, rgctx marks an rgctx-carrying call.
 * Handles valuetype returns (vret var / OP_OUTARG_VTRETADDR) and the
 * soft-float r8->r4 argument conversion before delegating outarg setup
 * to the LLVM or architecture backend.
 */
2221 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2222 MonoInst **args, int calli, int virtual, int tail, int rgctx)
2225 #ifdef MONO_ARCH_SOFT_FLOAT
2230 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2232 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2235 call->signature = sig;
2236 call->rgctx_reg = rgctx;
2238 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
/* Struct return: either reuse cfg->vret_addr or create a temp and pass
 * its address via OP_OUTARG_VTRETADDR. */
2241 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2242 call->vret_var = cfg->vret_addr;
2243 //g_assert_not_reached ();
2245 } else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2246 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2249 temp->backend.is_pinvoke = sig->pinvoke;
2252 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2253 * address of return value to increase optimization opportunities.
2254 * Before vtype decomposition, the dreg of the call ins itself represents the
2255 * fact the call modifies the return value. After decomposition, the call will
2256 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2257 * will be transformed into an LDADDR.
2259 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2260 loada->dreg = alloc_preg (cfg);
2261 loada->inst_p0 = temp;
2262 /* We reference the call too since call->dreg could change during optimization */
2263 loada->inst_p1 = call;
2264 MONO_ADD_INS (cfg->cbb, loada);
2266 call->inst.dreg = temp->dreg;
2268 call->vret_var = loada;
2269 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2270 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2272 #ifdef MONO_ARCH_SOFT_FLOAT
2273 if (COMPILE_SOFT_FLOAT (cfg)) {
2275 * If the call has a float argument, we would need to do an r8->r4 conversion using
2276 * an icall, but that cannot be done during the call sequence since it would clobber
2277 * the call registers + the stack. So we do it before emitting the call.
2279 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2281 MonoInst *in = call->args [i];
2283 if (i >= sig->hasthis)
2284 t = sig->params [i - sig->hasthis];
2286 t = &mono_defaults.int_class->byval_arg;
2287 t = mono_type_get_underlying_type (t);
2289 if (!t->byref && t->type == MONO_TYPE_R4) {
2290 MonoInst *iargs [1];
2294 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2296 /* The result will be in an int vreg */
2297 call->args [i] = conv;
/* Hand the out-argument setup to the active backend. */
2304 if (COMPILE_LLVM (cfg))
2305 mono_llvm_emit_call (cfg, call);
2307 mono_arch_emit_call (cfg, call);
2309 mono_arch_emit_call (cfg, call);
2312 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2313 cfg->flags |= MONO_CFG_HAS_CALLS;
/* Attach the rgctx (runtime generic context) argument to a call: in the
 * dedicated MONO_ARCH_RGCTX_REG when one exists, otherwise record the vreg
 * on the call for the backend. */
2319 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2321 #ifdef MONO_ARCH_RGCTX_REG
2322 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2323 cfg->uses_rgctx_reg = TRUE;
2324 call->rgctx_reg = TRUE;
2326 call->rgctx_arg_reg = rgctx_reg;
2333 inline static MonoInst*
/*
 * Emit an indirect call through "addr" with the given signature and args,
 * optionally carrying an rgctx argument. Returns the call instruction.
 */
2334 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
/* Copy the rgctx value before building the call so it survives arg setup. */
2340 rgctx_reg = mono_alloc_preg (cfg);
2341 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2344 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE);
2346 call->inst.sreg1 = addr->dreg;
2348 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2351 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2353 return (MonoInst*)call;
2357 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2359 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * Emit a (possibly virtual) call to "method". Handles remoting proxies,
 * string ctor signature fix-up, delegate Invoke fast paths, devirtualization
 * of non-virtual/sealed methods, and vtable/IMT dispatch for the rest.
 * "this" non-NULL selects virtual dispatch; imt_arg/rgctx_arg are optional.
 */
2362 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2363 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2365 gboolean might_be_remote;
2366 gboolean virtual = this != NULL;
2367 gboolean enable_for_aot = TRUE;
/* Copy the rgctx value early so it survives the call-argument setup. */
2373 rgctx_reg = mono_alloc_preg (cfg);
2374 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2377 if (method->string_ctor) {
2378 /* Create the real signature */
2379 /* FIXME: Cache these */
2380 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2381 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2386 context_used = mono_method_check_context_used (method);
/* A non-virtual call on a MarshalByRef (or object) "this" may hit a
 * transparent proxy, requiring the remoting-invoke-with-check wrapper. */
2388 might_be_remote = this && sig->hasthis &&
2389 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2390 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
2392 if (might_be_remote && context_used) {
2395 g_assert (cfg->generic_sharing_context);
2397 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2399 return mono_emit_calli (cfg, sig, args, addr, NULL);
2402 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE, rgctx_arg ? TRUE : FALSE);
2404 if (might_be_remote)
2405 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2407 call->method = method;
2408 call->inst.flags |= MONO_INST_HAS_METHOD;
2409 call->inst.inst_left = this;
2412 int vtable_reg, slot_reg, this_reg;
2414 this_reg = this->dreg;
2416 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2417 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2418 MonoInst *dummy_use;
2420 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2422 /* Make a call to delegate->invoke_impl */
2423 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2424 call->inst.inst_basereg = this_reg;
2425 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2426 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2428 /* We must emit a dummy use here because the delegate trampoline will
2429 replace the 'this' argument with the delegate target making this activation
2430 no longer a root for the delegate.
2431 This is an issue for delegates that target collectible code such as dynamic
2432 methods of GC'able assemblies.
2434 For a test case look into #667921.
2436 FIXME: a dummy use is not the best way to do it as the local register allocator
2437 will put it on a caller save register and spill it around the call.
2438 Ideally, we would either put it on a callee save register or only do the store part.
2440 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2442 return (MonoInst*)call;
/* Devirtualize: non-virtual methods and sealed virtuals become direct calls. */
2446 if ((!cfg->compile_aot || enable_for_aot) &&
2447 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2448 (MONO_METHOD_IS_FINAL (method) &&
2449 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2450 !(method->klass->marshalbyref && context_used)) {
2452 * the method is not virtual, we just need to ensure this is not null
2453 * and then we can call the method directly.
2455 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2457 * The check above ensures method is not gshared, this is needed since
2458 * gshared methods can't have wrappers.
2460 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2463 if (!method->string_ctor)
2464 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2466 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2467 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2469 * the method is virtual, but we can statically dispatch since either
2470 * its class or the method itself are sealed.
2471 * But first we need to ensure it's not a null reference.
2473 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2475 call->inst.opcode = callvirt_to_call (call->inst.opcode);
/* True virtual dispatch: call through a vtable (or IMT) slot. */
2477 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2479 vtable_reg = alloc_preg (cfg);
2480 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2481 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2483 #ifdef MONO_ARCH_HAVE_IMT
/* Interface call through the IMT: slots live before the vtable. */
2485 guint32 imt_slot = mono_method_get_imt_slot (method);
2486 emit_imt_argument (cfg, call, imt_arg);
2487 slot_reg = vtable_reg;
2488 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2491 if (slot_reg == -1) {
/* No IMT: locate the interface's slot area via interface_offsets. */
2492 slot_reg = alloc_preg (cfg);
2493 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2494 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
/* Class virtual call: index directly into the vtable slots. */
2497 slot_reg = vtable_reg;
2498 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2499 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2500 #ifdef MONO_ARCH_HAVE_IMT
2502 g_assert (mono_method_signature (method)->generic_param_count);
2503 emit_imt_argument (cfg, call, imt_arg);
2508 call->inst.sreg1 = slot_reg;
2509 call->virtual = TRUE;
2513 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2516 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2518 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *
 *   Convenience wrapper around mono_emit_method_call_full (): call METHOD with
 * its own signature and no imt/rgctx arguments.
 */
2522 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2524 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL, NULL);
/*
 * mono_emit_native_call:
 *
 *   Emit a direct call to the native function FUNC with signature SIG.
 * The call instruction is appended to the current basic block and returned.
 */
2528 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2535 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE);
2538 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2540 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *
 *   Emit a call to the JIT icall registered for the address FUNC.  The icall
 * info is looked up by address and the call goes through the icall's wrapper
 * using the icall's registered signature.
 */
2544 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2546 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2550 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2554 * mono_emit_abs_call:
2556 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2558 inline static MonoInst*
2559 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2560 MonoMethodSignature *sig, MonoInst **args)
/* Allocate the patch info from the cfg mempool so its lifetime matches the compile. */
2562 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2566 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Record ji in abs_patches so the backend can recognize this fake call address. */
2569 if (cfg->abs_patches == NULL)
2570 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2571 g_hash_table_insert (cfg->abs_patches, ji, ji);
2572 ins = mono_emit_native_call (cfg, ji, sig, args);
/* Mark the call so later passes know fptr is a patch, not a real address. */
2573 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *
 *   Widen the result of a call returning a small integer type: pinvoke or
 * LLVM-compiled code might leave the upper bits of the return register
 * uninitialized, so emit an explicit sign/zero extension of the result.
 */
2578 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2580 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2581 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2585 * Native code might return non register sized integers
2586 * without initializing the upper bits.
/* Pick the conversion opcode matching the load opcode of the return type. */
2588 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2589 case OP_LOADI1_MEMBASE:
2590 widen_op = OP_ICONV_TO_I1;
2592 case OP_LOADU1_MEMBASE:
2593 widen_op = OP_ICONV_TO_U1;
2595 case OP_LOADI2_MEMBASE:
2596 widen_op = OP_ICONV_TO_I2;
2598 case OP_LOADU2_MEMBASE:
2599 widen_op = OP_ICONV_TO_U2;
/* Only emit the widening when the return type actually needs one. */
2605 if (widen_op != -1) {
2606 int dreg = alloc_preg (cfg);
2609 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2610 widen->type = ins->type;
/*
 * get_memcpy_method:
 *
 *   Return the (lazily cached) internal "memcpy" helper method that corlib
 * defines on String; aborts if the installed corlib is too old to have it.
 */
2620 get_memcpy_method (void)
2622 static MonoMethod *memcpy_method = NULL;
2623 if (!memcpy_method) {
2624 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2626 g_error ("Old corlib found. Install a new one");
2628 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 *   Recursively compute a bitmap (one bit per pointer-sized slot, relative to
 * OFFSET) of which slots of KLASS hold managed references and therefore need
 * a write barrier when copied.  Static fields are skipped; embedded value
 * types with references are recursed into.
 */
2632 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
2634 MonoClassField *field;
2635 gpointer iter = NULL;
2637 while ((field = mono_class_get_fields (klass, &iter))) {
2640 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* For valuetypes, field->offset includes the MonoObject header; strip it. */
2642 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2643 if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
/* Reference slots must be pointer-aligned for the per-slot bitmap to work. */
2644 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2645 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2647 MonoClass *field_class = mono_class_from_mono_type (field->type);
2648 if (field_class->has_references)
2649 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *
 *   Emit a GC write barrier for a store of VALUE (or, if VALUE is NULL, the
 * register VALUE_REG) through the pointer PTR.  Picks the cheapest available
 * strategy: an inlined arch-specific card-table barrier opcode, a hand-emitted
 * card-table mark, or a call to the GC's generic write-barrier method.
 * No-op if the compile does not generate write barriers.
 */
2655 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value, int value_reg)
2657 int card_table_shift_bits;
2658 gpointer card_table_mask;
2660 MonoInst *dummy_use;
2661 int nursery_shift_bits;
2662 size_t nursery_size;
2663 gboolean has_card_table_wb = FALSE;
2665 if (!cfg->gen_write_barriers)
2668 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
2670 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
2672 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
2673 has_card_table_wb = TRUE;
/* Fast path: the backend lowers OP_CARD_TABLE_WBARRIER itself (not under AOT). */
2676 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0) {
2679 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2680 wbarrier->sreg1 = ptr->dreg;
2682 wbarrier->sreg2 = value->dreg;
2684 wbarrier->sreg2 = value_reg;
2685 MONO_ADD_INS (cfg->cbb, wbarrier);
2686 } else if (card_table) {
/* Mark the card by hand: offset = ptr >> shift (optionally masked), then
 * store 1 into card_table [offset]. */
2687 int offset_reg = alloc_preg (cfg);
2688 int card_reg = alloc_preg (cfg);
2691 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
2692 if (card_table_mask)
2693 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
2695 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
2696 * IMM's larger than 32bits.
2698 if (cfg->compile_aot) {
2699 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
2701 MONO_INST_NEW (cfg, ins, OP_PCONST);
2702 ins->inst_p0 = card_table;
2703 ins->dreg = card_reg;
2704 MONO_ADD_INS (cfg->cbb, ins);
2707 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
2708 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* Slow path: call the GC's generic write barrier method. */
2710 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2711 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep the stored value alive across the barrier so the GC can see it. */
2715 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
2717 MONO_INST_NEW (cfg, dummy_use, OP_DUMMY_USE);
2718 dummy_use->sreg1 = value_reg;
2719 MONO_ADD_INS (cfg->cbb, dummy_use);
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Emit a write-barrier-aware copy of a valuetype of class KLASS.
 * iargs [0]/[1] hold the dest/src addresses.  For large copies, delegates to
 * the mono_gc_wbarrier_value_copy_bitmap icall with a per-slot reference
 * bitmap; for small copies, emits an unrolled load/store sequence with an
 * explicit write barrier after each reference slot.  Returns early (without
 * emitting) when the bitmap approach cannot be used — presumably the caller
 * then falls back to a plain copy path (TODO confirm; the return statements
 * are elided in this view).
 */
2724 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
2726 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
2727 unsigned need_wb = 0;
2732 /*types with references can't have alignment smaller than sizeof(void*) */
2733 if (align < SIZEOF_VOID_P)
2736 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
2737 if (size > 32 * SIZEOF_VOID_P)
2740 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
2742 /* We don't unroll more than 5 stores to avoid code bloat. */
2743 if (size > 5 * SIZEOF_VOID_P) {
2744 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
/* Round size up to a multiple of the pointer size. */
2745 size += (SIZEOF_VOID_P - 1);
2746 size &= ~(SIZEOF_VOID_P - 1);
2748 EMIT_NEW_ICONST (cfg, iargs [2], size);
2749 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
2750 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
/* Unrolled path: copy pointer-sized chunks, barrier after reference slots. */
2754 destreg = iargs [0]->dreg;
2755 srcreg = iargs [1]->dreg;
2758 dest_ptr_reg = alloc_preg (cfg);
2759 tmp_reg = alloc_preg (cfg);
2762 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
2764 while (size >= SIZEOF_VOID_P) {
2765 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, tmp_reg, srcreg, offset);
2766 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
2769 emit_write_barrier (cfg, iargs [0], NULL, tmp_reg);
2771 offset += SIZEOF_VOID_P;
2772 size -= SIZEOF_VOID_P;
2775 /*tmp += sizeof (void*)*/
2776 if (size >= SIZEOF_VOID_P) {
2777 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
2778 MONO_ADD_INS (cfg->cbb, iargs [0]);
2782 /* Those cannot be references since size < sizeof (void*) */
/* Copy the sub-pointer-sized tail in 4-, 2-, then 1-byte chunks. */
2784 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
2785 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
2791 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
2792 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
2798 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
2799 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
2808 * Emit code to copy a valuetype of type @klass whose address is stored in
2809 * @src->dreg to memory whose address is stored at @dest->dreg.
2812 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2814 MonoInst *iargs [4];
2817 MonoMethod *memcpy_method;
2821 * This check breaks with spilled vars... need to handle it during verification anyway.
2822 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* Size/alignment differ between native (marshalled) and managed layout. */
2826 n = mono_class_native_size (klass, &align);
2828 n = mono_class_value_size (klass, &align);
2830 /* if native is true there should be no references in the struct */
2831 if (cfg->gen_write_barriers && klass->has_references && !native) {
2832 /* Avoid barriers when storing to the stack */
2833 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2834 (dest->opcode == OP_LDADDR))) {
2835 int context_used = 0;
2840 if (cfg->generic_sharing_context)
2841 context_used = mono_class_check_context_used (klass);
2843 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
2844 if ((cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
2846 } else if (context_used) {
/* Shared code: fetch the klass from the rgctx at run time. */
2847 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2849 if (cfg->compile_aot) {
2850 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2852 EMIT_NEW_PCONST (cfg, iargs [2], klass);
/* Precompute the GC descriptor so mono_value_copy doesn't have to. */
2853 mono_class_compute_gc_descriptor (klass);
2857 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No references (or native layout): plain memcpy is enough. */
2862 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2863 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2864 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2868 EMIT_NEW_ICONST (cfg, iargs [2], n);
2870 memcpy_method = get_memcpy_method ();
2871 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return the (lazily cached) internal "memset" helper method that corlib
 * defines on String; aborts if the installed corlib is too old to have it.
 */
2876 get_memset_method (void)
2878 static MonoMethod *memset_method = NULL;
2879 if (!memset_method) {
2880 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2882 g_error ("Old corlib found. Install a new one");
2884 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code to zero-initialize a valuetype of class KLASS at the address in
 * DEST->dreg: inlined memset for small sizes, otherwise a call to the corlib
 * memset helper.
 */
2888 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2890 MonoInst *iargs [3];
2893 MonoMethod *memset_method;
2895 /* FIXME: Optimize this for the case when dest is an LDADDR */
2897 mono_class_init (klass);
2898 n = mono_class_value_size (klass, &align);
/* Small sizes are cheaper to clear inline than via a call. */
2900 if (n <= sizeof (gpointer) * 5) {
2901 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2904 memset_method = get_memset_method ();
2906 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2907 EMIT_NEW_ICONST (cfg, iargs [2], n);
2908 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR to load the runtime generic context for METHOD.  Depending on how
 * the generic context is shared, the rgctx comes from the method's mrgctx
 * variable, from the vtable variable (static/valuetype methods), or from the
 * vtable of the 'this' argument.  Only valid under generic sharing.
 */
2913 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2915 MonoInst *this = NULL;
2917 g_assert (cfg->generic_sharing_context);
/* Instance method not using method-level context: load 'this' (arg 0). */
2919 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2920 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2921 !method->klass->valuetype)
2922 EMIT_NEW_ARGLOAD (cfg, this, 0);
2924 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
/* Method-level context: the mrgctx is stored in the vtable variable. */
2925 MonoInst *mrgctx_loc, *mrgctx_var;
2928 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2930 mrgctx_loc = mono_get_vtable_var (cfg);
2931 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2934 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
/* No 'this' available: the vtable is kept in a dedicated variable. */
2935 MonoInst *vtable_loc, *vtable_var;
2939 vtable_loc = mono_get_vtable_var (cfg);
2940 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2942 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
/* The variable actually holds an mrgctx; load the vtable out of it. */
2943 MonoInst *mrgctx_var = vtable_var;
2946 vtable_reg = alloc_preg (cfg);
2947 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2948 vtable_var->type = STACK_PTR;
/* Default: read the vtable straight from the 'this' object header. */
2956 vtable_reg = alloc_preg (cfg);
2957 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (from MP) and fill in an rgctx-entry patch descriptor used by
 * the lazy rgctx fetch trampoline: which method, whether the lookup goes
 * through the mrgctx, the wrapped patch (type + data), and the info type.
 */
2962 static MonoJumpInfoRgctxEntry *
2963 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
2965 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2966 res->method = method;
2967 res->in_mrgctx = in_mrgctx;
2968 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2969 res->data->type = patch_type;
2970 res->data->data.target = patch_data;
2971 res->info_type = info_type;
/* Emit a call to the lazy rgctx fetch trampoline to resolve ENTRY, passing the
 * rgctx as the single argument. */
2976 static inline MonoInst*
2977 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2979 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR to fetch the RGCTX_TYPE info (e.g. klass, vtable, cast cache) of
 * KLASS from the runtime generic context of the current method.
 */
2983 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2984 MonoClass *klass, MonoRgctxInfoType rgctx_type)
2986 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2987 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2989 return emit_rgctx_fetch (cfg, rgctx, entry);
2993 * emit_get_rgctx_method:
2995 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2996 * normal constants, else emit a load from the rgctx.
2999 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3000 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
/* Non-shared code: the requested info is a compile-time constant. */
3002 if (!context_used) {
3005 switch (rgctx_type) {
3006 case MONO_RGCTX_INFO_METHOD:
3007 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3009 case MONO_RGCTX_INFO_METHOD_RGCTX:
3010 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
/* Other info types are not expected for the constant path. */
3013 g_assert_not_reached ();
/* Shared code: fetch the info from the rgctx at run time. */
3016 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3017 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3019 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR to fetch the RGCTX_TYPE info of FIELD from the runtime generic
 * context of the current method.
 */
3024 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3025 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3027 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3028 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3030 return emit_rgctx_fetch (cfg, rgctx, entry);
3034 * On return the caller must check @klass for load errors.
/*
 * emit_generic_class_init:
 *
 *   Emit a call to the generic class init trampoline for KLASS, passing the
 * class's vtable (fetched from the rgctx under generic sharing, otherwise a
 * compile-time constant).  On architectures with a dedicated vtable register
 * the argument is passed in that register.
 */
3037 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
3039 MonoInst *vtable_arg;
3041 int context_used = 0;
3043 if (cfg->generic_sharing_context)
3044 context_used = mono_class_check_context_used (klass);
3047 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3048 klass, MONO_RGCTX_INFO_VTABLE);
3050 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3054 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* LLVM needs a trampoline variant with a different calling convention. */
3057 if (COMPILE_LLVM (cfg))
3058 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3060 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3061 #ifdef MONO_ARCH_VTABLE_REG
3062 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3063 cfg->uses_vtable_reg = TRUE;
/*
 * emit_seq_point:
 *
 *   Emit a debugger sequence point for the IL offset of IP, but only when
 * sequence points are enabled and we are compiling METHOD itself (i.e. not
 * while inlining it into another method).
 */
3070 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc)
3074 if (cfg->gen_seq_points && cfg->method == method) {
3075 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3076 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *
 *   When --debug=casts is enabled, emit code that records the source class
 * (read from the object's vtable) and the target class of an upcoming cast
 * into per-thread JIT TLS, so a failed cast can report both types.
 */
3081 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
3083 if (mini_get_debug_options ()->better_cast_details) {
3084 int to_klass_reg = alloc_preg (cfg);
3085 int vtable_reg = alloc_preg (cfg);
3086 int klass_reg = alloc_preg (cfg);
3087 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
/* No TLS intrinsic on this platform: the feature cannot work. */
3090 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3094 MONO_ADD_INS (cfg->cbb, tls_get);
3095 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3096 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3098 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3099 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3100 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * reset_cast_details:
 *
 *   Clear the per-thread cast-details state recorded by save_cast_details ()
 * once the cast has succeeded.
 */
3105 reset_cast_details (MonoCompile *cfg)
3107 /* Reset the variables holding the cast details */
3108 if (mini_get_debug_options ()->better_cast_details) {
3109 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3111 MONO_ADD_INS (cfg->cbb, tls_get);
3112 /* It is enough to reset the from field */
3113 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3118 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit a runtime check that OBJ is exactly of type ARRAY_CLASS, throwing
 * ArrayTypeMismatchException otherwise (used for covariant array stores).
 * Depending on the compile mode the comparison is done against the class
 * (shared/AOT) or directly against the vtable.
 */
3121 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3123 int vtable_reg = alloc_preg (cfg);
3124 int context_used = 0;
3126 if (cfg->generic_sharing_context)
3127 context_used = mono_class_check_context_used (array_class);
3129 save_cast_details (cfg, array_class, obj->dreg);
/* Faulting load: also acts as the null check on OBJ. */
3131 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3133 if (cfg->opt & MONO_OPT_SHARED) {
/* Domain-shared code: compare classes, since vtables are per-domain. */
3134 int class_reg = alloc_preg (cfg);
3135 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3136 if (cfg->compile_aot) {
3137 int klass_reg = alloc_preg (cfg);
3138 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3139 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3141 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3143 } else if (context_used) {
/* Generic sharing: fetch the expected vtable from the rgctx. */
3144 MonoInst *vtable_ins;
3146 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3147 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3149 if (cfg->compile_aot) {
3153 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3155 vt_reg = alloc_preg (cfg);
3156 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3157 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3160 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3162 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3166 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3168 reset_cast_details (cfg);
3172 * Handles unbox of a Nullable<T>. If context_used is non-zero, then shared
3173 * generic code is generated.
3176 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
/* The actual unboxing is implemented by Nullable<T>.Unbox in managed code. */
3178 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3181 MonoInst *rgctx, *addr;
3183 /* FIXME: What if the class is shared? We might not
3184 have to get the address of the method from the
/* Shared code: resolve the method's code address through the rgctx and emit
 * an indirect call. */
3186 addr = emit_get_rgctx_method (cfg, context_used, method,
3187 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3189 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3191 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
/* Non-shared code: a direct call is enough. */
3193 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *
 *   Emit the unbox sequence for a non-nullable valuetype KLASS: verify at run
 * time that the object on top of the stack (sp [0]) is a boxed instance of
 * the right element class (throwing InvalidCastException otherwise) and
 * produce a managed pointer past the MonoObject header.
 */
3198 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3202 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3203 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3204 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3205 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3207 obj_reg = sp [0]->dreg;
/* Faulting load: doubles as the null check. */
3208 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3209 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3211 /* FIXME: generics */
3212 g_assert (klass->rank == 0);
/* An array can never unbox to a valuetype. */
3215 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3216 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3218 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3219 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
/* Shared code: the expected element class is fetched from the rgctx. */
3222 MonoInst *element_class;
3224 /* This assertion is from the unboxcast insn */
3225 g_assert (klass->rank == 0);
3227 element_class = emit_get_rgctx_klass (cfg, context_used,
3228 klass->element_class, MONO_RGCTX_INFO_KLASS);
3230 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3231 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3233 save_cast_details (cfg, klass->element_class, obj_reg);
3234 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3235 reset_cast_details (cfg);
/* The unboxed data lives right after the MonoObject header. */
3238 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3239 MONO_ADD_INS (cfg->cbb, add);
3240 add->type = STACK_MP;
3247 * Returns NULL and sets the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit code to allocate a new object of class KLASS (FOR_BOX indicates a
 * box allocation).  Picks between the managed GC allocator, a specialized
 * helper, or the generic mono_object_new* icalls depending on sharing mode,
 * AOT, and what the GC offers.
 */
3250 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3252 MonoInst *iargs [2];
/* Shared-code path: KLASS is open, so its vtable/klass must come from the rgctx. */
3258 MonoInst *iargs [2];
3261 FIXME: we cannot get managed_alloc here because we can't get
3262 the class's vtable (because it's not a closed class)
3264 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3265 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
/* Under MONO_OPT_SHARED the icall takes a klass, otherwise a vtable. */
3268 if (cfg->opt & MONO_OPT_SHARED)
3269 rgctx_info = MONO_RGCTX_INFO_KLASS;
3271 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3272 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3274 if (cfg->opt & MONO_OPT_SHARED) {
3275 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3277 alloc_ftn = mono_object_new;
3280 alloc_ftn = mono_object_new_specific;
3283 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-shared paths. */
3286 if (cfg->opt & MONO_OPT_SHARED) {
3287 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3288 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3290 alloc_ftn = mono_object_new;
3291 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3292 /* This happens often in argument checking code, eg. throw new FooException... */
3293 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3294 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3295 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3297 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3298 MonoMethod *managed_alloc = NULL;
/* Vtable creation failed: report a type load error via the cfg. */
3302 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3303 cfg->exception_ptr = klass;
3307 #ifndef MONO_CROSS_COMPILE
3308 managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3311 if (managed_alloc) {
3312 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3313 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3315 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocators take the instance size in pointer-sized words first. */
3317 guint32 lw = vtable->klass->instance_size;
3318 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3319 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3320 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3323 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3327 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3331 * Returns NULL and sets the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit the box sequence for VAL of class KLASS: Nullable<T> goes through
 * the managed Nullable<T>.Box method, everything else allocates a boxed
 * object and stores VAL after the MonoObject header.
 */
3334 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
3336 MonoInst *alloc, *ins;
3338 if (mono_class_is_nullable (klass)) {
3339 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3342 /* FIXME: What if the class is shared? We might not
3343 have to get the method address from the RGCTX. */
/* Shared code: resolve Box's code address via the rgctx, call indirectly. */
3344 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3345 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3346 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3348 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3350 return mono_emit_method_call (cfg, method, &val, NULL);
3354 alloc = handle_alloc (cfg, klass, TRUE, context_used);
/* Copy the value payload right after the object header. */
3358 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return whether KLASS has at least one covariant/contravariant generic
 * parameter instantiated with a reference type — such casts need the slower
 * variance-aware cast helpers.
 */
3365 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
3368 MonoGenericContainer *container;
3369 MonoGenericInst *ginst;
3371 if (klass->generic_class) {
/* Inflated class: parameters come from the generic definition. */
3372 container = klass->generic_class->container_class->generic_container;
3373 ginst = klass->generic_class->context.class_inst;
3374 } else if (klass->generic_container && context_used) {
/* Open generic class inside shared code. */
3375 container = klass->generic_container;
3376 ginst = container->context.class_inst;
3381 for (i = 0; i < container->type_argc; ++i) {
/* Only variant (in/out) parameters matter. */
3383 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
3385 type = ginst->type_argv [i];
3386 if (mini_type_is_reference (cfg, type))
/* Decide whether an isinst/castclass for KLASS needs the icall slow path.
 * The leading TRUE || currently forces the slow path for every class; the
 * intended fast-path exclusions are kept for when the FIXME below is fixed. */
3392 // FIXME: This doesn't work yet (class libs tests fail?)
3393 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
3396 * Returns NULL and sets the cfg exception on error.
/*
 * handle_castclass:
 *
 *   Emit the castclass sequence for SRC against KLASS.  Variant generic
 * interfaces go through the cached managed castclass helper; complex classes
 * go through the mono_object_castclass icall; otherwise an inline vtable /
 * klass comparison throwing InvalidCastException on mismatch is emitted.
 * A null SRC always passes.
 */
3399 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3401 MonoBasicBlock *is_null_bb;
3402 int obj_reg = src->dreg;
3403 int vtable_reg = alloc_preg (cfg);
3404 MonoInst *klass_inst = NULL;
/* Variant generic argument: use the managed cast helper with its cast cache. */
3409 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
3410 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
3411 MonoInst *cache_ins;
3413 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3418 /* klass - it's the second element of the cache entry*/
3419 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3422 args [2] = cache_ins;
3424 return mono_emit_method_call (cfg, mono_castclass, args, NULL);
/* Shared code: fetch the target klass from the rgctx. */
3427 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3429 if (is_complex_isinst (klass)) {
3430 /* Complex case, handle by an icall */
3436 args [1] = klass_inst;
3438 return mono_emit_jit_icall (cfg, mono_object_castclass, args);
3440 /* Simple case, handled by the code below */
3444 NEW_BBLOCK (cfg, is_null_bb);
/* null passes any castclass. */
3446 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3447 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3449 save_cast_details (cfg, klass, obj_reg);
3451 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3452 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3453 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3455 int klass_reg = alloc_preg (cfg);
3457 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array class: an exact klass/vtable equality check suffices. */
3459 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3460 /* the remoting code is broken, access the class for now */
3461 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3462 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3464 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3465 cfg->exception_ptr = klass;
3468 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3470 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3471 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3473 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* General case: walk the class hierarchy. */
3475 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3476 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3480 MONO_START_BB (cfg, is_null_bb);
3482 reset_cast_details (cfg);
3488 * Returns NULL and sets the cfg exception on error.
3491 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3494 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3495 int obj_reg = src->dreg;
3496 int vtable_reg = alloc_preg (cfg);
3497 int res_reg = alloc_ireg_ref (cfg);
3498 MonoInst *klass_inst = NULL;
3503 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
3504 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
3505 MonoInst *cache_ins;
3507 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3512 /* klass - it's the second element of the cache entry*/
3513 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3516 args [2] = cache_ins;
3518 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
3521 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3523 if (is_complex_isinst (klass)) {
3524 /* Complex case, handle by an icall */
3530 args [1] = klass_inst;
3532 return mono_emit_jit_icall (cfg, mono_object_isinst, args);
3534 /* Simple case, the code below can handle it */
3538 NEW_BBLOCK (cfg, is_null_bb);
3539 NEW_BBLOCK (cfg, false_bb);
3540 NEW_BBLOCK (cfg, end_bb);
3542 /* Do the assignment at the beginning, so the other assignment can be if converted */
3543 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3544 ins->type = STACK_OBJ;
3547 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3548 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3550 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3552 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3553 g_assert (!context_used);
3554 /* the is_null_bb target simply copies the input register to the output */
3555 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3557 int klass_reg = alloc_preg (cfg);
3560 int rank_reg = alloc_preg (cfg);
3561 int eclass_reg = alloc_preg (cfg);
3563 g_assert (!context_used);
3564 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3565 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3566 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3567 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3568 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
3569 if (klass->cast_class == mono_defaults.object_class) {
3570 int parent_reg = alloc_preg (cfg);
3571 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3572 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3573 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3574 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3575 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3576 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3577 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3578 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3579 } else if (klass->cast_class == mono_defaults.enum_class) {
3580 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3581 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3582 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3583 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3585 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3586 /* Check that the object is a vector too */
3587 int bounds_reg = alloc_preg (cfg);
3588 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3589 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3590 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3593 /* the is_null_bb target simply copies the input register to the output */
3594 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3596 } else if (mono_class_is_nullable (klass)) {
3597 g_assert (!context_used);
3598 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3599 /* the is_null_bb target simply copies the input register to the output */
3600 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
3602 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3603 g_assert (!context_used);
3604 /* the remoting code is broken, access the class for now */
3605 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3606 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3608 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3609 cfg->exception_ptr = klass;
3612 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3614 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3615 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3617 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3618 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3620 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3621 /* the is_null_bb target simply copies the input register to the output */
3622 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
3627 MONO_START_BB (cfg, false_bb);
3629 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3630 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3632 MONO_START_BB (cfg, is_null_bb);
3634 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the internal CISINST opcode: a remoting-aware isinst test.
 * The integer result (see the comment below for the encoding) is left in
 * a freshly allocated integer vreg (dreg).
 * NOTE(review): some original lines (braces/returns) are missing from this
 * extraction; only the visible statements are annotated.
 */
3640 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3642 /* This opcode takes as input an object reference and a class, and returns:
3643 0) if the object is an instance of the class,
3644 1) if the object is not an instance of the class,
3645 2) if the object is a proxy whose type cannot be determined */
3648 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3649 int obj_reg = src->dreg;
3650 int dreg = alloc_ireg (cfg);
3652 int klass_reg = alloc_preg (cfg);
3654 NEW_BBLOCK (cfg, true_bb);
3655 NEW_BBLOCK (cfg, false_bb);
3656 NEW_BBLOCK (cfg, false2_bb);
3657 NEW_BBLOCK (cfg, end_bb);
3658 NEW_BBLOCK (cfg, no_proxy_bb);
/* A null reference is never an instance: jump straight to the '1' result. */
3660 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3661 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3663 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
/* Interface case: try the interface bitmap first; on failure, check whether
 * the object is a transparent proxy before giving a definite 'no'. */
3664 NEW_BBLOCK (cfg, interface_fail_bb);
3666 tmp_reg = alloc_preg (cfg);
3667 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3668 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
3669 MONO_START_BB (cfg, interface_fail_bb);
3670 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Not a proxy -> definitely not an instance (result 1). */
3672 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
/* Proxy with custom type info -> type cannot be decided at JIT time (result 2). */
3674 tmp_reg = alloc_preg (cfg);
3675 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3676 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3677 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
/* Non-interface case: load the object's class from its vtable. */
3679 tmp_reg = alloc_preg (cfg);
3680 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3681 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3683 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Transparent proxy: test against the remote class' proxy_class instead. */
3684 tmp_reg = alloc_preg (cfg);
3685 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3686 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
/* Without custom type info the proxy behaves like a plain object. */
3688 tmp_reg = alloc_preg (cfg);
3689 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3690 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3691 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3693 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3694 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3696 MONO_START_BB (cfg, no_proxy_bb);
3698 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Result blocks: materialize the 0/1/2 answer and fall through to end_bb. */
3701 MONO_START_BB (cfg, false_bb);
3703 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3704 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3706 MONO_START_BB (cfg, false2_bb);
3708 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3709 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3711 MONO_START_BB (cfg, true_bb);
3713 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3715 MONO_START_BB (cfg, end_bb);
/* Placeholder instruction carrying the result vreg back to the caller —
 * presumably its dreg is set to dreg on a dropped line; TODO confirm. */
3718 MONO_INST_NEW (cfg, ins, OP_ICONST);
3720 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the internal CCASTCLASS opcode, a remoting-aware castclass.
 * See the encoding comment below; failing casts throw InvalidCastException
 * at runtime instead of producing a result value.
 */
3726 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3728 /* This opcode takes as input an object reference and a class, and returns:
3729 0) if the object is an instance of the class,
3730 1) if the object is a proxy whose type cannot be determined
3731 an InvalidCastException exception is thrown otherwise*/
3734 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3735 int obj_reg = src->dreg;
3736 int dreg = alloc_ireg (cfg);
3737 int tmp_reg = alloc_preg (cfg);
3738 int klass_reg = alloc_preg (cfg);
3740 NEW_BBLOCK (cfg, end_bb);
3741 NEW_BBLOCK (cfg, ok_result_bb);
/* Casting a null reference always succeeds (result 0). */
3743 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3744 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record class/object info so a failing cast can produce a detailed message. */
3746 save_cast_details (cfg, klass, obj_reg);
3748 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
/* Interface case: bitmap check first; the failure path only tolerates
 * transparent proxies carrying custom type info. */
3749 NEW_BBLOCK (cfg, interface_fail_bb);
3751 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3752 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3753 MONO_START_BB (cfg, interface_fail_bb);
3754 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Throws InvalidCastException unless the object is a transparent proxy. */
3756 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
/* Proxy without custom type info cannot satisfy the interface -> throw. */
3758 tmp_reg = alloc_preg (cfg);
3759 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3760 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3761 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Otherwise the runtime must decide later: result 1. */
3763 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3764 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Non-interface case: load the class, special-case transparent proxies. */
3767 NEW_BBLOCK (cfg, no_proxy_bb);
3769 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3770 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3771 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Proxy: test against the remote class' proxy_class. */
3773 tmp_reg = alloc_preg (cfg);
3774 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3775 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3777 tmp_reg = alloc_preg (cfg);
3778 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3779 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3780 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3782 NEW_BBLOCK (cfg, fail_1_bb);
3784 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
/* Cast could not be proven on a custom-typed proxy: defer to runtime (1). */
3786 MONO_START_BB (cfg, fail_1_bb);
3788 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3789 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3791 MONO_START_BB (cfg, no_proxy_bb);
/* Regular object: full castclass check, throws on mismatch. */
3793 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3796 MONO_START_BB (cfg, ok_result_bb);
3798 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3800 MONO_START_BB (cfg, end_bb);
/* Placeholder instruction carrying the result vreg back to the caller —
 * presumably its dreg is set to dreg on a dropped line; TODO confirm. */
3803 MONO_INST_NEW (cfg, ins, OP_ICONST);
3805 ins->type = STACK_I4;
3811 * Returns NULL and sets the cfg exception on error.
/*
 * handle_delegate_ctor:
 *
 *   Allocate a delegate of type KLASS and initialize it inline, doing the
 * work of mono_delegate_ctor () in emitted IR: store the target object and
 * target METHOD, optionally reserve a per-domain code slot, and install the
 * delegate invoke trampoline.  Returns the MonoInst for the new object.
 */
3813 static G_GNUC_UNUSED MonoInst*
3814 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
3818 gpointer *trampoline;
3819 MonoInst *obj, *method_ins, *tramp_ins;
3823 obj = handle_alloc (cfg, klass, FALSE, 0);
3827 /* Inline the contents of mono_delegate_ctor */
3829 /* Set target field */
3830 /* Optimize away setting of NULL target */
3831 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
3832 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* A reference store into a heap object needs a write barrier under SGen. */
3833 if (cfg->gen_write_barriers) {
3834 dreg = alloc_preg (cfg);
3835 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target));
3836 emit_write_barrier (cfg, ptr, target, 0);
3840 /* Set method field */
3841 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
3842 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3843 if (cfg->gen_write_barriers) {
3844 dreg = alloc_preg (cfg);
3845 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method));
3846 emit_write_barrier (cfg, ptr, method_ins, 0);
3849 * To avoid looking up the compiled code belonging to the target method
3850 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3851 * store it, and we fill it after the method has been compiled.
/* The slot cache only works for non-AOT, non-dynamic, non-shared code. */
3853 if (!cfg->compile_aot && !method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
3854 MonoInst *code_slot_ins;
3857 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Look up (or lazily create) the per-domain method -> code-slot mapping
 * under the domain lock. */
3859 domain = mono_domain_get ();
3860 mono_domain_lock (domain);
3861 if (!domain_jit_info (domain)->method_code_hash)
3862 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3863 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3865 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3866 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3868 mono_domain_unlock (domain);
3870 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3872 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3875 /* Set invoke_impl field */
/* Under AOT the trampoline address is only known at load time, so emit a
 * patchable constant instead of a direct pointer. */
3876 if (cfg->compile_aot) {
3877 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3879 trampoline = mono_create_delegate_trampoline (cfg->domain, klass);
3880 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3882 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3884 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call to the rank-specific mono_array_new_va icall wrapper to
 * allocate a multi-dimensional array; SP holds the dimension arguments.
 */
3890 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3892 MonoJitICallInfo *info;
3894 /* Need to register the icall so it gets an icall wrapper */
3895 info = mono_get_array_new_va_icall (rank);
3897 cfg->flags |= MONO_CFG_HAS_VARARGS;
3899 /* mono_array_new_va () needs a vararg calling convention */
3900 cfg->disable_llvm = TRUE;
3902 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3903 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Materialize the GOT address into cfg->got_var at the start of the entry
 * basic block.  No-op when there is no got_var or it was already allocated.
 */
3907 mono_emit_load_got_addr (MonoCompile *cfg)
3909 MonoInst *getaddr, *dummy_use;
3911 if (!cfg->got_var || cfg->got_var_allocated)
3914 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3915 getaddr->dreg = cfg->got_var->dreg;
3917 /* Add it to the start of the first bblock */
/* Prepend manually when the entry block already has code, otherwise a
 * plain append is equivalent. */
3918 if (cfg->bb_entry->code) {
3919 getaddr->next = cfg->bb_entry->code;
3920 cfg->bb_entry->code = getaddr;
3923 MONO_ADD_INS (cfg->bb_entry, getaddr);
3925 cfg->got_var_allocated = TRUE;
3928 * Add a dummy use to keep the got_var alive, since real uses might
3929 * only be generated by the back ends.
3930 * Add it to end_bblock, so the variable's lifetime covers the whole
3932 * It would be better to make the usage of the got var explicit in all
3933 * cases when the backend needs it (i.e. calls, throw etc.), so this
3934 * wouldn't be needed.
3936 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3937 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Cached inline size limit, read once from MONO_INLINELIMIT (see below). */
3940 static int inline_limit;
3941 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD is eligible for inlining into the method being
 * compiled in CFG.  Applies attribute, size, security and class-initialization
 * constraints; the TRUE/FALSE returns sit on lines dropped from this extract.
 */
3944 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3946 MonoMethodHeaderSummary header;
3948 #ifdef MONO_ARCH_SOFT_FLOAT
3949 MonoMethodSignature *sig = mono_method_signature (method);
/* No inlining under generic sharing, and cap recursive inline depth. */
3953 if (cfg->generic_sharing_context)
3956 if (cfg->inline_depth > 10)
3959 #ifdef MONO_ARCH_HAVE_LMF_OPS
/* Arch can emit LMF ops inline, so small icall/pinvoke methods with a
 * non-struct return may still be inlined on these platforms. */
3960 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3961 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3962 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3967 if (!mono_method_get_header_summary (method, &header))
3970 /*runtime, icall and pinvoke are checked by summary call*/
3971 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3972 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3973 (method->klass->marshalbyref) ||
3977 /* also consider num_locals? */
3978 /* Do the size check early to avoid creating vtables */
/* Lazily initialize the size limit from the environment (not thread-safe;
 * worst case is a redundant getenv). */
3979 if (!inline_limit_inited) {
3980 if (getenv ("MONO_INLINELIMIT"))
3981 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3983 inline_limit = INLINE_LENGTH_LIMIT;
3984 inline_limit_inited = TRUE;
/* AggressiveInlining overrides the IL size limit. */
3986 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
3990 * if we can initialize the class of the method right away, we do,
3991 * otherwise we don't allow inlining if the class needs initialization,
3992 * since it would mean inserting a call to mono_runtime_class_init()
3993 * inside the inlined code
3995 if (!(cfg->opt & MONO_OPT_SHARED)) {
3996 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3997 if (cfg->run_cctors && method->klass->has_cctor) {
3998 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
3999 if (!method->klass->runtime_info)
4000 /* No vtable created yet */
4002 vtable = mono_class_vtable (cfg->domain, method->klass);
4005 /* This makes so that inline cannot trigger */
4006 /* .cctors: too many apps depend on them */
4007 /* running with a specific order... */
4008 if (! vtable->initialized)
4010 mono_runtime_class_init (vtable);
4012 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
4013 if (!method->klass->runtime_info)
4014 /* No vtable created yet */
4016 vtable = mono_class_vtable (cfg->domain, method->klass);
4019 if (!vtable->initialized)
4024 * If we're compiling for shared code
4025 * the cctor will need to be run at aot method load time, for example,
4026 * or at the end of the compilation of the inlining method.
4028 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
4033 * CAS - do not inline methods with declarative security
4034 * Note: this has to be before any possible return TRUE;
4036 if (mono_method_has_declsec (method))
4039 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float targets cannot inline methods taking or returning R4. */
4041 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4043 for (i = 0; i < sig->param_count; ++i)
4044 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static field access in METHOD requires emitting a
 * class-initialization check for VTABLE's class.  Each early test below
 * rules out one reason to emit the check (the dropped lines return).
 */
4052 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
/* Already initialized and not AOT: the JIT-time state is authoritative. */
4054 if (vtable->initialized && !cfg->compile_aot)
/* BeforeFieldInit classes allow the cctor to run at any time. */
4057 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
4060 if (!mono_class_needs_cctor_run (vtable->klass, method))
4063 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
4064 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of single-dimensional
 * array ARR (element type KLASS), optionally with a bounds check.
 * Returns the address instruction (type STACK_MP).
 */
4071 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4075 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
4077 mono_class_init (klass);
4078 size = mono_class_array_element_size (klass);
4080 mult_reg = alloc_preg (cfg);
4081 array_reg = arr->dreg;
4082 index_reg = index->dreg;
4084 #if SIZEOF_REGISTER == 8
4085 /* The array reg is 64 bits but the index reg is only 32 */
/* LLVM keeps vreg types precise, so no widening is needed there. */
4086 if (COMPILE_LLVM (cfg)) {
4088 index2_reg = index_reg;
4090 index2_reg = alloc_preg (cfg);
4091 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32-bit target: narrow an I8 index down to I4 first. */
4094 if (index->type == STACK_I8) {
4095 index2_reg = alloc_preg (cfg);
4096 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4098 index2_reg = index_reg;
/* Guarded by bcheck on a dropped line, presumably — TODO confirm. */
4103 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
4105 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Fast path: fold size scaling and the vector offset into a single LEA. */
4106 if (size == 1 || size == 2 || size == 4 || size == 8) {
4107 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4109 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
4110 ins->klass = mono_class_get_element_class (klass);
4111 ins->type = STACK_MP;
/* Generic path: addr = arr + index * size + offsetof (MonoArray, vector). */
4117 add_reg = alloc_ireg_mp (cfg);
4119 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
4120 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4121 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4122 ins->klass = mono_class_get_element_class (klass);
4123 ins->type = STACK_MP;
4124 MONO_ADD_INS (cfg->cbb, ins);
/* Only usable on archs with native mul/div (depends on OP_PMUL below). */
4129 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [i,j] of a two-dimensional
 * array, including per-dimension lower-bound adjustment and range checks.
 * Returns the address instruction (type STACK_MP).
 */
4131 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4133 int bounds_reg = alloc_preg (cfg);
4134 int add_reg = alloc_ireg_mp (cfg);
4135 int mult_reg = alloc_preg (cfg);
4136 int mult2_reg = alloc_preg (cfg);
4137 int low1_reg = alloc_preg (cfg);
4138 int low2_reg = alloc_preg (cfg);
4139 int high1_reg = alloc_preg (cfg);
4140 int high2_reg = alloc_preg (cfg);
4141 int realidx1_reg = alloc_preg (cfg);
4142 int realidx2_reg = alloc_preg (cfg);
4143 int sum_reg = alloc_preg (cfg);
4148 mono_class_init (klass);
4149 size = mono_class_array_element_size (klass);
4151 index1 = index_ins1->dreg;
4152 index2 = index_ins2->dreg;
4154 /* range checking */
4155 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4156 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 0: realidx = index - lower_bound, then unsigned-compare
 * against length so negative indices also fail the check. */
4158 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4159 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4160 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4161 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4162 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4163 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4164 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 1: same check against the second MonoArrayBounds entry. */
4166 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4167 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4168 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4169 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4170 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
4171 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4172 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + (realidx1 * len2 + realidx2) * size + vector offset. */
4174 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4175 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4176 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4177 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4178 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4180 ins->type = STACK_MP;
4182 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Dispatch an Array Address () call to the fastest available element
 * address computation: rank 1 and (optionally) rank 2 are emitted inline,
 * higher ranks go through the generic array-address wrapper method.
 */
4189 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4193 MonoMethod *addr_method;
/* The setter variant carries the value as an extra trailing parameter. */
4196 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4199 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
4201 #ifndef MONO_ARCH_EMULATE_MUL_DIV
4202 /* emit_ldelema_2 depends on OP_LMUL */
4203 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
4204 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* Slow path: call the marshalled Address wrapper for this rank/size. */
4208 element_size = mono_class_array_element_size (cmethod->klass->element_class);
4209 addr_method = mono_marshal_get_array_address (rank, element_size);
4210 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint request. */
4215 static MonoBreakPolicy
4216 always_insert_breakpoint (MonoMethod *method)
4218 return MONO_BREAK_POLICY_ALWAYS;
/* Current policy callback; replaced via mono_set_break_policy (). */
4221 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4224 * mono_set_break_policy:
4225 * policy_callback: the new callback function
4227 * Allow embedders to decide whether to actually obey breakpoint instructions
4228 * (both break IL instructions and Debugger.Break () method calls), for example
4229 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4230 * untrusted or semi-trusted code.
4232 * @policy_callback will be called every time a break point instruction needs to
4233 * be inserted with the method argument being the method that calls Debugger.Break()
4234 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4235 * if it wants the breakpoint to not be effective in the given method.
4236 * #MONO_BREAK_POLICY_ALWAYS is the default.
4239 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
/* NULL restores the default always-break policy. */
4241 if (policy_callback)
4242 break_policy_func = policy_callback;
4244 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:  (sic — historical typo kept for existing callers)
 *
 *   Ask the installed break policy whether a breakpoint in METHOD should
 * actually be emitted.
 */
4248 should_insert_brekpoint (MonoMethod *method) {
4249 switch (break_policy_func (method)) {
4250 case MONO_BREAK_POLICY_ALWAYS:
4252 case MONO_BREAK_POLICY_NEVER:
/* ON_DBG: only emit when running under the Mono debugger. */
4254 case MONO_BREAK_POLICY_ON_DBG:
4255 return mono_debug_using_mono_debugger ();
/* Unknown enum value from an embedder callback: warn (fallback return
 * is on a dropped line). */
4257 g_warning ("Incorrect value returned from break policy callback");
4262 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Inline Array.Get/SetGenericValueImpl as a direct element load/store:
 * args [0] = array, args [1] = index, args [2] = value address.
 */
4264 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4266 MonoInst *addr, *store, *load;
4267 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4269 /* the bounds check is already done by the callers */
4270 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* Set: copy *args[2] into the element; Get: copy the element to *args[2]. */
4272 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4273 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
4275 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4276 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to replace a constructor call with an intrinsic instruction;
 * currently only SIMD ctors are handled (when MONO_OPT_SIMD is enabled).
 * Presumably returns ins, NULL meaning "no intrinsic" — TODO confirm.
 */
4282 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4284 MonoInst *ins = NULL;
4285 #ifdef MONO_ARCH_SIMD_INTRINSICS
4286 if (cfg->opt & MONO_OPT_SIMD) {
4287 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/*
 * emit_memory_barrier:
 *
 *   Append an OP_MEMORY_BARRIER of the given KIND (e.g. FullBarrier) to the
 * current basic block.
 */
4297 emit_memory_barrier (MonoCompile *cfg, int kind)
4299 MonoInst *ins = NULL;
4300 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4301 MONO_ADD_INS (cfg->cbb, ins);
4302 ins->backend.memory_barrier_kind = kind;
4308 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4310 MonoInst *ins = NULL;
4312 static MonoClass *runtime_helpers_class = NULL;
4313 if (! runtime_helpers_class)
4314 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
4315 "System.Runtime.CompilerServices", "RuntimeHelpers");
4317 if (cmethod->klass == mono_defaults.string_class) {
4318 if (strcmp (cmethod->name, "get_Chars") == 0) {
4319 int dreg = alloc_ireg (cfg);
4320 int index_reg = alloc_preg (cfg);
4321 int mult_reg = alloc_preg (cfg);
4322 int add_reg = alloc_preg (cfg);
4324 #if SIZEOF_REGISTER == 8
4325 /* The array reg is 64 bits but the index reg is only 32 */
4326 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
4328 index_reg = args [1]->dreg;
4330 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
4332 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4333 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
4334 add_reg = ins->dreg;
4335 /* Avoid a warning */
4337 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4340 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
4341 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4342 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4343 add_reg, G_STRUCT_OFFSET (MonoString, chars));
4345 type_from_op (ins, NULL, NULL);
4347 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4348 int dreg = alloc_ireg (cfg);
4349 /* Decompose later to allow more optimizations */
4350 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
4351 ins->type = STACK_I4;
4352 ins->flags |= MONO_INST_FAULT;
4353 cfg->cbb->has_array_access = TRUE;
4354 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
4357 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
4358 int mult_reg = alloc_preg (cfg);
4359 int add_reg = alloc_preg (cfg);
4361 /* The corlib functions check for oob already. */
4362 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
4363 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4364 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
4365 return cfg->cbb->last_ins;
4368 } else if (cmethod->klass == mono_defaults.object_class) {
4370 if (strcmp (cmethod->name, "GetType") == 0) {
4371 int dreg = alloc_ireg_ref (cfg);
4372 int vt_reg = alloc_preg (cfg);
4373 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4374 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
4375 type_from_op (ins, NULL, NULL);
4378 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
4379 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
4380 int dreg = alloc_ireg (cfg);
4381 int t1 = alloc_ireg (cfg);
4383 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
4384 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
4385 ins->type = STACK_I4;
4389 } else if (strcmp (cmethod->name, ".ctor") == 0) {
4390 MONO_INST_NEW (cfg, ins, OP_NOP);
4391 MONO_ADD_INS (cfg->cbb, ins);
4395 } else if (cmethod->klass == mono_defaults.array_class) {
4396 if (strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
4397 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
4399 #ifndef MONO_BIG_ARRAYS
4401 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
4404 if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
4405 int dreg = alloc_ireg (cfg);
4406 int bounds_reg = alloc_ireg_mp (cfg);
4407 MonoBasicBlock *end_bb, *szarray_bb;
4408 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
4410 NEW_BBLOCK (cfg, end_bb);
4411 NEW_BBLOCK (cfg, szarray_bb);
4413 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
4414 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
4415 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4416 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
4417 /* Non-szarray case */
4419 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4420 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4422 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4423 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4424 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4425 MONO_START_BB (cfg, szarray_bb);
4428 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4429 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4431 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4432 MONO_START_BB (cfg, end_bb);
4434 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
4435 ins->type = STACK_I4;
4441 if (cmethod->name [0] != 'g')
4444 if (strcmp (cmethod->name, "get_Rank") == 0) {
4445 int dreg = alloc_ireg (cfg);
4446 int vtable_reg = alloc_preg (cfg);
4447 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
4448 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4449 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
4450 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4451 type_from_op (ins, NULL, NULL);
4454 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4455 int dreg = alloc_ireg (cfg);
4457 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4458 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4459 type_from_op (ins, NULL, NULL);
4464 } else if (cmethod->klass == runtime_helpers_class) {
4466 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
4467 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
4471 } else if (cmethod->klass == mono_defaults.thread_class) {
4472 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
4473 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
4474 MONO_ADD_INS (cfg->cbb, ins);
4476 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
4477 return emit_memory_barrier (cfg, FullBarrier);
4479 } else if (cmethod->klass == mono_defaults.monitor_class) {
4481 /* FIXME this should be integrated to the check below once we support the trampoline version */
4482 #if defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4483 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) {
4484 MonoMethod *fast_method = NULL;
4486 /* Avoid infinite recursion */
4487 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN && !strcmp (cfg->method->name, "FastMonitorEnterV4"))
4490 fast_method = mono_monitor_get_fast_path (cmethod);
4494 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
4498 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
4499 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
4502 if (COMPILE_LLVM (cfg)) {
4504 * Pass the argument normally, the LLVM backend will handle the
4505 * calling convention problems.
4507 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4509 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
4510 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4511 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4512 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4515 return (MonoInst*)call;
4516 } else if (strcmp (cmethod->name, "Exit") == 0) {
4519 if (COMPILE_LLVM (cfg)) {
4520 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4522 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
4523 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4524 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4525 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4528 return (MonoInst*)call;
4530 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4532 MonoMethod *fast_method = NULL;
4534 /* Avoid infinite recursion */
4535 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
4536 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
4537 strcmp (cfg->method->name, "FastMonitorExit") == 0))
4540 if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) ||
4541 strcmp (cmethod->name, "Exit") == 0)
4542 fast_method = mono_monitor_get_fast_path (cmethod);
4546 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
4549 } else if (cmethod->klass->image == mono_defaults.corlib &&
4550 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
4551 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
4554 #if SIZEOF_REGISTER == 8
4555 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
4556 /* 64 bit reads are already atomic */
4557 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
4558 ins->dreg = mono_alloc_preg (cfg);
4559 ins->inst_basereg = args [0]->dreg;
4560 ins->inst_offset = 0;
4561 MONO_ADD_INS (cfg->cbb, ins);
4565 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
4566 if (strcmp (cmethod->name, "Increment") == 0) {
4567 MonoInst *ins_iconst;
4570 if (fsig->params [0]->type == MONO_TYPE_I4)
4571 opcode = OP_ATOMIC_ADD_NEW_I4;
4572 #if SIZEOF_REGISTER == 8
4573 else if (fsig->params [0]->type == MONO_TYPE_I8)
4574 opcode = OP_ATOMIC_ADD_NEW_I8;
4577 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4578 ins_iconst->inst_c0 = 1;
4579 ins_iconst->dreg = mono_alloc_ireg (cfg);
4580 MONO_ADD_INS (cfg->cbb, ins_iconst);
4582 MONO_INST_NEW (cfg, ins, opcode);
4583 ins->dreg = mono_alloc_ireg (cfg);
4584 ins->inst_basereg = args [0]->dreg;
4585 ins->inst_offset = 0;
4586 ins->sreg2 = ins_iconst->dreg;
4587 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4588 MONO_ADD_INS (cfg->cbb, ins);
4590 } else if (strcmp (cmethod->name, "Decrement") == 0) {
4591 MonoInst *ins_iconst;
4594 if (fsig->params [0]->type == MONO_TYPE_I4)
4595 opcode = OP_ATOMIC_ADD_NEW_I4;
4596 #if SIZEOF_REGISTER == 8
4597 else if (fsig->params [0]->type == MONO_TYPE_I8)
4598 opcode = OP_ATOMIC_ADD_NEW_I8;
4601 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4602 ins_iconst->inst_c0 = -1;
4603 ins_iconst->dreg = mono_alloc_ireg (cfg);
4604 MONO_ADD_INS (cfg->cbb, ins_iconst);
4606 MONO_INST_NEW (cfg, ins, opcode);
4607 ins->dreg = mono_alloc_ireg (cfg);
4608 ins->inst_basereg = args [0]->dreg;
4609 ins->inst_offset = 0;
4610 ins->sreg2 = ins_iconst->dreg;
4611 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4612 MONO_ADD_INS (cfg->cbb, ins);
4614 } else if (strcmp (cmethod->name, "Add") == 0) {
4617 if (fsig->params [0]->type == MONO_TYPE_I4)
4618 opcode = OP_ATOMIC_ADD_NEW_I4;
4619 #if SIZEOF_REGISTER == 8
4620 else if (fsig->params [0]->type == MONO_TYPE_I8)
4621 opcode = OP_ATOMIC_ADD_NEW_I8;
4625 MONO_INST_NEW (cfg, ins, opcode);
4626 ins->dreg = mono_alloc_ireg (cfg);
4627 ins->inst_basereg = args [0]->dreg;
4628 ins->inst_offset = 0;
4629 ins->sreg2 = args [1]->dreg;
4630 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4631 MONO_ADD_INS (cfg->cbb, ins);
4634 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
4636 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
4637 if (strcmp (cmethod->name, "Exchange") == 0) {
4639 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
4641 if (fsig->params [0]->type == MONO_TYPE_I4)
4642 opcode = OP_ATOMIC_EXCHANGE_I4;
4643 #if SIZEOF_REGISTER == 8
4644 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
4645 (fsig->params [0]->type == MONO_TYPE_I))
4646 opcode = OP_ATOMIC_EXCHANGE_I8;
4648 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
4649 opcode = OP_ATOMIC_EXCHANGE_I4;
4654 MONO_INST_NEW (cfg, ins, opcode);
4655 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
4656 ins->inst_basereg = args [0]->dreg;
4657 ins->inst_offset = 0;
4658 ins->sreg2 = args [1]->dreg;
4659 MONO_ADD_INS (cfg->cbb, ins);
4661 switch (fsig->params [0]->type) {
4663 ins->type = STACK_I4;
4667 ins->type = STACK_I8;
4669 case MONO_TYPE_OBJECT:
4670 ins->type = STACK_OBJ;
4673 g_assert_not_reached ();
4676 if (cfg->gen_write_barriers && is_ref)
4677 emit_write_barrier (cfg, args [0], args [1], -1);
4679 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4681 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
4682 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
4684 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
4685 if (fsig->params [1]->type == MONO_TYPE_I4)
4687 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
4688 size = sizeof (gpointer);
4689 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
4692 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
4693 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
4694 ins->sreg1 = args [0]->dreg;
4695 ins->sreg2 = args [1]->dreg;
4696 ins->sreg3 = args [2]->dreg;
4697 ins->type = STACK_I4;
4698 MONO_ADD_INS (cfg->cbb, ins);
4699 } else if (size == 8) {
4700 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
4701 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
4702 ins->sreg1 = args [0]->dreg;
4703 ins->sreg2 = args [1]->dreg;
4704 ins->sreg3 = args [2]->dreg;
4705 ins->type = STACK_I8;
4706 MONO_ADD_INS (cfg->cbb, ins);
4708 /* g_assert_not_reached (); */
4710 if (cfg->gen_write_barriers && is_ref)
4711 emit_write_barrier (cfg, args [0], args [1], -1);
4713 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
4717 } else if (cmethod->klass->image == mono_defaults.corlib) {
4718 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
4719 && strcmp (cmethod->klass->name, "Debugger") == 0) {
4720 if (should_insert_brekpoint (cfg->method)) {
4721 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
4723 MONO_INST_NEW (cfg, ins, OP_NOP);
4724 MONO_ADD_INS (cfg->cbb, ins);
4728 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
4729 && strcmp (cmethod->klass->name, "Environment") == 0) {
4731 EMIT_NEW_ICONST (cfg, ins, 1);
4733 EMIT_NEW_ICONST (cfg, ins, 0);
4737 } else if (cmethod->klass == mono_defaults.math_class) {
4739 * There is general branches code for Min/Max, but it does not work for
4741 * http://everything2.com/?node_id=1051618
4745 #ifdef MONO_ARCH_SIMD_INTRINSICS
4746 if (cfg->opt & MONO_OPT_SIMD) {
4747 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
4753 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
4757 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *   Redirect calls to well-known runtime methods to optimized
 *   implementations.  Currently only String.InternalAllocateStr is
 *   redirected, to the GC's managed string allocator.
 *   Returns the replacement call's MonoInst* when a redirection was
 *   emitted; NOTE(review): the fall-through/return-NULL tail of this
 *   function is elided from this excerpt.
 */
4760 inline static MonoInst*
4761 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4762 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4764 if (method->klass == mono_defaults.string_class) {
4765 /* managed string allocation support */
/* Skip the fast path when allocation profiling is active or when
 * compiling in shared (MONO_OPT_SHARED) mode. */
4766 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
4767 MonoInst *iargs [2];
4768 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4769 MonoMethod *managed_alloc = NULL;
4771 g_assert (vtable); /* Should not fail since it is System.String */
4772 #ifndef MONO_CROSS_COMPILE
4773 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
/* Call the managed allocator with (vtable, length) as arguments. */
4777 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4778 iargs [1] = args [0];
4779 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *   During inlining, save the caller-supplied argument values SP into
 *   freshly created local variables and install them as cfg->args, so the
 *   inlined body sees them as its arguments.
 */
4786 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4788 MonoInst *store, *temp;
/* The implicit 'this' argument (i == 0 when sig->hasthis) has no entry in
 * sig->params, so derive its type from the stack slot instead. */
4791 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4792 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4795 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4796 * would be different than the MonoInst's used to represent arguments, and
4797 * the ldelema implementation can't deal with that.
4798 * Solution: When ldelema is used on an inline argument, create a var for
4799 * it, emit ldelema on that var, and emit the saving code below in
4800 * inline_method () if needed.
4802 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4803 cfg->args [i] = temp;
4804 /* This uses cfg->args [i] which is set by the preceding line */
4805 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4806 store->cil_code = sp [0]->cil_code;
4811 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4812 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4814 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *   Debugging aid: only allow inlining of callees whose full name starts
 *   with the prefix in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT env var.
 *   The env var is read once and cached in a static; an empty/unset limit
 *   allows everything.
 */
4816 check_inline_called_method_name_limit (MonoMethod *called_method)
4819 static char *limit = NULL;
4821 if (limit == NULL) {
4822 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4824 if (limit_string != NULL)
4825 limit = limit_string;
4827 limit = (char *) "";
4830 if (limit [0] != '\0') {
4831 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* Prefix comparison: only the first strlen (limit) chars must match. */
4833 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4834 g_free (called_method_name);
4836 //return (strncmp_result <= 0);
4837 return (strncmp_result == 0);
4844 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *   Debugging aid: only allow inlining inside callers whose full name
 *   starts with the prefix in MONO_INLINE_CALLER_METHOD_NAME_LIMIT.
 *   Mirrors check_inline_called_method_name_limit () above.
 */
4846 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4849 static char *limit = NULL;
4851 if (limit == NULL) {
4852 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4853 if (limit_string != NULL) {
4854 limit = limit_string;
4856 limit = (char *) "";
4860 if (limit [0] != '\0') {
4861 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
/* Prefix comparison against the configured limit string. */
4863 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4864 g_free (caller_method_name);
4866 //return (strncmp_result <= 0);
4867 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *   Initialize the inline return variable RVAR to a zero/default value of
 *   its stack type.  Used when some path of an inlined method (e.g. one
 *   that only throws) does not set the return value.
 *   NOTE(review): the case labels of this switch are elided in this
 *   excerpt; the arms below handle int, long, pointer, R8 and valuetypes.
 */
4875 emit_init_rvar (MonoCompile *cfg, MonoInst *rvar, MonoType *rtype)
/* Static storage so the emitted OP_R8CONST can point at a 0.0 constant. */
4877 static double r8_0 = 0.0;
4880 switch (rvar->type) {
4882 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4885 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4890 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4893 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4894 ins->type = STACK_R8;
4895 ins->inst_p0 = (void*)&r8_0;
4896 ins->dreg = rvar->dreg;
4897 MONO_ADD_INS (cfg->cbb, ins);
/* Valuetype return: zero the whole struct. */
4900 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (rtype));
4903 g_assert_not_reached ();
/*
 * inline_method:
 *   Try to inline CMETHOD at the current point of compilation.  Saves the
 *   relevant per-method state of CFG, compiles CMETHOD's body via
 *   mono_method_to_ir () into a fresh start/end bblock pair, then restores
 *   the state.  On success (cost below the threshold, or INLINE_ALWAYS)
 *   the new bblocks are linked/merged into the caller's graph; on failure
 *   the new bblocks are discarded.
 *   NOTE(review): this excerpt elides some original lines (returns,
 *   closing braces, a few statements), so comments below describe only
 *   the visible code.
 */
4908 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4909 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_always)
4911 MonoInst *ins, *rvar = NULL;
4912 MonoMethodHeader *cheader;
4913 MonoBasicBlock *ebblock, *sbblock;
4915 MonoMethod *prev_inlined_method;
/* prev_* locals snapshot the CFG state that mono_method_to_ir () will
 * clobber while compiling the inlined body; restored below. */
4916 MonoInst **prev_locals, **prev_args;
4917 MonoType **prev_arg_types;
4918 guint prev_real_offset;
4919 GHashTable *prev_cbb_hash;
4920 MonoBasicBlock **prev_cil_offset_to_bb;
4921 MonoBasicBlock *prev_cbb;
4922 unsigned char* prev_cil_start;
4923 guint32 prev_cil_offset_to_bb_len;
4924 MonoMethod *prev_current_method;
4925 MonoGenericContext *prev_generic_context;
4926 gboolean ret_var_set, prev_ret_var_set, virtual = FALSE;
4928 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
4930 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4931 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
4934 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4935 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
4939 if (cfg->verbose_level > 2)
4940 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4942 if (!cmethod->inline_info) {
4943 cfg->stat_inlineable_methods++;
4944 cmethod->inline_info = 1;
4947 /* allocate local variables */
4948 cheader = mono_method_get_header (cmethod);
4950 if (cheader == NULL || mono_loader_get_last_error ()) {
4951 MonoLoaderError *error = mono_loader_get_last_error ();
4954 mono_metadata_free_mh (cheader);
/* Only surface the loader error when inlining is mandatory. */
4955 if (inline_always && error)
4956 mono_cfg_set_exception (cfg, error->exception_type);
4958 mono_loader_clear_error ();
4962 /*Must verify before creating locals as it can cause the JIT to assert.*/
4963 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
4964 mono_metadata_free_mh (cheader);
4968 /* allocate space to store the return value */
4969 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4970 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
4973 prev_locals = cfg->locals;
4974 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4975 for (i = 0; i < cheader->num_locals; ++i)
4976 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4978 /* allocate start and end blocks */
4979 /* This is needed so if the inline is aborted, we can clean up */
4980 NEW_BBLOCK (cfg, sbblock);
4981 sbblock->real_offset = real_offset;
4983 NEW_BBLOCK (cfg, ebblock);
4984 ebblock->block_num = cfg->num_bblocks++;
4985 ebblock->real_offset = real_offset;
4987 prev_args = cfg->args;
4988 prev_arg_types = cfg->arg_types;
4989 prev_inlined_method = cfg->inlined_method;
4990 cfg->inlined_method = cmethod;
4991 cfg->ret_var_set = FALSE;
4992 cfg->inline_depth ++;
4993 prev_real_offset = cfg->real_offset;
4994 prev_cbb_hash = cfg->cbb_hash;
4995 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4996 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4997 prev_cil_start = cfg->cil_start;
4998 prev_cbb = cfg->cbb;
4999 prev_current_method = cfg->current_method;
5000 prev_generic_context = cfg->generic_context;
5001 prev_ret_var_set = cfg->ret_var_set;
/* Inlined callvirt on an instance method needs a null check on 'this'. */
5003 if (*ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
5006 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, virtual);
5008 ret_var_set = cfg->ret_var_set;
/* Restore the caller's compilation state. */
5010 cfg->inlined_method = prev_inlined_method;
5011 cfg->real_offset = prev_real_offset;
5012 cfg->cbb_hash = prev_cbb_hash;
5013 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
5014 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
5015 cfg->cil_start = prev_cil_start;
5016 cfg->locals = prev_locals;
5017 cfg->args = prev_args;
5018 cfg->arg_types = prev_arg_types;
5019 cfg->current_method = prev_current_method;
5020 cfg->generic_context = prev_generic_context;
5021 cfg->ret_var_set = prev_ret_var_set;
5022 cfg->inline_depth --;
/* Accept the inline when it was cheap enough or mandatory. */
5024 if ((costs >= 0 && costs < 60) || inline_always) {
5025 if (cfg->verbose_level > 2)
5026 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
5028 cfg->stat_inlined_methods++;
5030 /* always add some code to avoid block split failures */
5031 MONO_INST_NEW (cfg, ins, OP_NOP);
5032 MONO_ADD_INS (prev_cbb, ins);
5034 prev_cbb->next_bb = sbblock;
5035 link_bblock (cfg, prev_cbb, sbblock);
5038 * Get rid of the begin and end bblocks if possible to aid local
5041 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
5043 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
5044 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
5046 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
5047 MonoBasicBlock *prev = ebblock->in_bb [0];
5048 mono_merge_basic_blocks (cfg, prev, ebblock);
5050 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
5051 mono_merge_basic_blocks (cfg, prev_cbb, prev);
5052 cfg->cbb = prev_cbb;
5056 * It's possible that the rvar is set in some prev bblock, but not in others.
5062 for (i = 0; i < ebblock->in_count; ++i) {
5063 bb = ebblock->in_bb [i];
5065 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
5068 emit_init_rvar (cfg, rvar, fsig->ret);
5078 * If the inlined method contains only a throw, then the ret var is not
5079 * set, so set it to a dummy value.
5082 emit_init_rvar (cfg, rvar, fsig->ret);
5084 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
5087 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Abort path: too costly or compilation of the body failed. */
5090 if (cfg->verbose_level > 2)
5091 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
5092 cfg->exception_type = MONO_EXCEPTION_NONE;
5093 mono_loader_clear_error ();
5095 /* This gets rid of the newly added bblocks */
5096 cfg->cbb = prev_cbb;
5098 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
5103 * Some of these comments may well be out-of-date.
5104 * Design decisions: we do a single pass over the IL code (and we do bblock
5105 * splitting/merging in the few cases when it's required: a back jump to an IL
5106 * address that was not already seen as bblock starting point).
5107 * Code is validated as we go (full verification is still better left to metadata/verify.c).
5108 * Complex operations are decomposed in simpler ones right away. We need to let the
5109 * arch-specific code peek and poke inside this process somehow (except when the
5110 * optimizations can take advantage of the full semantic info of coarse opcodes).
5111 * All the opcodes of the form opcode.s are 'normalized' to opcode.
5112 * MonoInst->opcode initially is the IL opcode or some simplification of that
5113 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
5114 * opcode with value bigger than OP_LAST.
5115 * At this point the IR can be handed over to an interpreter, a dumb code generator
5116 * or to the optimizing code generator that will translate it to SSA form.
5118 * Profiling directed optimizations.
5119 * We may compile by default with few or no optimizations and instrument the code
5120 * or the user may indicate what methods to optimize the most either in a config file
5121 * or through repeated runs where the compiler applies offline the optimizations to
5122 * each method and then decides if it was worth it.
5125 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
5126 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
5127 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
5128 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
5129 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
5130 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
5131 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
5132 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; LOAD_ERROR;}
5134 /* offset from br.s -> br like opcodes */
5135 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *   Return whether the CIL address IP belongs to basic block BB, i.e. no
 *   other bblock starts at that offset in cfg->cil_offset_to_bb.
 */
5138 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
5140 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
5142 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *   Scan the CIL stream [START, END) and create basic blocks (via
 *   GET_BBLOCK) at every branch target and fall-through point, plus mark
 *   throw-only blocks as out-of-line.  NOTE(review): several case labels
 *   and advance statements are elided in this excerpt.
 */
5146 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
5148 unsigned char *ip = start;
5149 unsigned char *target;
5152 MonoBasicBlock *bblock;
5153 const MonoOpcode *opcode;
5156 cli_addr = ip - start;
5157 i = mono_opcode_value ((const guint8 **)&ip, end);
5160 opcode = &mono_opcodes [i];
/* Dispatch on the operand kind to find the instruction length and any
 * branch targets. */
5161 switch (opcode->argument) {
5162 case MonoInlineNone:
5165 case MonoInlineString:
5166 case MonoInlineType:
5167 case MonoInlineField:
5168 case MonoInlineMethod:
5171 case MonoShortInlineR:
5178 case MonoShortInlineVar:
5179 case MonoShortInlineI:
5182 case MonoShortInlineBrTarget:
/* 1-byte signed branch displacement, relative to the next instruction. */
5183 target = start + cli_addr + 2 + (signed char)ip [1];
5184 GET_BBLOCK (cfg, bblock, target);
5187 GET_BBLOCK (cfg, bblock, ip);
5189 case MonoInlineBrTarget:
/* 4-byte signed branch displacement. */
5190 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
5191 GET_BBLOCK (cfg, bblock, target);
5194 GET_BBLOCK (cfg, bblock, ip);
5196 case MonoInlineSwitch: {
5197 guint32 n = read32 (ip + 1);
/* The fall-through address follows the n 32-bit switch targets. */
5200 cli_addr += 5 + 4 * n;
5201 target = start + cli_addr;
5202 GET_BBLOCK (cfg, bblock, target);
5204 for (j = 0; j < n; ++j) {
5205 target = start + cli_addr + (gint32)read32 (ip);
5206 GET_BBLOCK (cfg, bblock, target);
5216 g_assert_not_reached ();
5219 if (i == CEE_THROW) {
5220 unsigned char *bb_start = ip - 1;
5222 /* Find the start of the bblock containing the throw */
5224 while ((bb_start >= start) && !bblock) {
5225 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* Blocks ending in a throw are moved out of the hot path. */
5229 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *   Resolve TOKEN to a MonoMethod in the context of M.  For wrapper
 *   methods the token indexes the wrapper's data; otherwise it is a
 *   metadata token resolved against M's image.  "Open" constructed
 *   generic methods are allowed (contrast mini_get_method () below).
 */
5238 static inline MonoMethod *
5239 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5243 if (m->wrapper_type != MONO_WRAPPER_NONE)
5244 return mono_method_get_wrapper_data (m, token);
5246 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *   Like mini_get_method_allow_open (), but when not compiling with
 *   generic sharing, reject methods whose declaring class is still an
 *   open constructed type.  NOTE(review): the rejection action on the
 *   matching branch is elided in this excerpt.
 */
5251 static inline MonoMethod *
5252 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5254 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
5256 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *   Resolve TOKEN to a MonoClass in the context of METHOD: wrapper data
 *   for wrappers, otherwise a metadata lookup against the method's image.
 *   The class is initialized before being returned.
 */
5262 static inline MonoClass*
5263 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
5267 if (method->wrapper_type != MONO_WRAPPER_NONE)
5268 klass = mono_method_get_wrapper_data (method, token);
5270 klass = mono_class_get_full (method->klass->image, token, context);
5272 mono_class_init (klass);
5277 * Returns TRUE if the JIT should abort inlining because "callee"
5278 * is influenced by security attributes.
/*
 * check_linkdemand:
 *   Evaluate CAS link-demands of CALLEE against CALLER.  For an ECMA
 *   link-demand, code throwing a SecurityException is emitted before the
 *   call; other failures record a MONO_EXCEPTION_SECURITY_LINKDEMAND on
 *   the cfg.  NOTE(review): some return statements are elided in this
 *   excerpt.
 */
5281 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
/* Only re-check when inlining (cfg->method != caller) into a method with
 * declarative security. */
5285 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
5289 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
5290 if (result == MONO_JIT_SECURITY_OK)
5293 if (result == MONO_JIT_LINKDEMAND_ECMA) {
5294 /* Generate code to throw a SecurityException before the actual call/link */
5295 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5298 NEW_ICONST (cfg, args [0], 4);
5299 NEW_METHODCONST (cfg, args [1], caller);
5300 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
5301 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
5302 /* don't hide previous results */
5303 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
5304 cfg->exception_data = result;
/*
 * throw_exception:
 *   Return (lazily resolving and caching) the managed
 *   SecurityManager.ThrowException helper method.
 */
5312 throw_exception (void)
5314 static MonoMethod *method = NULL;
5317 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5318 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *   Emit a call to the managed ThrowException helper that throws the
 *   pre-built exception object EX at runtime.
 */
5325 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
5327 MonoMethod *thrower = throw_exception ();
5330 EMIT_NEW_PCONST (cfg, args [0], ex);
5331 mono_emit_method_call (cfg, thrower, args, NULL);
5335 * Return the original method if a wrapper is specified. We can only access
5336 * the custom attributes from the original method.
5339 get_original_method (MonoMethod *method)
5341 if (method->wrapper_type == MONO_WRAPPER_NONE)
5344 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
5345 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
5348 /* in other cases we need to find the original method */
5349 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *   CoreCLR security check: if CALLER may not access FIELD, emit code
 *   that throws the returned security exception at runtime.
 */
5353 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
5354 MonoBasicBlock *bblock, unsigned char *ip)
5356 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5357 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
5359 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *   CoreCLR security check: if CALLER may not call CALLEE, emit code that
 *   throws the returned security exception at runtime.
 */
5363 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
5364 MonoBasicBlock *bblock, unsigned char *ip)
5366 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5367 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
5369 emit_throw_exception (cfg, ex);
5373 * Check that the IL instructions at ip are the array initialization
5374 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *   Pattern-match the canonical "dup; ldtoken <field>; call
 *   RuntimeHelpers::InitializeArray" sequence that follows a newarr, and
 *   if it matches return a pointer to the static field data (or its RVA
 *   for AOT) so the array can be initialized with a memcpy instead of a
 *   call.  NOTE(review): the per-type size assignments and some early
 *   returns are elided in this excerpt.
 */
5377 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
5380 * newarr[System.Int32]
5382 * ldtoken field valuetype ...
5383 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip [5] == 0x4 checks the ldtoken token's table byte (a Field token). */
5385 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
5386 guint32 token = read32 (ip + 7);
5387 guint32 field_token = read32 (ip + 2);
5388 guint32 field_index = field_token & 0xffffff;
5390 const char *data_ptr;
5392 MonoMethod *cmethod;
5393 MonoClass *dummy_class;
5394 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
5400 *out_field_token = field_token;
5402 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* The callee must really be RuntimeHelpers.InitializeArray from corlib. */
5405 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
5407 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
5408 case MONO_TYPE_BOOLEAN:
5412 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
5413 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
5414 case MONO_TYPE_CHAR:
5424 return NULL; /* stupid ARM FP swapped format */
/* The array data must fit inside the backing field. */
5434 if (size > mono_type_size (field->type, &dummy_align))
5437 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
5438 if (!method->klass->image->dynamic) {
5439 field_index = read32 (ip + 2) & 0xffffff;
5440 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
5441 data_ptr = mono_image_rva_map (method->klass->image, rva);
5442 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
5443 /* for aot code we do the lookup on load */
5444 if (aot && data_ptr)
5445 return GUINT_TO_POINTER (rva);
5447 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* Dynamic (SRE) image: read the data directly from the field. */
5449 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *   Record an InvalidProgramException on CFG with a message naming METHOD
 *   and disassembling the offending instruction at IP.
 */
5457 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
5459 char *method_fname = mono_method_full_name (method, TRUE);
5461 MonoMethodHeader *header = mono_method_get_header (method);
5463 if (header->code_size == 0)
5464 method_code = g_strdup ("method body is empty.");
5466 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
5467 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
5468 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
5469 g_free (method_fname);
5470 g_free (method_code);
/* The header is freed later, together with the cfg's mempool. */
5471 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *   Record a pre-built managed exception object on CFG, registering it as
 *   a GC root so it survives until the compilation is finished.
 */
5475 set_exception_object (MonoCompile *cfg, MonoException *exception)
5477 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
5478 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
5479 cfg->exception_ptr = exception;
/* generic_class_is_reference_type:
 *   Whether KLASS is a reference type under the current (possibly shared)
 *   generic instantiation. */
5483 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5485 return mini_type_is_reference (cfg, &klass->byval_arg);
/*
 * emit_stloc_ir:
 *   Emit a store of the top-of-stack value *SP into local N.  When the
 *   value is a freshly emitted constant, retarget the constant's dreg to
 *   the local instead of emitting a separate move.
 */
5489 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5492 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5493 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5494 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5495 /* Optimize reg-reg moves away */
5497 * Can't optimize other opcodes, since sp[0] might point to
5498 * the last ins of a decomposed opcode.
5500 sp [0]->dreg = (cfg)->locals [n]->dreg;
5502 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5507 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *   Recognize "ldloca <n>; initobj <type>" and replace it with a direct
 *   zeroing of local N (null for reference types, vzero for structs),
 *   avoiding the address-taken local.  Returns the new IP on success.
 *   NOTE(review): the SIZE-based operand decoding and the return
 *   statements are partially elided in this excerpt.
 */
5510 static inline unsigned char *
5511 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
5520 local = read16 (ip + 2);
/* Only fold when the following initobj lies in the same basic block. */
5524 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5525 gboolean skip = FALSE;
5527 /* From the INITOBJ case */
5528 token = read32 (ip + 2);
5529 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5530 CHECK_TYPELOAD (klass);
5531 if (mini_type_is_reference (cfg, &klass->byval_arg)) {
5532 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5533 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5534 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/*
 * is_exception_class:
 *   Walk CLASS's ancestry and report whether it derives from
 *   System.Exception.  NOTE(review): the loop construct around the parent
 *   walk is elided in this excerpt.
 */
5547 is_exception_class (MonoClass *class)
5550 if (class == mono_defaults.exception_class)
5552 class = class->parent;
5558 * is_jit_optimizer_disabled:
5560 * Determine whether M's assembly has a DebuggableAttribute with the
5561 * IsJITOptimizerDisabled flag set.
/* The result is computed once per assembly and cached on it; the memory
 * barriers order the value write before the inited flag write. */
5564 is_jit_optimizer_disabled (MonoMethod *m)
5566 MonoAssembly *ass = m->klass->image->assembly;
5567 MonoCustomAttrInfo* attrs;
5568 static MonoClass *klass;
5570 gboolean val = FALSE;
5573 if (ass->jit_optimizer_disabled_inited)
5574 return ass->jit_optimizer_disabled;
5577 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
/* No DebuggableAttribute class: optimizer stays enabled. */
5580 ass->jit_optimizer_disabled = FALSE;
5581 mono_memory_barrier ();
5582 ass->jit_optimizer_disabled_inited = TRUE;
5586 attrs = mono_custom_attrs_from_assembly (ass);
5588 for (i = 0; i < attrs->num_attrs; ++i) {
5589 MonoCustomAttrEntry *attr = &attrs->attrs [i];
5592 MonoMethodSignature *sig;
5594 if (!attr->ctor || attr->ctor->klass != klass)
5596 /* Decode the attribute. See reflection.c */
5597 len = attr->data_size;
5598 p = (const char*)attr->data;
/* 0x0001 is the custom-attribute blob prolog. */
5599 g_assert (read16 (p) == 0x0001);
5602 // FIXME: Support named parameters
5603 sig = mono_method_signature (attr->ctor);
5604 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
5606 /* Two boolean arguments */
5610 mono_custom_attrs_free (attrs);
5613 ass->jit_optimizer_disabled = val;
5614 mono_memory_barrier ();
5615 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *   Decide whether the call METHOD -> CMETHOD with signature FSIG can be
 *   compiled as a real tail call: signatures must be compatible for the
 *   architecture, and no argument may point into the current frame.
 */
5621 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig)
5623 gboolean supported_tail_call;
5626 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
5627 supported_tail_call = MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
/* Generic fallback: identical signatures and no valuetype return. */
5629 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
5632 for (i = 0; i < fsig->param_count; ++i) {
5633 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
5634 /* These can point to the current method's stack */
5635 supported_tail_call = FALSE;
5637 if (fsig->hasthis && cmethod->klass->valuetype)
5638 /* this might point to the current method's stack */
5639 supported_tail_call = FALSE;
5640 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
5641 supported_tail_call = FALSE;
5642 if (cfg->method->save_lmf)
5643 supported_tail_call = FALSE;
5644 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
5645 supported_tail_call = FALSE;
5647 /* Debugging support */
/* COUNT env var lets a developer bisect which tail call breaks a test. */
5649 if (supported_tail_call) {
5650 static int count = 0;
5652 if (getenv ("COUNT")) {
5653 if (count == atoi (getenv ("COUNT")))
5654 printf ("LAST: %s\n", mono_method_full_name (cmethod, TRUE));
5655 if (count > atoi (getenv ("COUNT")))
5656 supported_tail_call = FALSE;
5661 return supported_tail_call;
5664 /* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
5665 * it to the thread local value based on the tls_offset field. Every other kind of access to
5666 * the field causes an assert.
/*
 * is_magic_tls_access:
 *
 *   Return whether FIELD is the corlib ThreadLocal`1.tlsdata field whose
 * ldflda accesses the JIT rewrites into direct TLS loads (see the comment
 * above and create_magic_tls_access ()).
 */
5669 is_magic_tls_access (MonoClassField *field)
/* strcmp () is non-zero when the names differ; a mismatch takes the early
 * FALSE path (not visible in this line-sampled chunk). */
5671 if (strcmp (field->name, "tlsdata"))
5673 if (strcmp (field->parent->name, "ThreadLocal`1"))
/* Only the corlib definition of ThreadLocal`1 gets the magic treatment;
 * a user type with the same name must not be rewritten. */
5675 return field->parent->image == mono_defaults.corlib;
5678 /* emits the code needed to access a managed tls var (like ThreadStatic)
5679 * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
5680 * pointer for the current thread.
5681 * Returns the MonoInst* representing the address of the tls var.
/*
 * emit_managed_static_data_access:
 *
 *   Emit IR that computes &thread->static_data [(offset >> 24) - 1]
 * [offset & 0xffffff], i.e. the address of a thread-static slot, given the
 * encoded offset in OFFSET_REG (top 8 bits: 1-based bucket index; low 24
 * bits: byte offset inside the bucket — see the inline pseudo-code below).
 * NOTE(review): line-sampled chunk; the final `return addr;`/braces of the
 * original are not visible here.
 */
5684 emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
5687 int static_data_reg, array_reg, dreg;
5688 int offset2_reg, idx_reg;
5689 // inlined access to the tls data
5690 // idx = (offset >> 24) - 1;
5691 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
/* static_data_reg = thread->static_data (array of bucket pointers) */
5692 static_data_reg = alloc_ireg (cfg);
5693 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
/* idx_reg = (offset >> 24) - 1, then scale by sizeof (gpointer)
 * (shift by 3 on 64-bit, by 2 on 32-bit) to get a byte displacement. */
5694 idx_reg = alloc_ireg (cfg);
5695 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
5696 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
5697 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
5698 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
/* array_reg = static_data [idx] (base pointer of the selected bucket) */
5699 array_reg = alloc_ireg (cfg);
5700 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
/* offset2_reg = offset & 0xffffff (offset inside the bucket) */
5701 offset2_reg = alloc_ireg (cfg);
5702 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
/* addr = bucket base + in-bucket offset */
5703 dreg = alloc_ireg (cfg);
5704 EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
5709 * redirect access to the tlsdata field to the tls var given by the tls_offset field.
5710 * this address is cached per-method in cached_tls_addr.
/*
 * create_magic_tls_access:
 *
 *   Emit (or reuse) the address of the TLS slot backing TLS_FIELD
 * (ThreadLocal`1.tlsdata, see is_magic_tls_access ()).  The computed address
 * is stored in a per-method temporary pointed to by CACHED_TLS_ADDR so that
 * repeated accesses in the same method only compute it once.
 * NOTE(review): line-sampled chunk; some control-flow lines (the early
 * return after the cache hit, the if/else around the thread intrinsic, and
 * the final `return addr;`) are not visible here.
 */
5713 create_magic_tls_access (MonoCompile *cfg, MonoClassField *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
5715 MonoInst *load, *addr, *temp, *store, *thread_ins;
5716 MonoClassField *offset_field;
/* Cache hit: reload the previously computed address from its temporary. */
5718 if (*cached_tls_addr) {
5719 EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
/* Load the encoded tls offset from the ThreadLocal`1 instance's tls_offset
 * field, and obtain the current MonoInternalThread*. */
5722 thread_ins = mono_get_thread_intrinsic (cfg);
5723 offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");
5725 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
5727 MONO_ADD_INS (cfg->cbb, thread_ins);
/* Fallback when no arch intrinsic is available: call the icall-backed
 * CurrentInternalThread_internal () instead. */
5729 MonoMethod *thread_method;
5730 thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
5731 thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
/* Compute the slot address, type it as a managed pointer to T, and stash it
 * in a fresh local so later accesses can just TEMPLOAD it. */
5733 addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
5734 addr->klass = mono_class_from_mono_type (tls_field->type);
5735 addr->type = STACK_MP;
5736 *cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
5737 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);
5739 EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
5744 * mono_method_to_ir:
5746 * Translate the .net IL into linear IR.
5749 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5750 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5751 guint inline_offset, gboolean is_virtual_call)
5754 MonoInst *ins, **sp, **stack_start;
5755 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5756 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
5757 MonoMethod *cmethod, *method_definition;
5758 MonoInst **arg_array;
5759 MonoMethodHeader *header;
5761 guint32 token, ins_flag;
5763 MonoClass *constrained_call = NULL;
5764 unsigned char *ip, *end, *target, *err_pos;
5765 static double r8_0 = 0.0;
5766 MonoMethodSignature *sig;
5767 MonoGenericContext *generic_context = NULL;
5768 MonoGenericContainer *generic_container = NULL;
5769 MonoType **param_types;
5770 int i, n, start_new_bblock, dreg;
5771 int num_calls = 0, inline_costs = 0;
5772 int breakpoint_id = 0;
5774 MonoBoolean security, pinvoke;
5775 MonoSecurityManager* secman = NULL;
5776 MonoDeclSecurityActions actions;
5777 GSList *class_inits = NULL;
5778 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5780 gboolean init_locals, seq_points, skip_dead_blocks;
5781 gboolean disable_inline, sym_seq_points = FALSE;
5782 MonoInst *cached_tls_addr = NULL;
5783 MonoDebugMethodInfo *minfo;
5784 MonoBitSet *seq_point_locs = NULL;
5786 disable_inline = is_jit_optimizer_disabled (method);
5788 /* serialization and xdomain stuff may need access to private fields and methods */
5789 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5790 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5791 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5792 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5793 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5794 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5796 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5798 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5799 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5800 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5801 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5802 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
5804 image = method->klass->image;
5805 header = mono_method_get_header (method);
5807 MonoLoaderError *error;
5809 if ((error = mono_loader_get_last_error ())) {
5810 mono_cfg_set_exception (cfg, error->exception_type);
5812 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
5813 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
5815 goto exception_exit;
5817 generic_container = mono_method_get_generic_container (method);
5818 sig = mono_method_signature (method);
5819 num_args = sig->hasthis + sig->param_count;
5820 ip = (unsigned char*)header->code;
5821 cfg->cil_start = ip;
5822 end = ip + header->code_size;
5823 cfg->stat_cil_code_size += header->code_size;
5824 init_locals = header->init_locals;
5826 seq_points = cfg->gen_seq_points && cfg->method == method;
5828 if (cfg->gen_seq_points && cfg->method == method) {
5829 minfo = mono_debug_lookup_method (method);
5831 int i, n_il_offsets;
5835 mono_debug_symfile_get_line_numbers_full (minfo, NULL, NULL, &n_il_offsets, &il_offsets, &line_numbers, NULL, NULL);
5836 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
5837 sym_seq_points = TRUE;
5838 for (i = 0; i < n_il_offsets; ++i) {
5839 if (il_offsets [i] < header->code_size)
5840 mono_bitset_set_fast (seq_point_locs, il_offsets [i]);
5846 * Methods without init_locals set could cause asserts in various passes
5851 method_definition = method;
5852 while (method_definition->is_inflated) {
5853 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5854 method_definition = imethod->declaring;
5857 /* SkipVerification is not allowed if core-clr is enabled */
5858 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5860 dont_verify_stloc = TRUE;
5863 if (mono_debug_using_mono_debugger ())
5864 cfg->keep_cil_nops = TRUE;
5866 if (sig->is_inflated)
5867 generic_context = mono_method_get_context (method);
5868 else if (generic_container)
5869 generic_context = &generic_container->context;
5870 cfg->generic_context = generic_context;
5872 if (!cfg->generic_sharing_context)
5873 g_assert (!sig->has_type_parameters);
5875 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5876 g_assert (method->is_inflated);
5877 g_assert (mono_method_get_context (method)->method_inst);
5879 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5880 g_assert (sig->generic_param_count);
5882 if (cfg->method == method) {
5883 cfg->real_offset = 0;
5885 cfg->real_offset = inline_offset;
5888 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5889 cfg->cil_offset_to_bb_len = header->code_size;
5891 cfg->current_method = method;
5893 if (cfg->verbose_level > 2)
5894 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5896 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5898 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5899 for (n = 0; n < sig->param_count; ++n)
5900 param_types [n + sig->hasthis] = sig->params [n];
5901 cfg->arg_types = param_types;
5903 dont_inline = g_list_prepend (dont_inline, method);
5904 if (cfg->method == method) {
5906 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5907 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5910 NEW_BBLOCK (cfg, start_bblock);
5911 cfg->bb_entry = start_bblock;
5912 start_bblock->cil_code = NULL;
5913 start_bblock->cil_length = 0;
5914 #if defined(__native_client_codegen__)
5915 MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
5916 ins->dreg = alloc_dreg (cfg, STACK_I4);
5917 MONO_ADD_INS (start_bblock, ins);
5921 NEW_BBLOCK (cfg, end_bblock);
5922 cfg->bb_exit = end_bblock;
5923 end_bblock->cil_code = NULL;
5924 end_bblock->cil_length = 0;
5925 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
5926 g_assert (cfg->num_bblocks == 2);
5928 arg_array = cfg->args;
5930 if (header->num_clauses) {
5931 cfg->spvars = g_hash_table_new (NULL, NULL);
5932 cfg->exvars = g_hash_table_new (NULL, NULL);
5934 /* handle exception clauses */
5935 for (i = 0; i < header->num_clauses; ++i) {
5936 MonoBasicBlock *try_bb;
5937 MonoExceptionClause *clause = &header->clauses [i];
5938 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5939 try_bb->real_offset = clause->try_offset;
5940 try_bb->try_start = TRUE;
5941 try_bb->region = ((i + 1) << 8) | clause->flags;
5942 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5943 tblock->real_offset = clause->handler_offset;
5944 tblock->flags |= BB_EXCEPTION_HANDLER;
5946 link_bblock (cfg, try_bb, tblock);
5948 if (*(ip + clause->handler_offset) == CEE_POP)
5949 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5951 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5952 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5953 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5954 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5955 MONO_ADD_INS (tblock, ins);
5957 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) {
5958 /* finally clauses already have a seq point */
5959 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
5960 MONO_ADD_INS (tblock, ins);
5963 /* todo: is a fault block unsafe to optimize? */
5964 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5965 tblock->flags |= BB_EXCEPTION_UNSAFE;
5969 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5971 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5973 /* catch and filter blocks get the exception object on the stack */
5974 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5975 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5976 MonoInst *dummy_use;
5978 /* mostly like handle_stack_args (), but just sets the input args */
5979 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5980 tblock->in_scount = 1;
5981 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5982 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5985 * Add a dummy use for the exvar so its liveness info will be
5989 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5991 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5992 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5993 tblock->flags |= BB_EXCEPTION_HANDLER;
5994 tblock->real_offset = clause->data.filter_offset;
5995 tblock->in_scount = 1;
5996 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5997 /* The filter block shares the exvar with the handler block */
5998 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5999 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
6000 MONO_ADD_INS (tblock, ins);
6004 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
6005 clause->data.catch_class &&
6006 cfg->generic_sharing_context &&
6007 mono_class_check_context_used (clause->data.catch_class)) {
6009 * In shared generic code with catch
6010 * clauses containing type variables
6011 * the exception handling code has to
6012 * be able to get to the rgctx.
6013 * Therefore we have to make sure that
6014 * the vtable/mrgctx argument (for
6015 * static or generic methods) or the
6016 * "this" argument (for non-static
6017 * methods) are live.
6019 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
6020 mini_method_get_context (method)->method_inst ||
6021 method->klass->valuetype) {
6022 mono_get_vtable_var (cfg);
6024 MonoInst *dummy_use;
6026 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
6031 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
6032 cfg->cbb = start_bblock;
6033 cfg->args = arg_array;
6034 mono_save_args (cfg, sig, inline_args);
6037 /* FIRST CODE BLOCK */
6038 NEW_BBLOCK (cfg, bblock);
6039 bblock->cil_code = ip;
6043 ADD_BBLOCK (cfg, bblock);
6045 if (cfg->method == method) {
6046 breakpoint_id = mono_debugger_method_has_breakpoint (method);
6047 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
6048 MONO_INST_NEW (cfg, ins, OP_BREAK);
6049 MONO_ADD_INS (bblock, ins);
6053 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
6054 secman = mono_security_manager_get_methods ();
6056 security = (secman && mono_method_has_declsec (method));
6057 /* at this point having security doesn't mean we have any code to generate */
6058 if (security && (cfg->method == method)) {
6059 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
6060 * And we do not want to enter the next section (with allocation) if we
6061 * have nothing to generate */
6062 security = mono_declsec_get_demands (method, &actions);
6065 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
6066 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
6068 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
6069 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6070 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
6072 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
6073 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
6077 mono_custom_attrs_free (custom);
6080 custom = mono_custom_attrs_from_class (wrapped->klass);
6081 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
6085 mono_custom_attrs_free (custom);
6088 /* not a P/Invoke after all */
6093 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
6094 /* we use a separate basic block for the initialization code */
6095 NEW_BBLOCK (cfg, init_localsbb);
6096 cfg->bb_init = init_localsbb;
6097 init_localsbb->real_offset = cfg->real_offset;
6098 start_bblock->next_bb = init_localsbb;
6099 init_localsbb->next_bb = bblock;
6100 link_bblock (cfg, start_bblock, init_localsbb);
6101 link_bblock (cfg, init_localsbb, bblock);
6103 cfg->cbb = init_localsbb;
6105 start_bblock->next_bb = bblock;
6106 link_bblock (cfg, start_bblock, bblock);
6109 /* at this point we know, if security is TRUE, that some code needs to be generated */
6110 if (security && (cfg->method == method)) {
6113 cfg->stat_cas_demand_generation++;
6115 if (actions.demand.blob) {
6116 /* Add code for SecurityAction.Demand */
6117 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
6118 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
6119 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
6120 mono_emit_method_call (cfg, secman->demand, args, NULL);
6122 if (actions.noncasdemand.blob) {
6123 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
6124 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
6125 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
6126 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
6127 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
6128 mono_emit_method_call (cfg, secman->demand, args, NULL);
6130 if (actions.demandchoice.blob) {
6131 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
6132 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
6133 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
6134 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
6135 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
6139 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
6141 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
6144 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
6145 /* check if this is native code, e.g. an icall or a p/invoke */
6146 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
6147 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
6149 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
6150 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
6152 /* if this ia a native call then it can only be JITted from platform code */
6153 if ((icall || pinvk) && method->klass && method->klass->image) {
6154 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
6155 MonoException *ex = icall ? mono_get_exception_security () :
6156 mono_get_exception_method_access ();
6157 emit_throw_exception (cfg, ex);
6164 if (header->code_size == 0)
6167 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
6172 if (cfg->method == method)
6173 mono_debug_init_method (cfg, bblock, breakpoint_id);
6175 for (n = 0; n < header->num_locals; ++n) {
6176 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
6181 /* We force the vtable variable here for all shared methods
6182 for the possibility that they might show up in a stack
6183 trace where their exact instantiation is needed. */
6184 if (cfg->generic_sharing_context && method == cfg->method) {
6185 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
6186 mini_method_get_context (method)->method_inst ||
6187 method->klass->valuetype) {
6188 mono_get_vtable_var (cfg);
6190 /* FIXME: Is there a better way to do this?
6191 We need the variable live for the duration
6192 of the whole method. */
6193 cfg->args [0]->flags |= MONO_INST_INDIRECT;
6197 /* add a check for this != NULL to inlined methods */
6198 if (is_virtual_call) {
6201 NEW_ARGLOAD (cfg, arg_ins, 0);
6202 MONO_ADD_INS (cfg->cbb, arg_ins);
6203 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
6206 skip_dead_blocks = !dont_verify;
6207 if (skip_dead_blocks) {
6208 original_bb = bb = mono_basic_block_split (method, &error);
6209 if (!mono_error_ok (&error)) {
6210 mono_error_cleanup (&error);
6216 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
6217 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
6220 start_new_bblock = 0;
6223 if (cfg->method == method)
6224 cfg->real_offset = ip - header->code;
6226 cfg->real_offset = inline_offset;
6231 if (start_new_bblock) {
6232 bblock->cil_length = ip - bblock->cil_code;
6233 if (start_new_bblock == 2) {
6234 g_assert (ip == tblock->cil_code);
6236 GET_BBLOCK (cfg, tblock, ip);
6238 bblock->next_bb = tblock;
6241 start_new_bblock = 0;
6242 for (i = 0; i < bblock->in_scount; ++i) {
6243 if (cfg->verbose_level > 3)
6244 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
6245 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
6249 g_slist_free (class_inits);
6252 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
6253 link_bblock (cfg, bblock, tblock);
6254 if (sp != stack_start) {
6255 handle_stack_args (cfg, stack_start, sp - stack_start);
6257 CHECK_UNVERIFIABLE (cfg);
6259 bblock->next_bb = tblock;
6262 for (i = 0; i < bblock->in_scount; ++i) {
6263 if (cfg->verbose_level > 3)
6264 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
6265 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
6268 g_slist_free (class_inits);
6273 if (skip_dead_blocks) {
6274 int ip_offset = ip - header->code;
6276 if (ip_offset == bb->end)
6280 int op_size = mono_opcode_size (ip, end);
6281 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
6283 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
6285 if (ip_offset + op_size == bb->end) {
6286 MONO_INST_NEW (cfg, ins, OP_NOP);
6287 MONO_ADD_INS (bblock, ins);
6288 start_new_bblock = 1;
6296 * Sequence points are points where the debugger can place a breakpoint.
6297 * Currently, we generate these automatically at points where the IL
6300 if (seq_points && ((sp == stack_start) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
6302 * Make methods interruptable at the beginning, and at the targets of
6303 * backward branches.
6304 * Also, do this at the start of every bblock in methods with clauses too,
6305 * to be able to handle instructions with inprecise control flow like
6307 * Backward branches are handled at the end of method-to-ir ().
6309 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
6311 /* Avoid sequence points on empty IL like .volatile */
6312 // FIXME: Enable this
6313 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
6314 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
6315 MONO_ADD_INS (cfg->cbb, ins);
6318 bblock->real_offset = cfg->real_offset;
6320 if ((cfg->method == method) && cfg->coverage_info) {
6321 guint32 cil_offset = ip - header->code;
6322 cfg->coverage_info->data [cil_offset].cil_code = ip;
6324 /* TODO: Use an increment here */
6325 #if defined(TARGET_X86)
6326 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
6327 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
6329 MONO_ADD_INS (cfg->cbb, ins);
6331 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
6332 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
6336 if (cfg->verbose_level > 3)
6337 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
6341 if (seq_points && !sym_seq_points && sp != stack_start) {
6343 * The C# compiler uses these nops to notify the JIT that it should
6344 * insert seq points.
6346 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
6347 MONO_ADD_INS (cfg->cbb, ins);
6349 if (cfg->keep_cil_nops)
6350 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
6352 MONO_INST_NEW (cfg, ins, OP_NOP);
6354 MONO_ADD_INS (bblock, ins);
6357 if (should_insert_brekpoint (cfg->method)) {
6358 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6360 MONO_INST_NEW (cfg, ins, OP_NOP);
6363 MONO_ADD_INS (bblock, ins);
6369 CHECK_STACK_OVF (1);
6370 n = (*ip)-CEE_LDARG_0;
6372 EMIT_NEW_ARGLOAD (cfg, ins, n);
6380 CHECK_STACK_OVF (1);
6381 n = (*ip)-CEE_LDLOC_0;
6383 EMIT_NEW_LOCLOAD (cfg, ins, n);
6392 n = (*ip)-CEE_STLOC_0;
6395 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
6397 emit_stloc_ir (cfg, sp, header, n);
6404 CHECK_STACK_OVF (1);
6407 EMIT_NEW_ARGLOAD (cfg, ins, n);
6413 CHECK_STACK_OVF (1);
6416 NEW_ARGLOADA (cfg, ins, n);
6417 MONO_ADD_INS (cfg->cbb, ins);
6427 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
6429 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
6434 CHECK_STACK_OVF (1);
6437 EMIT_NEW_LOCLOAD (cfg, ins, n);
6441 case CEE_LDLOCA_S: {
6442 unsigned char *tmp_ip;
6444 CHECK_STACK_OVF (1);
6445 CHECK_LOCAL (ip [1]);
6447 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
6453 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
6462 CHECK_LOCAL (ip [1]);
6463 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
6465 emit_stloc_ir (cfg, sp, header, ip [1]);
6470 CHECK_STACK_OVF (1);
6471 EMIT_NEW_PCONST (cfg, ins, NULL);
6472 ins->type = STACK_OBJ;
6477 CHECK_STACK_OVF (1);
6478 EMIT_NEW_ICONST (cfg, ins, -1);
6491 CHECK_STACK_OVF (1);
6492 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
6498 CHECK_STACK_OVF (1);
6500 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
6506 CHECK_STACK_OVF (1);
6507 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
6513 CHECK_STACK_OVF (1);
6514 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6515 ins->type = STACK_I8;
6516 ins->dreg = alloc_dreg (cfg, STACK_I8);
6518 ins->inst_l = (gint64)read64 (ip);
6519 MONO_ADD_INS (bblock, ins);
6525 gboolean use_aotconst = FALSE;
6527 #ifdef TARGET_POWERPC
6528 /* FIXME: Clean this up */
6529 if (cfg->compile_aot)
6530 use_aotconst = TRUE;
6533 /* FIXME: we should really allocate this only late in the compilation process */
6534 f = mono_domain_alloc (cfg->domain, sizeof (float));
6536 CHECK_STACK_OVF (1);
6542 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
6544 dreg = alloc_freg (cfg);
6545 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
6546 ins->type = STACK_R8;
6548 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6549 ins->type = STACK_R8;
6550 ins->dreg = alloc_dreg (cfg, STACK_R8);
6552 MONO_ADD_INS (bblock, ins);
6562 gboolean use_aotconst = FALSE;
6564 #ifdef TARGET_POWERPC
6565 /* FIXME: Clean this up */
6566 if (cfg->compile_aot)
6567 use_aotconst = TRUE;
6570 /* FIXME: we should really allocate this only late in the compilation process */
6571 d = mono_domain_alloc (cfg->domain, sizeof (double));
6573 CHECK_STACK_OVF (1);
6579 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
6581 dreg = alloc_freg (cfg);
6582 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
6583 ins->type = STACK_R8;
6585 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6586 ins->type = STACK_R8;
6587 ins->dreg = alloc_dreg (cfg, STACK_R8);
6589 MONO_ADD_INS (bblock, ins);
6598 MonoInst *temp, *store;
6600 CHECK_STACK_OVF (1);
6604 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
6605 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
6607 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6610 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6623 if (sp [0]->type == STACK_R8)
6624 /* we need to pop the value from the x86 FP stack */
6625 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
6634 if (stack_start != sp)
6636 token = read32 (ip + 1);
6637 /* FIXME: check the signature matches */
6638 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6640 if (!cmethod || mono_loader_get_last_error ())
6643 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
6644 GENERIC_SHARING_FAILURE (CEE_JMP);
6646 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
6647 CHECK_CFG_EXCEPTION;
6649 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6651 MonoMethodSignature *fsig = mono_method_signature (cmethod);
6654 /* Handle tail calls similarly to calls */
6655 n = fsig->param_count + fsig->hasthis;
6657 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
6658 call->method = cmethod;
6659 call->tail_call = TRUE;
6660 call->signature = mono_method_signature (cmethod);
6661 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6662 call->inst.inst_p0 = cmethod;
6663 for (i = 0; i < n; ++i)
6664 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
6666 mono_arch_emit_call (cfg, call);
6667 MONO_ADD_INS (bblock, (MonoInst*)call);
6670 for (i = 0; i < num_args; ++i)
6671 /* Prevent arguments from being optimized away */
6672 arg_array [i]->flags |= MONO_INST_VOLATILE;
6674 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6675 ins = (MonoInst*)call;
6676 ins->inst_p0 = cmethod;
6677 MONO_ADD_INS (bblock, ins);
6681 start_new_bblock = 1;
6686 case CEE_CALLVIRT: {
6687 MonoInst *addr = NULL;
6688 MonoMethodSignature *fsig = NULL;
6690 int virtual = *ip == CEE_CALLVIRT;
6691 int calli = *ip == CEE_CALLI;
6692 gboolean pass_imt_from_rgctx = FALSE;
6693 MonoInst *imt_arg = NULL;
6694 gboolean pass_vtable = FALSE;
6695 gboolean pass_mrgctx = FALSE;
6696 MonoInst *vtable_arg = NULL;
6697 gboolean check_this = FALSE;
6698 gboolean supported_tail_call = FALSE;
6699 gboolean need_seq_point = FALSE;
6702 token = read32 (ip + 1);
6709 if (method->wrapper_type != MONO_WRAPPER_NONE)
6710 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6712 fsig = mono_metadata_parse_signature (image, token);
6714 n = fsig->param_count + fsig->hasthis;
6716 if (method->dynamic && fsig->pinvoke) {
6720 * This is a call through a function pointer using a pinvoke
6721 * signature. Have to create a wrapper and call that instead.
6722 * FIXME: This is very slow, need to create a wrapper at JIT time
6723 * instead based on the signature.
6725 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
6726 EMIT_NEW_PCONST (cfg, args [1], fsig);
6728 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6731 MonoMethod *cil_method;
6733 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6734 if (constrained_call && cfg->verbose_level > 2)
6735 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_call));
6736 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6737 cil_method = cmethod;
6738 if (constrained_call && !((constrained_call->byval_arg.type == MONO_TYPE_VAR ||
6739 constrained_call->byval_arg.type == MONO_TYPE_MVAR) &&
6740 cfg->generic_sharing_context)) {
6741 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_call, generic_context);
6743 } else if (constrained_call) {
6744 if (cfg->verbose_level > 2)
6745 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_call));
6747 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6749 * This is needed since get_method_constrained can't find
6750 * the method in klass representing a type var.
6751 * The type var is guaranteed to be a reference type in this
6754 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6755 cil_method = cmethod;
6756 g_assert (!cmethod->klass->valuetype);
6758 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6761 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6762 cil_method = cmethod;
6765 if (!cmethod || mono_loader_get_last_error ())
6767 if (!dont_verify && !cfg->skip_visibility) {
6768 MonoMethod *target_method = cil_method;
6769 if (method->is_inflated) {
6770 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6772 if (!mono_method_can_access_method (method_definition, target_method) &&
6773 !mono_method_can_access_method (method, cil_method))
6774 METHOD_ACCESS_FAILURE;
6777 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6778 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6780 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6781 /* MS.NET seems to silently convert this to a callvirt */
6786 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
6787 * converts to a callvirt.
6789 * tests/bug-515884.il is an example of this behavior
6791 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
6792 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
6793 if (!virtual && cmethod->klass->marshalbyref && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
6797 if (!cmethod->klass->inited)
6798 if (!mono_class_init (cmethod->klass))
6799 TYPE_LOAD_ERROR (cmethod->klass);
6801 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6802 mini_class_is_system_array (cmethod->klass)) {
6803 array_rank = cmethod->klass->rank;
6804 fsig = mono_method_signature (cmethod);
6806 fsig = mono_method_signature (cmethod);
6811 if (fsig->pinvoke) {
6812 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6813 check_for_pending_exc, FALSE);
6814 fsig = mono_method_signature (wrapper);
6815 } else if (constrained_call) {
6816 fsig = mono_method_signature (cmethod);
6818 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6822 mono_save_token_info (cfg, image, token, cil_method);
6824 if (!MONO_TYPE_IS_VOID (fsig->ret) && !sym_seq_points) {
6826 * Need to emit an implicit seq point after every non-void call so single stepping through nested calls like
6827 * foo (bar (), baz ())
6828 * works correctly. MS does this also:
6829 * http://stackoverflow.com/questions/6937198/making-your-net-language-step-correctly-in-the-debugger
6830 * The problem with this approach is that the debugger will stop after all calls returning a value,
6831 * even for simple cases, like:
6834 /* Special case a few common successor opcodes */
6835 if (!(ip + 5 < end && ip [5] == CEE_POP))
6836 need_seq_point = TRUE;
6839 n = fsig->param_count + fsig->hasthis;
6841 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6842 if (check_linkdemand (cfg, method, cmethod))
6844 CHECK_CFG_EXCEPTION;
6847 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6848 g_assert_not_reached ();
6851 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6854 if (!cfg->generic_sharing_context && cmethod)
6855 g_assert (!mono_method_check_context_used (cmethod));
6859 //g_assert (!virtual || fsig->hasthis);
6863 if (constrained_call) {
6865 * We have the `constrained.' prefix opcode.
6867 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6869 * The type parameter is instantiated as a valuetype,
6870 * but that type doesn't override the method we're
6871 * calling, so we need to box `this'.
6873 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6874 ins->klass = constrained_call;
6875 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
6876 CHECK_CFG_EXCEPTION;
6877 } else if (!constrained_call->valuetype) {
6878 int dreg = alloc_ireg_ref (cfg);
6881 * The type parameter is instantiated as a reference
6882 * type. We have a managed pointer on the stack, so
6883 * we need to dereference it here.
6885 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6886 ins->type = STACK_OBJ;
6888 } else if (cmethod->klass->valuetype)
6890 constrained_call = NULL;
6893 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6897 * If the callee is a shared method, then its static cctor
6898 * might not get called after the call was patched.
6900 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6901 emit_generic_class_init (cfg, cmethod->klass);
6902 CHECK_TYPELOAD (cmethod->klass);
6905 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6906 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6907 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6908 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6909 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6912 * Pass vtable iff target method might
6913 * be shared, which means that sharing
6914 * is enabled for its class and its
6915 * context is sharable (and it's not a
6918 if (sharing_enabled && context_sharable &&
6919 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6923 if (cmethod && mini_method_get_context (cmethod) &&
6924 mini_method_get_context (cmethod)->method_inst) {
6925 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6926 MonoGenericContext *context = mini_method_get_context (cmethod);
6927 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6929 g_assert (!pass_vtable);
6931 if (sharing_enabled && context_sharable)
6935 if (cfg->generic_sharing_context && cmethod) {
6936 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6938 context_used = mono_method_check_context_used (cmethod);
6940 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6941 /* Generic method interface
6942 calls are resolved via a
6943 helper function and don't
6945 if (!cmethod_context || !cmethod_context->method_inst)
6946 pass_imt_from_rgctx = TRUE;
6950 * If a shared method calls another
6951 * shared method then the caller must
6952 * have a generic sharing context
6953 * because the magic trampoline
6954 * requires it. FIXME: We shouldn't
6955 * have to force the vtable/mrgctx
6956 * variable here. Instead there
6957 * should be a flag in the cfg to
6958 * request a generic sharing context.
6961 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6962 mono_get_vtable_var (cfg);
6967 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6969 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6971 CHECK_TYPELOAD (cmethod->klass);
6972 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6977 g_assert (!vtable_arg);
6979 if (!cfg->compile_aot) {
6981 * emit_get_rgctx_method () calls mono_class_vtable () so check
6982 * for type load errors before.
6984 mono_class_setup_vtable (cmethod->klass);
6985 CHECK_TYPELOAD (cmethod->klass);
6988 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6990 /* !marshalbyref is needed to properly handle generic methods + remoting */
6991 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6992 MONO_METHOD_IS_FINAL (cmethod)) &&
6993 !cmethod->klass->marshalbyref) {
7000 if (pass_imt_from_rgctx) {
7001 g_assert (!pass_vtable);
7004 imt_arg = emit_get_rgctx_method (cfg, context_used,
7005 cmethod, MONO_RGCTX_INFO_METHOD);
7009 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
7011 /* Calling virtual generic methods */
7012 if (cmethod && virtual &&
7013 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
7014 !(MONO_METHOD_IS_FINAL (cmethod) &&
7015 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
7016 mono_method_signature (cmethod)->generic_param_count) {
7017 MonoInst *this_temp, *this_arg_temp, *store;
7018 MonoInst *iargs [4];
7020 g_assert (mono_method_signature (cmethod)->is_inflated);
7022 /* Prevent inlining of methods that contain indirect calls */
7025 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
7026 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt) {
7027 g_assert (!imt_arg);
7029 g_assert (cmethod->is_inflated);
7030 imt_arg = emit_get_rgctx_method (cfg, context_used,
7031 cmethod, MONO_RGCTX_INFO_METHOD);
7032 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg, NULL);
7036 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
7037 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
7038 MONO_ADD_INS (bblock, store);
7040 /* FIXME: This should be a managed pointer */
7041 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7043 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
7044 iargs [1] = emit_get_rgctx_method (cfg, context_used,
7045 cmethod, MONO_RGCTX_INFO_METHOD);
7046 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
7047 addr = mono_emit_jit_icall (cfg,
7048 mono_helper_compile_generic_method, iargs);
7050 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
7052 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL);
7055 if (!MONO_TYPE_IS_VOID (fsig->ret))
7056 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7058 CHECK_CFG_EXCEPTION;
7063 emit_seq_point (cfg, method, ip, FALSE);
7068 * Implement a workaround for the inherent races involved in locking:
7074 * If a thread abort happens between the call to Monitor.Enter () and the start of the
7075 * try block, the Exit () won't be executed, see:
7076 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
7077 * To work around this, we extend such try blocks to include the last x bytes
7078 * of the Monitor.Enter () call.
7080 if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
7081 MonoBasicBlock *tbb;
7083 GET_BBLOCK (cfg, tbb, ip + 5);
7085 * Only extend try blocks with a finally, to avoid catching exceptions thrown
7086 * from Monitor.Enter like ArgumentNullException.
7088 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
7089 /* Mark this bblock as needing to be extended */
7090 tbb->extend_try_block = TRUE;
7094 /* Conversion to a JIT intrinsic */
7095 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
7097 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7098 type_to_eval_stack_type ((cfg), fsig->ret, ins);
7103 CHECK_CFG_EXCEPTION;
7108 emit_seq_point (cfg, method, ip, FALSE);
7113 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
7114 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
7115 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
7116 !g_list_find (dont_inline, cmethod)) {
7118 gboolean always = FALSE;
7120 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
7121 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
7122 /* Prevent inlining of methods that call wrappers */
7124 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
7128 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, always))) {
7130 cfg->real_offset += 5;
7133 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7134 /* *sp is already set by inline_method */
7138 inline_costs += costs;
7141 emit_seq_point (cfg, method, ip, FALSE);
7146 inline_costs += 10 * num_calls++;
7148 /* Tail recursion elimination */
7149 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
7150 gboolean has_vtargs = FALSE;
7153 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
7156 /* keep it simple */
7157 for (i = fsig->param_count - 1; i >= 0; i--) {
7158 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
7163 for (i = 0; i < n; ++i)
7164 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
7165 MONO_INST_NEW (cfg, ins, OP_BR);
7166 MONO_ADD_INS (bblock, ins);
7167 tblock = start_bblock->out_bb [0];
7168 link_bblock (cfg, bblock, tblock);
7169 ins->inst_target_bb = tblock;
7170 start_new_bblock = 1;
7172 /* skip the CEE_RET, too */
7173 if (ip_in_bb (cfg, bblock, ip + 5))
7183 /* Generic sharing */
7184 /* FIXME: only do this for generic methods if
7185 they are not shared! */
7186 if (context_used && !imt_arg && !array_rank &&
7187 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7188 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
7189 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
7190 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
7193 g_assert (cfg->generic_sharing_context && cmethod);
7197 * We are compiling a call to a
7198 * generic method from shared code,
7199 * which means that we have to look up
7200 * the method in the rgctx and do an
7203 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7206 /* Indirect calls */
7208 g_assert (!imt_arg);
7210 if (*ip == CEE_CALL)
7211 g_assert (context_used);
7212 else if (*ip == CEE_CALLI)
7213 g_assert (!vtable_arg);
7215 /* FIXME: what the hell is this??? */
7216 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
7217 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
7219 /* Prevent inlining of methods with indirect calls */
7223 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, vtable_arg);
7225 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
7227 * Instead of emitting an indirect call, emit a direct call
7228 * with the contents of the aotconst as the patch info.
7230 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
7232 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
7233 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
7236 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL);
7239 if (!MONO_TYPE_IS_VOID (fsig->ret))
7240 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7242 CHECK_CFG_EXCEPTION;
7247 emit_seq_point (cfg, method, ip, FALSE);
7255 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
7256 MonoInst *val = sp [fsig->param_count];
7258 if (val->type == STACK_OBJ) {
7259 MonoInst *iargs [2];
7264 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
7267 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
7268 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
7269 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
7270 emit_write_barrier (cfg, addr, val, 0);
7271 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
7272 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
7274 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
7277 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
7278 if (!cmethod->klass->element_class->valuetype && !readonly)
7279 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
7280 CHECK_TYPELOAD (cmethod->klass);
7283 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
7286 g_assert_not_reached ();
7289 CHECK_CFG_EXCEPTION;
7293 emit_seq_point (cfg, method, ip, FALSE);
7297 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
7299 if (!MONO_TYPE_IS_VOID (fsig->ret))
7300 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7302 CHECK_CFG_EXCEPTION;
7307 emit_seq_point (cfg, method, ip, FALSE);
7311 /* Tail prefix / tail call optimization */
7313 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
7314 /* FIXME: runtime generic context pointer for jumps? */
7315 /* FIXME: handle this for generic sharing eventually */
7316 supported_tail_call = cmethod &&
7317 ((((ins_flag & MONO_INST_TAILCALL) && (*ip == CEE_CALL))
7318 ))//|| ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && ip [5] == CEE_RET))
7319 && !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig);
7321 if (supported_tail_call) {
7324 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
7327 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
7329 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
7330 /* Handle tail calls similarly to calls */
7331 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE, FALSE);
7333 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
7334 call->tail_call = TRUE;
7335 call->method = cmethod;
7336 call->signature = mono_method_signature (cmethod);
7339 * We implement tail calls by storing the actual arguments into the
7340 * argument variables, then emitting a CEE_JMP.
7342 for (i = 0; i < n; ++i) {
7343 /* Prevent argument from being register allocated */
7344 arg_array [i]->flags |= MONO_INST_VOLATILE;
7345 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
7349 ins = (MonoInst*)call;
7350 ins->inst_p0 = cmethod;
7351 ins->inst_p1 = arg_array [0];
7352 MONO_ADD_INS (bblock, ins);
7353 link_bblock (cfg, bblock, end_bblock);
7354 start_new_bblock = 1;
7356 CHECK_CFG_EXCEPTION;
7361 // FIXME: Eliminate unreachable epilogs
7364 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
7365 * only reachable from this call.
7367 GET_BBLOCK (cfg, tblock, ip);
7368 if (tblock == bblock || tblock->in_count == 0)
7374 * Synchronized wrappers.
7375 * It's hard to determine where to replace a method with its synchronized
7376 * wrapper without causing an infinite recursion. The current solution is
7377 * to add the synchronized wrapper in the trampolines, and to
7378 * change the called method to a dummy wrapper, and resolve that wrapper
7379 * to the real method in mono_jit_compile_method ().
7381 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED && mono_marshal_method_from_wrapper (cfg->method) == cmethod) {
7382 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
7387 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
7388 imt_arg, vtable_arg);
7390 if (!MONO_TYPE_IS_VOID (fsig->ret))
7391 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7393 CHECK_CFG_EXCEPTION;
7398 emit_seq_point (cfg, method, ip, FALSE);
7402 if (cfg->method != method) {
7403 /* return from inlined method */
7405 * If in_count == 0, that means the ret is unreachable due to
7406 * being preceded by a throw. In that case, inline_method () will
7407 * handle setting the return value
7408 * (test case: test_0_inline_throw ()).
7410 if (return_var && cfg->cbb->in_count) {
7411 MonoType *ret_type = mono_method_signature (method)->ret;
7417 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
7420 //g_assert (returnvar != -1);
7421 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
7422 cfg->ret_var_set = TRUE;
7426 MonoType *ret_type = mono_method_signature (method)->ret;
7428 if (seq_points && !sym_seq_points) {
7430 * Place a seq point here too even through the IL stack is not
7431 * empty, so a step over on
7434 * will work correctly.
7436 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
7437 MONO_ADD_INS (cfg->cbb, ins);
7440 g_assert (!return_var);
7444 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
7447 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
7450 if (!cfg->vret_addr) {
7453 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
7455 EMIT_NEW_RETLOADA (cfg, ret_addr);
7457 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
7458 ins->klass = mono_class_from_mono_type (ret_type);
7461 #ifdef MONO_ARCH_SOFT_FLOAT
7462 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
7463 MonoInst *iargs [1];
7467 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
7468 mono_arch_emit_setret (cfg, method, conv);
7470 mono_arch_emit_setret (cfg, method, *sp);
7473 mono_arch_emit_setret (cfg, method, *sp);
7478 if (sp != stack_start)
7480 MONO_INST_NEW (cfg, ins, OP_BR);
7482 ins->inst_target_bb = end_bblock;
7483 MONO_ADD_INS (bblock, ins);
7484 link_bblock (cfg, bblock, end_bblock);
7485 start_new_bblock = 1;
7489 MONO_INST_NEW (cfg, ins, OP_BR);
7491 target = ip + 1 + (signed char)(*ip);
7493 GET_BBLOCK (cfg, tblock, target);
7494 link_bblock (cfg, bblock, tblock);
7495 ins->inst_target_bb = tblock;
7496 if (sp != stack_start) {
7497 handle_stack_args (cfg, stack_start, sp - stack_start);
7499 CHECK_UNVERIFIABLE (cfg);
7501 MONO_ADD_INS (bblock, ins);
7502 start_new_bblock = 1;
7503 inline_costs += BRANCH_COST;
7517 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
7519 target = ip + 1 + *(signed char*)ip;
7525 inline_costs += BRANCH_COST;
7529 MONO_INST_NEW (cfg, ins, OP_BR);
7532 target = ip + 4 + (gint32)read32(ip);
7534 GET_BBLOCK (cfg, tblock, target);
7535 link_bblock (cfg, bblock, tblock);
7536 ins->inst_target_bb = tblock;
7537 if (sp != stack_start) {
7538 handle_stack_args (cfg, stack_start, sp - stack_start);
7540 CHECK_UNVERIFIABLE (cfg);
7543 MONO_ADD_INS (bblock, ins);
7545 start_new_bblock = 1;
7546 inline_costs += BRANCH_COST;
7553 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
7554 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
7555 guint32 opsize = is_short ? 1 : 4;
7557 CHECK_OPSIZE (opsize);
7559 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
7562 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
7567 GET_BBLOCK (cfg, tblock, target);
7568 link_bblock (cfg, bblock, tblock);
7569 GET_BBLOCK (cfg, tblock, ip);
7570 link_bblock (cfg, bblock, tblock);
7572 if (sp != stack_start) {
7573 handle_stack_args (cfg, stack_start, sp - stack_start);
7574 CHECK_UNVERIFIABLE (cfg);
7577 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
7578 cmp->sreg1 = sp [0]->dreg;
7579 type_from_op (cmp, sp [0], NULL);
7582 #if SIZEOF_REGISTER == 4
7583 if (cmp->opcode == OP_LCOMPARE_IMM) {
7584 /* Convert it to OP_LCOMPARE */
7585 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7586 ins->type = STACK_I8;
7587 ins->dreg = alloc_dreg (cfg, STACK_I8);
7589 MONO_ADD_INS (bblock, ins);
7590 cmp->opcode = OP_LCOMPARE;
7591 cmp->sreg2 = ins->dreg;
7594 MONO_ADD_INS (bblock, cmp);
7596 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
7597 type_from_op (ins, sp [0], NULL);
7598 MONO_ADD_INS (bblock, ins);
7599 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
7600 GET_BBLOCK (cfg, tblock, target);
7601 ins->inst_true_bb = tblock;
7602 GET_BBLOCK (cfg, tblock, ip);
7603 ins->inst_false_bb = tblock;
7604 start_new_bblock = 2;
7607 inline_costs += BRANCH_COST;
7622 MONO_INST_NEW (cfg, ins, *ip);
7624 target = ip + 4 + (gint32)read32(ip);
7630 inline_costs += BRANCH_COST;
7634 MonoBasicBlock **targets;
7635 MonoBasicBlock *default_bblock;
7636 MonoJumpInfoBBTable *table;
7637 int offset_reg = alloc_preg (cfg);
7638 int target_reg = alloc_preg (cfg);
7639 int table_reg = alloc_preg (cfg);
7640 int sum_reg = alloc_preg (cfg);
7641 gboolean use_op_switch;
7645 n = read32 (ip + 1);
7648 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
7652 CHECK_OPSIZE (n * sizeof (guint32));
7653 target = ip + n * sizeof (guint32);
7655 GET_BBLOCK (cfg, default_bblock, target);
7656 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7658 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
7659 for (i = 0; i < n; ++i) {
7660 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
7661 targets [i] = tblock;
7662 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
7666 if (sp != stack_start) {
7668 * Link the current bb with the targets as well, so handle_stack_args
7669 * will set their in_stack correctly.
7671 link_bblock (cfg, bblock, default_bblock);
7672 for (i = 0; i < n; ++i)
7673 link_bblock (cfg, bblock, targets [i]);
7675 handle_stack_args (cfg, stack_start, sp - stack_start);
7677 CHECK_UNVERIFIABLE (cfg);
7680 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
7681 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
7684 for (i = 0; i < n; ++i)
7685 link_bblock (cfg, bblock, targets [i]);
7687 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
7688 table->table = targets;
7689 table->table_size = n;
7691 use_op_switch = FALSE;
7693 /* ARM implements SWITCH statements differently */
7694 /* FIXME: Make it use the generic implementation */
7695 if (!cfg->compile_aot)
7696 use_op_switch = TRUE;
7699 if (COMPILE_LLVM (cfg))
7700 use_op_switch = TRUE;
7702 cfg->cbb->has_jump_table = 1;
7704 if (use_op_switch) {
7705 MONO_INST_NEW (cfg, ins, OP_SWITCH);
7706 ins->sreg1 = src1->dreg;
7707 ins->inst_p0 = table;
7708 ins->inst_many_bb = targets;
7709 ins->klass = GUINT_TO_POINTER (n);
7710 MONO_ADD_INS (cfg->cbb, ins);
7712 if (sizeof (gpointer) == 8)
7713 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
7715 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
7717 #if SIZEOF_REGISTER == 8
7718 /* The upper word might not be zero, and we add it to a 64 bit address later */
7719 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
7722 if (cfg->compile_aot) {
7723 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
7725 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
7726 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
7727 ins->inst_p0 = table;
7728 ins->dreg = table_reg;
7729 MONO_ADD_INS (cfg->cbb, ins);
7732 /* FIXME: Use load_memindex */
7733 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
7734 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
7735 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
7737 start_new_bblock = 1;
7738 inline_costs += (BRANCH_COST * 2);
7758 dreg = alloc_freg (cfg);
7761 dreg = alloc_lreg (cfg);
7764 dreg = alloc_ireg_ref (cfg);
7767 dreg = alloc_preg (cfg);
7770 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
7771 ins->type = ldind_type [*ip - CEE_LDIND_I1];
7772 ins->flags |= ins_flag;
7774 MONO_ADD_INS (bblock, ins);
7776 if (ins->flags & MONO_INST_VOLATILE) {
7777 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
7778 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
7779 emit_memory_barrier (cfg, FullBarrier);
7794 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
7795 ins->flags |= ins_flag;
7798 if (ins->flags & MONO_INST_VOLATILE) {
7799 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
7800 * FIXME it's questionable if release semantics require a full barrier or just StoreStore*/
7801 emit_memory_barrier (cfg, FullBarrier);
7804 MONO_ADD_INS (bblock, ins);
7806 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
7807 emit_write_barrier (cfg, sp [0], sp [1], -1);
7816 MONO_INST_NEW (cfg, ins, (*ip));
7818 ins->sreg1 = sp [0]->dreg;
7819 ins->sreg2 = sp [1]->dreg;
7820 type_from_op (ins, sp [0], sp [1]);
7822 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7824 /* Use the immediate opcodes if possible */
7825 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
7826 int imm_opcode = mono_op_to_op_imm (ins->opcode);
7827 if (imm_opcode != -1) {
7828 ins->opcode = imm_opcode;
7829 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7832 sp [1]->opcode = OP_NOP;
7836 MONO_ADD_INS ((cfg)->cbb, (ins));
7838 *sp++ = mono_decompose_opcode (cfg, ins);
7855 MONO_INST_NEW (cfg, ins, (*ip));
7857 ins->sreg1 = sp [0]->dreg;
7858 ins->sreg2 = sp [1]->dreg;
7859 type_from_op (ins, sp [0], sp [1]);
7861 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7862 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7864 /* FIXME: Pass opcode to is_inst_imm */
7866 /* Use the immediate opcodes if possible */
7867 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7870 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7871 if (imm_opcode != -1) {
7872 ins->opcode = imm_opcode;
7873 if (sp [1]->opcode == OP_I8CONST) {
7874 #if SIZEOF_REGISTER == 8
7875 ins->inst_imm = sp [1]->inst_l;
7877 ins->inst_ls_word = sp [1]->inst_ls_word;
7878 ins->inst_ms_word = sp [1]->inst_ms_word;
7882 ins->inst_imm = (gssize)(sp [1]->inst_c0);
7885 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7886 if (sp [1]->next == NULL)
7887 sp [1]->opcode = OP_NOP;
7890 MONO_ADD_INS ((cfg)->cbb, (ins));
7892 *sp++ = mono_decompose_opcode (cfg, ins);
7905 case CEE_CONV_OVF_I8:
7906 case CEE_CONV_OVF_U8:
7910 /* Special case this earlier so we have long constants in the IR */
7911 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7912 int data = sp [-1]->inst_c0;
7913 sp [-1]->opcode = OP_I8CONST;
7914 sp [-1]->type = STACK_I8;
7915 #if SIZEOF_REGISTER == 8
7916 if ((*ip) == CEE_CONV_U8)
7917 sp [-1]->inst_c0 = (guint32)data;
7919 sp [-1]->inst_c0 = data;
7921 sp [-1]->inst_ls_word = data;
7922 if ((*ip) == CEE_CONV_U8)
7923 sp [-1]->inst_ms_word = 0;
7925 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7927 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7934 case CEE_CONV_OVF_I4:
7935 case CEE_CONV_OVF_I1:
7936 case CEE_CONV_OVF_I2:
7937 case CEE_CONV_OVF_I:
7938 case CEE_CONV_OVF_U:
7941 if (sp [-1]->type == STACK_R8) {
7942 ADD_UNOP (CEE_CONV_OVF_I8);
7949 case CEE_CONV_OVF_U1:
7950 case CEE_CONV_OVF_U2:
7951 case CEE_CONV_OVF_U4:
7954 if (sp [-1]->type == STACK_R8) {
7955 ADD_UNOP (CEE_CONV_OVF_U8);
7962 case CEE_CONV_OVF_I1_UN:
7963 case CEE_CONV_OVF_I2_UN:
7964 case CEE_CONV_OVF_I4_UN:
7965 case CEE_CONV_OVF_I8_UN:
7966 case CEE_CONV_OVF_U1_UN:
7967 case CEE_CONV_OVF_U2_UN:
7968 case CEE_CONV_OVF_U4_UN:
7969 case CEE_CONV_OVF_U8_UN:
7970 case CEE_CONV_OVF_I_UN:
7971 case CEE_CONV_OVF_U_UN:
7978 CHECK_CFG_EXCEPTION;
7982 case CEE_ADD_OVF_UN:
7984 case CEE_MUL_OVF_UN:
7986 case CEE_SUB_OVF_UN:
7994 token = read32 (ip + 1);
7995 klass = mini_get_class (method, token, generic_context);
7996 CHECK_TYPELOAD (klass);
7998 if (generic_class_is_reference_type (cfg, klass)) {
7999 MonoInst *store, *load;
8000 int dreg = alloc_ireg_ref (cfg);
8002 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
8003 load->flags |= ins_flag;
8004 MONO_ADD_INS (cfg->cbb, load);
8006 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
8007 store->flags |= ins_flag;
8008 MONO_ADD_INS (cfg->cbb, store);
8010 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
8011 emit_write_barrier (cfg, sp [0], sp [1], -1);
8013 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
8025 token = read32 (ip + 1);
8026 klass = mini_get_class (method, token, generic_context);
8027 CHECK_TYPELOAD (klass);
8029 /* Optimize the common ldobj+stloc combination */
8039 loc_index = ip [5] - CEE_STLOC_0;
8046 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
8047 CHECK_LOCAL (loc_index);
8049 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
8050 ins->dreg = cfg->locals [loc_index]->dreg;
8056 /* Optimize the ldobj+stobj combination */
8057 /* The reference case ends up being a load+store anyway */
8058 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
8063 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
8070 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
8079 CHECK_STACK_OVF (1);
8081 n = read32 (ip + 1);
8083 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
8084 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
8085 ins->type = STACK_OBJ;
8088 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
8089 MonoInst *iargs [1];
8091 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
8092 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
8094 if (cfg->opt & MONO_OPT_SHARED) {
8095 MonoInst *iargs [3];
8097 if (cfg->compile_aot) {
8098 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
8100 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8101 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
8102 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
8103 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
8104 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
8106 if (bblock->out_of_line) {
8107 MonoInst *iargs [2];
8109 if (image == mono_defaults.corlib) {
8111 * Avoid relocations in AOT and save some space by using a
8112 * version of helper_ldstr specialized to mscorlib.
8114 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
8115 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
8117 /* Avoid creating the string object */
8118 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8119 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
8120 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
8124 if (cfg->compile_aot) {
8125 NEW_LDSTRCONST (cfg, ins, image, n);
8127 MONO_ADD_INS (bblock, ins);
8130 NEW_PCONST (cfg, ins, NULL);
8131 ins->type = STACK_OBJ;
8132 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
8134 OUT_OF_MEMORY_FAILURE;
8137 MONO_ADD_INS (bblock, ins);
8146 MonoInst *iargs [2];
8147 MonoMethodSignature *fsig;
8150 MonoInst *vtable_arg = NULL;
8153 token = read32 (ip + 1);
8154 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8155 if (!cmethod || mono_loader_get_last_error ())
8157 fsig = mono_method_get_signature (cmethod, image, token);
8161 mono_save_token_info (cfg, image, token, cmethod);
8163 if (!mono_class_init (cmethod->klass))
8164 TYPE_LOAD_ERROR (cmethod->klass);
8166 if (cfg->generic_sharing_context)
8167 context_used = mono_method_check_context_used (cmethod);
8169 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
8170 if (check_linkdemand (cfg, method, cmethod))
8172 CHECK_CFG_EXCEPTION;
8173 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
8174 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
8177 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8178 emit_generic_class_init (cfg, cmethod->klass);
8179 CHECK_TYPELOAD (cmethod->klass);
8182 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
8183 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
8184 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
8185 mono_class_vtable (cfg->domain, cmethod->klass);
8186 CHECK_TYPELOAD (cmethod->klass);
8188 vtable_arg = emit_get_rgctx_method (cfg, context_used,
8189 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8192 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
8193 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8195 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8197 CHECK_TYPELOAD (cmethod->klass);
8198 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8203 n = fsig->param_count;
8207 * Generate smaller code for the common newobj <exception> instruction in
8208 * argument checking code.
8210 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
8211 is_exception_class (cmethod->klass) && n <= 2 &&
8212 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
8213 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
8214 MonoInst *iargs [3];
8216 g_assert (!vtable_arg);
8220 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
8223 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
8227 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
8232 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
8235 g_assert_not_reached ();
8243 /* move the args to allow room for 'this' in the first position */
8249 /* check_call_signature () requires sp[0] to be set */
8250 this_ins.type = STACK_OBJ;
8252 if (check_call_signature (cfg, fsig, sp))
8257 if (mini_class_is_system_array (cmethod->klass)) {
8258 g_assert (!vtable_arg);
8260 *sp = emit_get_rgctx_method (cfg, context_used,
8261 cmethod, MONO_RGCTX_INFO_METHOD);
8263 /* Avoid varargs in the common case */
8264 if (fsig->param_count == 1)
8265 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
8266 else if (fsig->param_count == 2)
8267 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
8268 else if (fsig->param_count == 3)
8269 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
8271 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
8272 } else if (cmethod->string_ctor) {
8273 g_assert (!context_used);
8274 g_assert (!vtable_arg);
8275 /* we simply pass a null pointer */
8276 EMIT_NEW_PCONST (cfg, *sp, NULL);
8277 /* now call the string ctor */
8278 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL, NULL);
8280 MonoInst* callvirt_this_arg = NULL;
8282 if (cmethod->klass->valuetype) {
8283 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
8284 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
8285 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
8290 * The code generated by mini_emit_virtual_call () expects
8291 * iargs [0] to be a boxed instance, but luckily the vcall
8292 * will be transformed into a normal call there.
8294 } else if (context_used) {
8295 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
8298 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8300 CHECK_TYPELOAD (cmethod->klass);
8303 * TypeInitializationExceptions thrown from the mono_runtime_class_init
8304 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
8305 * As a workaround, we call class cctors before allocating objects.
8307 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8308 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8309 if (cfg->verbose_level > 2)
8310 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
8311 class_inits = g_slist_prepend (class_inits, vtable);
8314 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
8317 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
8320 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
8322 /* Now call the actual ctor */
8323 /* Avoid virtual calls to ctors if possible */
8324 if (cmethod->klass->marshalbyref)
8325 callvirt_this_arg = sp [0];
8328 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
8329 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8330 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8335 CHECK_CFG_EXCEPTION;
8336 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
8337 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
8338 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
8339 !g_list_find (dont_inline, cmethod)) {
8342 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
8343 cfg->real_offset += 5;
8346 inline_costs += costs - 5;
8349 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL, NULL);
8351 } else if (context_used &&
8352 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
8353 !mono_class_generic_sharing_enabled (cmethod->klass))) {
8354 MonoInst *cmethod_addr;
8356 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
8357 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8359 mono_emit_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
8362 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp,
8363 callvirt_this_arg, NULL, vtable_arg);
8367 if (alloc == NULL) {
8369 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
8370 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
8384 token = read32 (ip + 1);
8385 klass = mini_get_class (method, token, generic_context);
8386 CHECK_TYPELOAD (klass);
8387 if (sp [0]->type != STACK_OBJ)
8390 if (cfg->generic_sharing_context)
8391 context_used = mono_class_check_context_used (klass);
8393 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
8394 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
8401 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8404 if (cfg->compile_aot)
8405 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
8407 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8409 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
8410 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
8413 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8414 MonoMethod *mono_castclass;
8415 MonoInst *iargs [1];
8418 mono_castclass = mono_marshal_get_castclass (klass);
8421 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
8422 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8423 CHECK_CFG_EXCEPTION;
8424 g_assert (costs > 0);
8427 cfg->real_offset += 5;
8432 inline_costs += costs;
8435 ins = handle_castclass (cfg, klass, *sp, context_used);
8436 CHECK_CFG_EXCEPTION;
8446 token = read32 (ip + 1);
8447 klass = mini_get_class (method, token, generic_context);
8448 CHECK_TYPELOAD (klass);
8449 if (sp [0]->type != STACK_OBJ)
8452 if (cfg->generic_sharing_context)
8453 context_used = mono_class_check_context_used (klass);
8455 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
8456 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
8463 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8466 if (cfg->compile_aot)
8467 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
8469 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8471 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
8474 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8475 MonoMethod *mono_isinst;
8476 MonoInst *iargs [1];
8479 mono_isinst = mono_marshal_get_isinst (klass);
8482 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
8483 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8484 CHECK_CFG_EXCEPTION;
8485 g_assert (costs > 0);
8488 cfg->real_offset += 5;
8493 inline_costs += costs;
8496 ins = handle_isinst (cfg, klass, *sp, context_used);
8497 CHECK_CFG_EXCEPTION;
8504 case CEE_UNBOX_ANY: {
8508 token = read32 (ip + 1);
8509 klass = mini_get_class (method, token, generic_context);
8510 CHECK_TYPELOAD (klass);
8512 mono_save_token_info (cfg, image, token, klass);
8514 if (cfg->generic_sharing_context)
8515 context_used = mono_class_check_context_used (klass);
8517 if (generic_class_is_reference_type (cfg, klass)) {
8518 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
8519 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
8520 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
8527 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8530 /*FIXME AOT support*/
8531 if (cfg->compile_aot)
8532 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
8534 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8536 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
8537 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
8540 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8541 MonoMethod *mono_castclass;
8542 MonoInst *iargs [1];
8545 mono_castclass = mono_marshal_get_castclass (klass);
8548 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
8549 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8550 CHECK_CFG_EXCEPTION;
8551 g_assert (costs > 0);
8554 cfg->real_offset += 5;
8558 inline_costs += costs;
8560 ins = handle_castclass (cfg, klass, *sp, context_used);
8561 CHECK_CFG_EXCEPTION;
8569 if (mono_class_is_nullable (klass)) {
8570 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
8577 ins = handle_unbox (cfg, klass, sp, context_used);
8583 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
8596 token = read32 (ip + 1);
8597 klass = mini_get_class (method, token, generic_context);
8598 CHECK_TYPELOAD (klass);
8600 mono_save_token_info (cfg, image, token, klass);
8602 if (cfg->generic_sharing_context)
8603 context_used = mono_class_check_context_used (klass);
8605 if (generic_class_is_reference_type (cfg, klass)) {
8611 if (klass == mono_defaults.void_class)
8613 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
8615 /* frequent check in generic code: box (struct), brtrue */
8617 // FIXME: LLVM can't handle the inconsistent bb linking
8618 if (!mono_class_is_nullable (klass) &&
8619 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
8620 (ip [5] == CEE_BRTRUE ||
8621 ip [5] == CEE_BRTRUE_S ||
8622 ip [5] == CEE_BRFALSE ||
8623 ip [5] == CEE_BRFALSE_S)) {
8624 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
8626 MonoBasicBlock *true_bb, *false_bb;
8630 if (cfg->verbose_level > 3) {
8631 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8632 printf ("<box+brtrue opt>\n");
8640 target = ip + 1 + (signed char)(*ip);
8647 target = ip + 4 + (gint)(read32 (ip));
8651 g_assert_not_reached ();
8655 * We need to link both bblocks, since it is needed for handling stack
8656 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
8657 * Branching to only one of them would lead to inconsistencies, so
8658 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
8660 GET_BBLOCK (cfg, true_bb, target);
8661 GET_BBLOCK (cfg, false_bb, ip);
8663 mono_link_bblock (cfg, cfg->cbb, true_bb);
8664 mono_link_bblock (cfg, cfg->cbb, false_bb);
8666 if (sp != stack_start) {
8667 handle_stack_args (cfg, stack_start, sp - stack_start);
8669 CHECK_UNVERIFIABLE (cfg);
8672 if (COMPILE_LLVM (cfg)) {
8673 dreg = alloc_ireg (cfg);
8674 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
8675 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
8677 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
8679 /* The JIT can't eliminate the iconst+compare */
8680 MONO_INST_NEW (cfg, ins, OP_BR);
8681 ins->inst_target_bb = is_true ? true_bb : false_bb;
8682 MONO_ADD_INS (cfg->cbb, ins);
8685 start_new_bblock = 1;
8689 *sp++ = handle_box (cfg, val, klass, context_used);
8691 CHECK_CFG_EXCEPTION;
8700 token = read32 (ip + 1);
8701 klass = mini_get_class (method, token, generic_context);
8702 CHECK_TYPELOAD (klass);
8704 mono_save_token_info (cfg, image, token, klass);
8706 if (cfg->generic_sharing_context)
8707 context_used = mono_class_check_context_used (klass);
8709 if (mono_class_is_nullable (klass)) {
8712 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
8713 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
8717 ins = handle_unbox (cfg, klass, sp, context_used);
8730 MonoClassField *field;
8733 gboolean is_instance;
8735 gpointer addr = NULL;
8736 gboolean is_special_static;
8738 MonoInst *store_val = NULL;
8741 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
8743 if (op == CEE_STFLD) {
8751 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
8753 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
8756 if (op == CEE_STSFLD) {
8764 token = read32 (ip + 1);
8765 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8766 field = mono_method_get_wrapper_data (method, token);
8767 klass = field->parent;
8770 field = mono_field_from_token (image, token, &klass, generic_context);
8774 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8775 FIELD_ACCESS_FAILURE;
8776 mono_class_init (klass);
8778 if (is_instance && *ip != CEE_LDFLDA && is_magic_tls_access (field))
8781 /* if the class is Critical then transparent code cannot access it's fields */
8782 if (!is_instance && mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8783 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8785 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
8786 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
8787 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8788 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8792 * LDFLD etc. is usable on static fields as well, so convert those cases to
8795 if (is_instance && field->type->attrs & FIELD_ATTRIBUTE_STATIC) {
8807 g_assert_not_reached ();
8809 is_instance = FALSE;
8814 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
8815 if (op == CEE_STFLD) {
8816 if (target_type_is_incompatible (cfg, field->type, sp [1]))
8818 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8819 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
8820 MonoInst *iargs [5];
8823 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8824 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8825 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
8829 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8830 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
8831 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8832 CHECK_CFG_EXCEPTION;
8833 g_assert (costs > 0);
8835 cfg->real_offset += 5;
8838 inline_costs += costs;
8840 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
8845 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8847 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
8848 if (sp [0]->opcode != OP_LDADDR)
8849 store->flags |= MONO_INST_FAULT;
8851 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
8852 /* insert call to write barrier */
8856 dreg = alloc_ireg_mp (cfg);
8857 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8858 emit_write_barrier (cfg, ptr, sp [1], -1);
8861 store->flags |= ins_flag;
8868 if (is_instance && ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class)) {
8869 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
8870 MonoInst *iargs [4];
8873 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8874 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8875 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
8876 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8877 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
8878 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8879 CHECK_CFG_EXCEPTION;
8881 g_assert (costs > 0);
8883 cfg->real_offset += 5;
8887 inline_costs += costs;
8889 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
8892 } else if (is_instance) {
8893 if (sp [0]->type == STACK_VTYPE) {
8896 /* Have to compute the address of the variable */
8898 var = get_vreg_to_inst (cfg, sp [0]->dreg);
8900 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
8902 g_assert (var->klass == klass);
8904 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
8908 if (op == CEE_LDFLDA) {
8909 if (is_magic_tls_access (field)) {
8911 *sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
8913 if (sp [0]->type == STACK_OBJ) {
8914 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
8915 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
8918 dreg = alloc_ireg_mp (cfg);
8920 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8921 ins->klass = mono_class_from_mono_type (field->type);
8922 ins->type = STACK_MP;
8928 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8930 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
8931 load->flags |= ins_flag;
8932 if (sp [0]->opcode != OP_LDADDR)
8933 load->flags |= MONO_INST_FAULT;
8947 * We can only support shared generic static
8948 * field access on architectures where the
8949 * trampoline code has been extended to handle
8950 * the generic class init.
8952 #ifndef MONO_ARCH_VTABLE_REG
8953 GENERIC_SHARING_FAILURE (op);
8956 if (cfg->generic_sharing_context)
8957 context_used = mono_class_check_context_used (klass);
8959 ftype = mono_field_get_type (field);
8961 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
8964 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8965 * to be called here.
8967 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8968 mono_class_vtable (cfg->domain, klass);
8969 CHECK_TYPELOAD (klass);
8971 mono_domain_lock (cfg->domain);
8972 if (cfg->domain->special_static_fields)
8973 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8974 mono_domain_unlock (cfg->domain);
8976 is_special_static = mono_class_field_is_special_static (field);
8978 /* Generate IR to compute the field address */
8979 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
8981 * Fast access to TLS data
8982 * Inline version of get_thread_static_data () in
8986 int idx, static_data_reg, array_reg, dreg;
8987 MonoInst *thread_ins;
8989 // offset &= 0x7fffffff;
8990 // idx = (offset >> 24) - 1;
8991 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
8993 thread_ins = mono_get_thread_intrinsic (cfg);
8994 MONO_ADD_INS (cfg->cbb, thread_ins);
8995 static_data_reg = alloc_ireg (cfg);
8996 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
8998 if (cfg->compile_aot) {
8999 int offset_reg, offset2_reg, idx_reg;
9001 /* For TLS variables, this will return the TLS offset */
9002 EMIT_NEW_SFLDACONST (cfg, ins, field);
9003 offset_reg = ins->dreg;
9004 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
9005 idx_reg = alloc_ireg (cfg);
9006 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
9007 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
9008 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
9009 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
9010 array_reg = alloc_ireg (cfg);
9011 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
9012 offset2_reg = alloc_ireg (cfg);
9013 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
9014 dreg = alloc_ireg (cfg);
9015 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
9017 offset = (gsize)addr & 0x7fffffff;
9018 idx = (offset >> 24) - 1;
9020 array_reg = alloc_ireg (cfg);
9021 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
9022 dreg = alloc_ireg (cfg);
9023 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
9025 } else if ((cfg->opt & MONO_OPT_SHARED) ||
9026 (cfg->compile_aot && is_special_static) ||
9027 (context_used && is_special_static)) {
9028 MonoInst *iargs [2];
9030 g_assert (field->parent);
9031 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9033 iargs [1] = emit_get_rgctx_field (cfg, context_used,
9034 field, MONO_RGCTX_INFO_CLASS_FIELD);
9036 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
9038 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
9039 } else if (context_used) {
9040 MonoInst *static_data;
9043 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
9044 method->klass->name_space, method->klass->name, method->name,
9045 depth, field->offset);
9048 if (mono_class_needs_cctor_run (klass, method))
9049 emit_generic_class_init (cfg, klass);
9052 * The pointer we're computing here is
9054 * super_info.static_data + field->offset
9056 static_data = emit_get_rgctx_klass (cfg, context_used,
9057 klass, MONO_RGCTX_INFO_STATIC_DATA);
9059 if (field->offset == 0) {
9062 int addr_reg = mono_alloc_preg (cfg);
9063 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
9065 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
9066 MonoInst *iargs [2];
9068 g_assert (field->parent);
9069 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9070 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
9071 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
9073 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
9075 CHECK_TYPELOAD (klass);
9077 if (mini_field_access_needs_cctor_run (cfg, method, vtable)) {
9078 if (!(g_slist_find (class_inits, vtable))) {
9079 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
9080 if (cfg->verbose_level > 2)
9081 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
9082 class_inits = g_slist_prepend (class_inits, vtable);
9085 if (cfg->run_cctors) {
9087 /* This makes so that inline cannot trigger */
9088 /* .cctors: too many apps depend on them */
9089 /* running with a specific order... */
9090 if (! vtable->initialized)
9092 ex = mono_runtime_class_init_full (vtable, FALSE);
9094 set_exception_object (cfg, ex);
9095 goto exception_exit;
9099 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
9101 if (cfg->compile_aot)
9102 EMIT_NEW_SFLDACONST (cfg, ins, field);
9104 EMIT_NEW_PCONST (cfg, ins, addr);
9106 MonoInst *iargs [1];
9107 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
9108 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
9112 /* Generate IR to do the actual load/store operation */
9114 if (op == CEE_LDSFLDA) {
9115 ins->klass = mono_class_from_mono_type (ftype);
9116 ins->type = STACK_PTR;
9118 } else if (op == CEE_STSFLD) {
9121 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
9122 store->flags |= ins_flag;
9124 gboolean is_const = FALSE;
9125 MonoVTable *vtable = NULL;
9126 gpointer addr = NULL;
9128 if (!context_used) {
9129 vtable = mono_class_vtable (cfg->domain, klass);
9130 CHECK_TYPELOAD (klass);
9132 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
9133 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
9134 int ro_type = ftype->type;
9136 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
9137 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
9138 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
9140 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
9143 case MONO_TYPE_BOOLEAN:
9145 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
9149 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
9152 case MONO_TYPE_CHAR:
9154 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
9158 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
9163 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
9167 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
9173 case MONO_TYPE_FNPTR:
9174 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
9175 type_to_eval_stack_type ((cfg), field->type, *sp);
9178 case MONO_TYPE_STRING:
9179 case MONO_TYPE_OBJECT:
9180 case MONO_TYPE_CLASS:
9181 case MONO_TYPE_SZARRAY:
9182 case MONO_TYPE_ARRAY:
9183 if (!mono_gc_is_moving ()) {
9184 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
9185 type_to_eval_stack_type ((cfg), field->type, *sp);
9193 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
9198 case MONO_TYPE_VALUETYPE:
9208 CHECK_STACK_OVF (1);
9210 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
9211 load->flags |= ins_flag;
9224 token = read32 (ip + 1);
9225 klass = mini_get_class (method, token, generic_context);
9226 CHECK_TYPELOAD (klass);
9227 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
9228 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
9229 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
9230 generic_class_is_reference_type (cfg, klass)) {
9231 /* insert call to write barrier */
9232 emit_write_barrier (cfg, sp [0], sp [1], -1);
9244 const char *data_ptr;
9246 guint32 field_token;
9252 token = read32 (ip + 1);
9254 klass = mini_get_class (method, token, generic_context);
9255 CHECK_TYPELOAD (klass);
9257 if (cfg->generic_sharing_context)
9258 context_used = mono_class_check_context_used (klass);
9260 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
9261 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
9262 ins->sreg1 = sp [0]->dreg;
9263 ins->type = STACK_I4;
9264 ins->dreg = alloc_ireg (cfg);
9265 MONO_ADD_INS (cfg->cbb, ins);
9266 *sp = mono_decompose_opcode (cfg, ins);
9271 MonoClass *array_class = mono_array_class_get (klass, 1);
9272 /* FIXME: we cannot get a managed
9273 allocator because we can't get the
9274 open generic class's vtable. We
9275 have the same problem in
9276 handle_alloc(). This
9277 needs to be solved so that we can
9278 have managed allocs of shared
9281 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
9282 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
9284 MonoMethod *managed_alloc = NULL;
9286 /* FIXME: Decompose later to help abcrem */
9289 args [0] = emit_get_rgctx_klass (cfg, context_used,
9290 array_class, MONO_RGCTX_INFO_VTABLE);
9295 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
9297 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
9299 if (cfg->opt & MONO_OPT_SHARED) {
9300 /* Decompose now to avoid problems with references to the domainvar */
9301 MonoInst *iargs [3];
9303 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9304 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9307 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
9309 /* Decompose later since it is needed by abcrem */
9310 MonoClass *array_type = mono_array_class_get (klass, 1);
9311 mono_class_vtable (cfg->domain, array_type);
9312 CHECK_TYPELOAD (array_type);
9314 MONO_INST_NEW (cfg, ins, OP_NEWARR);
9315 ins->dreg = alloc_ireg_ref (cfg);
9316 ins->sreg1 = sp [0]->dreg;
9317 ins->inst_newa_class = klass;
9318 ins->type = STACK_OBJ;
9319 ins->klass = array_type;
9320 MONO_ADD_INS (cfg->cbb, ins);
9321 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
9322 cfg->cbb->has_array_access = TRUE;
9324 /* Needed so mono_emit_load_get_addr () gets called */
9325 mono_get_got_var (cfg);
9335 * we inline/optimize the initialization sequence if possible.
9336 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
9337 * for small sizes open code the memcpy
9338 * ensure the rva field is big enough
9340 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
9341 MonoMethod *memcpy_method = get_memcpy_method ();
9342 MonoInst *iargs [3];
9343 int add_reg = alloc_ireg_mp (cfg);
9345 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
9346 if (cfg->compile_aot) {
9347 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
9349 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
9351 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
9352 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9361 if (sp [0]->type != STACK_OBJ)
9364 MONO_INST_NEW (cfg, ins, OP_LDLEN);
9365 ins->dreg = alloc_preg (cfg);
9366 ins->sreg1 = sp [0]->dreg;
9367 ins->type = STACK_I4;
9368 /* This flag will be inherited by the decomposition */
9369 ins->flags |= MONO_INST_FAULT;
9370 MONO_ADD_INS (cfg->cbb, ins);
9371 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
9372 cfg->cbb->has_array_access = TRUE;
9380 if (sp [0]->type != STACK_OBJ)
9383 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9385 klass = mini_get_class (method, read32 (ip + 1), generic_context);
9386 CHECK_TYPELOAD (klass);
9387 /* we need to make sure that this array is exactly the type it needs
9388 * to be for correctness. the wrappers are lax with their usage
9389 * so we need to ignore them here
9391 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
9392 MonoClass *array_class = mono_array_class_get (klass, 1);
9393 mini_emit_check_array_type (cfg, sp [0], array_class);
9394 CHECK_TYPELOAD (array_class);
9398 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9413 case CEE_LDELEM_REF: {
9419 if (*ip == CEE_LDELEM) {
9421 token = read32 (ip + 1);
9422 klass = mini_get_class (method, token, generic_context);
9423 CHECK_TYPELOAD (klass);
9424 mono_class_init (klass);
9427 klass = array_access_to_klass (*ip);
9429 if (sp [0]->type != STACK_OBJ)
9432 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9434 if (sp [1]->opcode == OP_ICONST) {
9435 int array_reg = sp [0]->dreg;
9436 int index_reg = sp [1]->dreg;
9437 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
9439 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
9440 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
9442 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9443 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
9446 if (*ip == CEE_LDELEM)
9459 case CEE_STELEM_REF:
9466 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9468 if (*ip == CEE_STELEM) {
9470 token = read32 (ip + 1);
9471 klass = mini_get_class (method, token, generic_context);
9472 CHECK_TYPELOAD (klass);
9473 mono_class_init (klass);
9476 klass = array_access_to_klass (*ip);
9478 if (sp [0]->type != STACK_OBJ)
9481 /* storing a NULL doesn't need any of the complex checks in stelemref */
9482 if (generic_class_is_reference_type (cfg, klass) &&
9483 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
9484 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
9485 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
9486 MonoInst *iargs [3];
9489 mono_class_setup_vtable (obj_array);
9490 g_assert (helper->slot);
9492 if (sp [0]->type != STACK_OBJ)
9494 if (sp [2]->type != STACK_OBJ)
9501 mono_emit_method_call (cfg, helper, iargs, sp [0]);
9503 if (sp [1]->opcode == OP_ICONST) {
9504 int array_reg = sp [0]->dreg;
9505 int index_reg = sp [1]->dreg;
9506 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
9508 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
9509 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
9511 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9512 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
9516 if (*ip == CEE_STELEM)
9523 case CEE_CKFINITE: {
9527 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
9528 ins->sreg1 = sp [0]->dreg;
9529 ins->dreg = alloc_freg (cfg);
9530 ins->type = STACK_R8;
9531 MONO_ADD_INS (bblock, ins);
9533 *sp++ = mono_decompose_opcode (cfg, ins);
9538 case CEE_REFANYVAL: {
9539 MonoInst *src_var, *src;
9541 int klass_reg = alloc_preg (cfg);
9542 int dreg = alloc_preg (cfg);
9545 MONO_INST_NEW (cfg, ins, *ip);
9548 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
9549 CHECK_TYPELOAD (klass);
9550 mono_class_init (klass);
9552 if (cfg->generic_sharing_context)
9553 context_used = mono_class_check_context_used (klass);
9556 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9558 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9559 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9560 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
9563 MonoInst *klass_ins;
9565 klass_ins = emit_get_rgctx_klass (cfg, context_used,
9566 klass, MONO_RGCTX_INFO_KLASS);
9569 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
9570 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
9572 mini_emit_class_check (cfg, klass_reg, klass);
9574 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
9575 ins->type = STACK_MP;
9580 case CEE_MKREFANY: {
9581 MonoInst *loc, *addr;
9584 MONO_INST_NEW (cfg, ins, *ip);
9587 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
9588 CHECK_TYPELOAD (klass);
9589 mono_class_init (klass);
9591 if (cfg->generic_sharing_context)
9592 context_used = mono_class_check_context_used (klass);
9594 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
9595 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
9598 MonoInst *const_ins;
9599 int type_reg = alloc_preg (cfg);
9601 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
9602 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
9603 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
9604 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
9605 } else if (cfg->compile_aot) {
9606 int const_reg = alloc_preg (cfg);
9607 int type_reg = alloc_preg (cfg);
9609 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
9610 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
9611 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
9612 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
9614 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
9615 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
9617 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
9619 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
9620 ins->type = STACK_VTYPE;
9621 ins->klass = mono_defaults.typed_reference_class;
9628 MonoClass *handle_class;
9630 CHECK_STACK_OVF (1);
9633 n = read32 (ip + 1);
9635 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
9636 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9637 handle = mono_method_get_wrapper_data (method, n);
9638 handle_class = mono_method_get_wrapper_data (method, n + 1);
9639 if (handle_class == mono_defaults.typehandle_class)
9640 handle = &((MonoClass*)handle)->byval_arg;
9643 handle = mono_ldtoken (image, n, &handle_class, generic_context);
9647 mono_class_init (handle_class);
9648 if (cfg->generic_sharing_context) {
9649 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
9650 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
9651 /* This case handles ldtoken
9652 of an open type, like for
9655 } else if (handle_class == mono_defaults.typehandle_class) {
9656 /* If we get a MONO_TYPE_CLASS
9657 then we need to provide the
9659 instantiation of it. */
9660 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
9663 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
9664 } else if (handle_class == mono_defaults.fieldhandle_class)
9665 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
9666 else if (handle_class == mono_defaults.methodhandle_class)
9667 context_used = mono_method_check_context_used (handle);
9669 g_assert_not_reached ();
9672 if ((cfg->opt & MONO_OPT_SHARED) &&
9673 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
9674 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
9675 MonoInst *addr, *vtvar, *iargs [3];
9676 int method_context_used;
9678 if (cfg->generic_sharing_context)
9679 method_context_used = mono_method_check_context_used (method);
9681 method_context_used = 0;
9683 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9685 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9686 EMIT_NEW_ICONST (cfg, iargs [1], n);
9687 if (method_context_used) {
9688 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
9689 method, MONO_RGCTX_INFO_METHOD);
9690 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
9692 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
9693 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
9695 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9697 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9699 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9701 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
9702 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
9703 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
9704 (cmethod->klass == mono_defaults.monotype_class->parent) &&
9705 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
9706 MonoClass *tclass = mono_class_from_mono_type (handle);
9708 mono_class_init (tclass);
9710 ins = emit_get_rgctx_klass (cfg, context_used,
9711 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
9712 } else if (cfg->compile_aot) {
9713 if (method->wrapper_type) {
9714 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
9715 /* Special case for static synchronized wrappers */
9716 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
9718 /* FIXME: n is not a normal token */
9719 cfg->disable_aot = TRUE;
9720 EMIT_NEW_PCONST (cfg, ins, NULL);
9723 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
9726 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
9728 ins->type = STACK_OBJ;
9729 ins->klass = cmethod->klass;
9732 MonoInst *addr, *vtvar;
9734 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9737 if (handle_class == mono_defaults.typehandle_class) {
9738 ins = emit_get_rgctx_klass (cfg, context_used,
9739 mono_class_from_mono_type (handle),
9740 MONO_RGCTX_INFO_TYPE);
9741 } else if (handle_class == mono_defaults.methodhandle_class) {
9742 ins = emit_get_rgctx_method (cfg, context_used,
9743 handle, MONO_RGCTX_INFO_METHOD);
9744 } else if (handle_class == mono_defaults.fieldhandle_class) {
9745 ins = emit_get_rgctx_field (cfg, context_used,
9746 handle, MONO_RGCTX_INFO_CLASS_FIELD);
9748 g_assert_not_reached ();
9750 } else if (cfg->compile_aot) {
9751 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
9753 EMIT_NEW_PCONST (cfg, ins, handle);
9755 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9756 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9757 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9767 MONO_INST_NEW (cfg, ins, OP_THROW);
9769 ins->sreg1 = sp [0]->dreg;
9771 bblock->out_of_line = TRUE;
9772 MONO_ADD_INS (bblock, ins);
9773 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
9774 MONO_ADD_INS (bblock, ins);
9777 link_bblock (cfg, bblock, end_bblock);
9778 start_new_bblock = 1;
9780 case CEE_ENDFINALLY:
9781 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
9782 MONO_ADD_INS (bblock, ins);
9784 start_new_bblock = 1;
9787 * Control will leave the method so empty the stack, otherwise
9788 * the next basic block will start with a nonempty stack.
9790 while (sp != stack_start) {
9798 if (*ip == CEE_LEAVE) {
9800 target = ip + 5 + (gint32)read32(ip + 1);
9803 target = ip + 2 + (signed char)(ip [1]);
9806 /* empty the stack */
9807 while (sp != stack_start) {
9812 * If this leave statement is in a catch block, check for a
9813 * pending exception, and rethrow it if necessary.
9814 * We avoid doing this in runtime invoke wrappers, since those are called
9815 * by native code which expects the wrapper to catch all exceptions.
9817 for (i = 0; i < header->num_clauses; ++i) {
9818 MonoExceptionClause *clause = &header->clauses [i];
9821 * Use <= in the final comparison to handle clauses with multiple
9822 * leave statements, like in bug #78024.
9823 * The ordering of the exception clauses guarantees that we find the
9826 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
9828 MonoBasicBlock *dont_throw;
9833 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
9836 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
9838 NEW_BBLOCK (cfg, dont_throw);
9841 * Currently, we always rethrow the abort exception, despite the
9842 * fact that this is not correct. See thread6.cs for an example.
9843 * But propagating the abort exception is more important than
9844 * getting the semantics right.
9846 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
9847 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
9848 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
9850 MONO_START_BB (cfg, dont_throw);
9855 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
9857 MonoExceptionClause *clause;
9859 for (tmp = handlers; tmp; tmp = tmp->next) {
9861 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
9863 link_bblock (cfg, bblock, tblock);
9864 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
9865 ins->inst_target_bb = tblock;
9866 ins->inst_eh_block = clause;
9867 MONO_ADD_INS (bblock, ins);
9868 bblock->has_call_handler = 1;
9869 if (COMPILE_LLVM (cfg)) {
9870 MonoBasicBlock *target_bb;
9873 * Link the finally bblock with the target, since it will
9874 * conceptually branch there.
9875 * FIXME: Have to link the bblock containing the endfinally.
9877 GET_BBLOCK (cfg, target_bb, target);
9878 link_bblock (cfg, tblock, target_bb);
9881 g_list_free (handlers);
9884 MONO_INST_NEW (cfg, ins, OP_BR);
9885 MONO_ADD_INS (bblock, ins);
9886 GET_BBLOCK (cfg, tblock, target);
9887 link_bblock (cfg, bblock, tblock);
9888 ins->inst_target_bb = tblock;
9889 start_new_bblock = 1;
9891 if (*ip == CEE_LEAVE)
9900 * Mono specific opcodes
9902 case MONO_CUSTOM_PREFIX: {
9904 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
9908 case CEE_MONO_ICALL: {
9910 MonoJitICallInfo *info;
9912 token = read32 (ip + 2);
9913 func = mono_method_get_wrapper_data (method, token);
9914 info = mono_find_jit_icall_by_addr (func);
9917 CHECK_STACK (info->sig->param_count);
9918 sp -= info->sig->param_count;
9920 ins = mono_emit_jit_icall (cfg, info->func, sp);
9921 if (!MONO_TYPE_IS_VOID (info->sig->ret))
9925 inline_costs += 10 * num_calls++;
9929 case CEE_MONO_LDPTR: {
9932 CHECK_STACK_OVF (1);
9934 token = read32 (ip + 2);
9936 ptr = mono_method_get_wrapper_data (method, token);
9937 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
9938 MonoJitICallInfo *callinfo;
9939 const char *icall_name;
9941 icall_name = method->name + strlen ("__icall_wrapper_");
9942 g_assert (icall_name);
9943 callinfo = mono_find_jit_icall_by_name (icall_name);
9944 g_assert (callinfo);
9946 if (ptr == callinfo->func) {
9947 /* Will be transformed into an AOTCONST later */
9948 EMIT_NEW_PCONST (cfg, ins, ptr);
9954 /* FIXME: Generalize this */
9955 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
9956 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
9961 EMIT_NEW_PCONST (cfg, ins, ptr);
9964 inline_costs += 10 * num_calls++;
9965 /* Can't embed random pointers into AOT code */
9966 cfg->disable_aot = 1;
9969 case CEE_MONO_ICALL_ADDR: {
9970 MonoMethod *cmethod;
9973 CHECK_STACK_OVF (1);
9975 token = read32 (ip + 2);
9977 cmethod = mono_method_get_wrapper_data (method, token);
9979 if (cfg->compile_aot) {
9980 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
9982 ptr = mono_lookup_internal_call (cmethod);
9984 EMIT_NEW_PCONST (cfg, ins, ptr);
9990 case CEE_MONO_VTADDR: {
9991 MonoInst *src_var, *src;
9997 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9998 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
10003 case CEE_MONO_NEWOBJ: {
10004 MonoInst *iargs [2];
10006 CHECK_STACK_OVF (1);
10008 token = read32 (ip + 2);
10009 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
10010 mono_class_init (klass);
10011 NEW_DOMAINCONST (cfg, iargs [0]);
10012 MONO_ADD_INS (cfg->cbb, iargs [0]);
10013 NEW_CLASSCONST (cfg, iargs [1], klass);
10014 MONO_ADD_INS (cfg->cbb, iargs [1]);
10015 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
10017 inline_costs += 10 * num_calls++;
10020 case CEE_MONO_OBJADDR:
10023 MONO_INST_NEW (cfg, ins, OP_MOVE);
10024 ins->dreg = alloc_ireg_mp (cfg);
10025 ins->sreg1 = sp [0]->dreg;
10026 ins->type = STACK_MP;
10027 MONO_ADD_INS (cfg->cbb, ins);
10031 case CEE_MONO_LDNATIVEOBJ:
10033 * Similar to LDOBJ, but instead load the unmanaged
10034 * representation of the vtype to the stack.
10039 token = read32 (ip + 2);
10040 klass = mono_method_get_wrapper_data (method, token);
10041 g_assert (klass->valuetype);
10042 mono_class_init (klass);
10045 MonoInst *src, *dest, *temp;
10048 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
10049 temp->backend.is_pinvoke = 1;
10050 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
10051 mini_emit_stobj (cfg, dest, src, klass, TRUE);
10053 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
10054 dest->type = STACK_VTYPE;
10055 dest->klass = klass;
10061 case CEE_MONO_RETOBJ: {
10063 * Same as RET, but return the native representation of a vtype
10066 g_assert (cfg->ret);
10067 g_assert (mono_method_signature (method)->pinvoke);
10072 token = read32 (ip + 2);
10073 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
10075 if (!cfg->vret_addr) {
10076 g_assert (cfg->ret_var_is_local);
10078 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
10080 EMIT_NEW_RETLOADA (cfg, ins);
10082 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
10084 if (sp != stack_start)
10087 MONO_INST_NEW (cfg, ins, OP_BR);
10088 ins->inst_target_bb = end_bblock;
10089 MONO_ADD_INS (bblock, ins);
10090 link_bblock (cfg, bblock, end_bblock);
10091 start_new_bblock = 1;
10095 case CEE_MONO_CISINST:
10096 case CEE_MONO_CCASTCLASS: {
10101 token = read32 (ip + 2);
10102 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
10103 if (ip [1] == CEE_MONO_CISINST)
10104 ins = handle_cisinst (cfg, klass, sp [0]);
10106 ins = handle_ccastclass (cfg, klass, sp [0]);
10112 case CEE_MONO_SAVE_LMF:
10113 case CEE_MONO_RESTORE_LMF:
10114 #ifdef MONO_ARCH_HAVE_LMF_OPS
10115 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
10116 MONO_ADD_INS (bblock, ins);
10117 cfg->need_lmf_area = TRUE;
10121 case CEE_MONO_CLASSCONST:
10122 CHECK_STACK_OVF (1);
10124 token = read32 (ip + 2);
10125 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
10128 inline_costs += 10 * num_calls++;
10130 case CEE_MONO_NOT_TAKEN:
10131 bblock->out_of_line = TRUE;
10135 CHECK_STACK_OVF (1);
10137 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
10138 ins->dreg = alloc_preg (cfg);
10139 ins->inst_offset = (gint32)read32 (ip + 2);
10140 ins->type = STACK_PTR;
10141 MONO_ADD_INS (bblock, ins);
10145 case CEE_MONO_DYN_CALL: {
10146 MonoCallInst *call;
10148 /* It would be easier to call a trampoline, but that would put an
10149 * extra frame on the stack, confusing exception handling. So
10150 * implement it inline using an opcode for now.
10153 if (!cfg->dyn_call_var) {
10154 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
10155 /* prevent it from being register allocated */
10156 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
10159 /* Has to use a call inst since the local regalloc expects it */
10160 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
10161 ins = (MonoInst*)call;
10163 ins->sreg1 = sp [0]->dreg;
10164 ins->sreg2 = sp [1]->dreg;
10165 MONO_ADD_INS (bblock, ins);
10167 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
10168 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
10172 inline_costs += 10 * num_calls++;
10176 case CEE_MONO_MEMORY_BARRIER: {
10178 emit_memory_barrier (cfg, (int)read32 (ip + 1));
10182 case CEE_MONO_JIT_ATTACH: {
10183 MonoInst *args [16];
10184 MonoInst *ad_ins, *lmf_ins;
10185 MonoBasicBlock *next_bb = NULL;
10187 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
10189 EMIT_NEW_PCONST (cfg, ins, NULL);
10190 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
10196 ad_ins = mono_get_domain_intrinsic (cfg);
10197 lmf_ins = mono_get_lmf_intrinsic (cfg);
10200 #ifdef MONO_ARCH_HAVE_TLS_GET
10201 if (MONO_ARCH_HAVE_TLS_GET && ad_ins && lmf_ins) {
10202 NEW_BBLOCK (cfg, next_bb);
10204 MONO_ADD_INS (cfg->cbb, ad_ins);
10205 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ad_ins->dreg, 0);
10206 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
10208 MONO_ADD_INS (cfg->cbb, lmf_ins);
10209 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, lmf_ins->dreg, 0);
10210 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
10214 if (cfg->compile_aot) {
10215 /* AOT code is only used in the root domain */
10216 EMIT_NEW_PCONST (cfg, args [0], NULL);
10218 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
10220 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
10221 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
10224 MONO_START_BB (cfg, next_bb);
10230 case CEE_MONO_JIT_DETACH: {
10231 MonoInst *args [16];
10233 /* Restore the original domain */
10234 dreg = alloc_ireg (cfg);
10235 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
10236 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
10241 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
10247 case CEE_PREFIX1: {
10250 case CEE_ARGLIST: {
10251 /* somewhat similar to LDTOKEN */
10252 MonoInst *addr, *vtvar;
10253 CHECK_STACK_OVF (1);
10254 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
10256 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
10257 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
10259 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
10260 ins->type = STACK_VTYPE;
10261 ins->klass = mono_defaults.argumenthandle_class;
10274 * The following transforms:
10275 * CEE_CEQ into OP_CEQ
10276 * CEE_CGT into OP_CGT
10277 * CEE_CGT_UN into OP_CGT_UN
10278 * CEE_CLT into OP_CLT
10279 * CEE_CLT_UN into OP_CLT_UN
10281 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
10283 MONO_INST_NEW (cfg, ins, cmp->opcode);
10285 cmp->sreg1 = sp [0]->dreg;
10286 cmp->sreg2 = sp [1]->dreg;
10287 type_from_op (cmp, sp [0], sp [1]);
10289 if ((sp [0]->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
10290 cmp->opcode = OP_LCOMPARE;
10291 else if (sp [0]->type == STACK_R8)
10292 cmp->opcode = OP_FCOMPARE;
10294 cmp->opcode = OP_ICOMPARE;
10295 MONO_ADD_INS (bblock, cmp);
10296 ins->type = STACK_I4;
10297 ins->dreg = alloc_dreg (cfg, ins->type);
10298 type_from_op (ins, sp [0], sp [1]);
10300 if (cmp->opcode == OP_FCOMPARE) {
10302 * The backends expect the fceq opcodes to do the
10305 cmp->opcode = OP_NOP;
10306 ins->sreg1 = cmp->sreg1;
10307 ins->sreg2 = cmp->sreg2;
10309 MONO_ADD_INS (bblock, ins);
10315 MonoInst *argconst;
10316 MonoMethod *cil_method;
10318 CHECK_STACK_OVF (1);
10320 n = read32 (ip + 2);
10321 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
10322 if (!cmethod || mono_loader_get_last_error ())
10324 mono_class_init (cmethod->klass);
10326 mono_save_token_info (cfg, image, n, cmethod);
10328 if (cfg->generic_sharing_context)
10329 context_used = mono_method_check_context_used (cmethod);
10331 cil_method = cmethod;
10332 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
10333 METHOD_ACCESS_FAILURE;
10335 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
10336 if (check_linkdemand (cfg, method, cmethod))
10338 CHECK_CFG_EXCEPTION;
10339 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
10340 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
10344 * Optimize the common case of ldftn+delegate creation
10346 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
10347 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
10348 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
10349 MonoInst *target_ins;
10350 MonoMethod *invoke;
10351 int invoke_context_used = 0;
10353 invoke = mono_get_delegate_invoke (ctor_method->klass);
10354 if (!invoke || !mono_method_signature (invoke))
10357 if (cfg->generic_sharing_context)
10358 invoke_context_used = mono_method_check_context_used (invoke);
10360 target_ins = sp [-1];
10362 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
10363 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
10365 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
10366 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
10367 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
10368 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
10369 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
10373 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
10374 /* FIXME: SGEN support */
10375 if (invoke_context_used == 0) {
10377 if (cfg->verbose_level > 3)
10378 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10380 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
10381 CHECK_CFG_EXCEPTION;
10390 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
10391 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
10395 inline_costs += 10 * num_calls++;
10398 case CEE_LDVIRTFTN: {
10399 MonoInst *args [2];
10403 n = read32 (ip + 2);
10404 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
10405 if (!cmethod || mono_loader_get_last_error ())
10407 mono_class_init (cmethod->klass);
10409 if (cfg->generic_sharing_context)
10410 context_used = mono_method_check_context_used (cmethod);
10412 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
10413 if (check_linkdemand (cfg, method, cmethod))
10415 CHECK_CFG_EXCEPTION;
10416 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
10417 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
10423 args [1] = emit_get_rgctx_method (cfg, context_used,
10424 cmethod, MONO_RGCTX_INFO_METHOD);
10427 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
10429 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
10432 inline_costs += 10 * num_calls++;
10436 CHECK_STACK_OVF (1);
10438 n = read16 (ip + 2);
10440 EMIT_NEW_ARGLOAD (cfg, ins, n);
10445 CHECK_STACK_OVF (1);
10447 n = read16 (ip + 2);
10449 NEW_ARGLOADA (cfg, ins, n);
10450 MONO_ADD_INS (cfg->cbb, ins);
10458 n = read16 (ip + 2);
10460 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
10462 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
10466 CHECK_STACK_OVF (1);
10468 n = read16 (ip + 2);
10470 EMIT_NEW_LOCLOAD (cfg, ins, n);
10475 unsigned char *tmp_ip;
10476 CHECK_STACK_OVF (1);
10478 n = read16 (ip + 2);
10481 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
10487 EMIT_NEW_LOCLOADA (cfg, ins, n);
10496 n = read16 (ip + 2);
10498 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
10500 emit_stloc_ir (cfg, sp, header, n);
10507 if (sp != stack_start)
10509 if (cfg->method != method)
10511 * Inlining this into a loop in a parent could lead to
10512 * stack overflows which is different behavior than the
10513 * non-inlined case, thus disable inlining in this case.
10515 goto inline_failure;
10517 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
10518 ins->dreg = alloc_preg (cfg);
10519 ins->sreg1 = sp [0]->dreg;
10520 ins->type = STACK_PTR;
10521 MONO_ADD_INS (cfg->cbb, ins);
10523 cfg->flags |= MONO_CFG_HAS_ALLOCA;
10525 ins->flags |= MONO_INST_INIT;
10530 case CEE_ENDFILTER: {
10531 MonoExceptionClause *clause, *nearest;
10532 int cc, nearest_num;
10536 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
10538 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
10539 ins->sreg1 = (*sp)->dreg;
10540 MONO_ADD_INS (bblock, ins);
10541 start_new_bblock = 1;
10546 for (cc = 0; cc < header->num_clauses; ++cc) {
10547 clause = &header->clauses [cc];
10548 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
10549 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
10550 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
10555 g_assert (nearest);
10556 if ((ip - header->code) != nearest->handler_offset)
10561 case CEE_UNALIGNED_:
10562 ins_flag |= MONO_INST_UNALIGNED;
10563 /* FIXME: record alignment? we can assume 1 for now */
10567 case CEE_VOLATILE_:
10568 ins_flag |= MONO_INST_VOLATILE;
10572 ins_flag |= MONO_INST_TAILCALL;
10573 cfg->flags |= MONO_CFG_HAS_TAIL;
10574 /* Can't inline tail calls at this time */
10575 inline_costs += 100000;
10582 token = read32 (ip + 2);
10583 klass = mini_get_class (method, token, generic_context);
10584 CHECK_TYPELOAD (klass);
10585 if (generic_class_is_reference_type (cfg, klass))
10586 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
10588 mini_emit_initobj (cfg, *sp, NULL, klass);
10592 case CEE_CONSTRAINED_:
10594 token = read32 (ip + 2);
10595 if (method->wrapper_type != MONO_WRAPPER_NONE)
10596 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
10598 constrained_call = mono_class_get_full (image, token, generic_context);
10599 CHECK_TYPELOAD (constrained_call);
10603 case CEE_INITBLK: {
10604 MonoInst *iargs [3];
10608 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
10609 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
10610 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
10611 /* emit_memset only works when val == 0 */
10612 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
10614 iargs [0] = sp [0];
10615 iargs [1] = sp [1];
10616 iargs [2] = sp [2];
10617 if (ip [1] == CEE_CPBLK) {
10618 MonoMethod *memcpy_method = get_memcpy_method ();
10619 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
10621 MonoMethod *memset_method = get_memset_method ();
10622 mono_emit_method_call (cfg, memset_method, iargs, NULL);
10632 ins_flag |= MONO_INST_NOTYPECHECK;
10634 ins_flag |= MONO_INST_NORANGECHECK;
10635 /* we ignore the no-nullcheck for now since we
10636 * really do it explicitly only when doing callvirt->call
10640 case CEE_RETHROW: {
10642 int handler_offset = -1;
10644 for (i = 0; i < header->num_clauses; ++i) {
10645 MonoExceptionClause *clause = &header->clauses [i];
10646 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
10647 handler_offset = clause->handler_offset;
10652 bblock->flags |= BB_EXCEPTION_UNSAFE;
10654 g_assert (handler_offset != -1);
10656 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
10657 MONO_INST_NEW (cfg, ins, OP_RETHROW);
10658 ins->sreg1 = load->dreg;
10659 MONO_ADD_INS (bblock, ins);
10661 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
10662 MONO_ADD_INS (bblock, ins);
10665 link_bblock (cfg, bblock, end_bblock);
10666 start_new_bblock = 1;
10674 CHECK_STACK_OVF (1);
10676 token = read32 (ip + 2);
10677 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic && !generic_context) {
10678 MonoType *type = mono_type_create_from_typespec (image, token);
10679 token = mono_type_size (type, &ialign);
10681 MonoClass *klass = mono_class_get_full (image, token, generic_context);
10682 CHECK_TYPELOAD (klass);
10683 mono_class_init (klass);
10684 token = mono_class_value_size (klass, &align);
10686 EMIT_NEW_ICONST (cfg, ins, token);
10691 case CEE_REFANYTYPE: {
10692 MonoInst *src_var, *src;
10698 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10700 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
10701 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
10702 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
10707 case CEE_READONLY_:
10720 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
10730 g_warning ("opcode 0x%02x not handled", *ip);
10734 if (start_new_bblock != 1)
10737 bblock->cil_length = ip - bblock->cil_code;
10738 if (bblock->next_bb) {
10739 /* This could already be set because of inlining, #693905 */
10740 MonoBasicBlock *bb = bblock;
10742 while (bb->next_bb)
10744 bb->next_bb = end_bblock;
10746 bblock->next_bb = end_bblock;
10749 if (cfg->method == method && cfg->domainvar) {
10751 MonoInst *get_domain;
10753 cfg->cbb = init_localsbb;
10755 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
10756 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
10759 get_domain->dreg = alloc_preg (cfg);
10760 MONO_ADD_INS (cfg->cbb, get_domain);
10762 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
10763 MONO_ADD_INS (cfg->cbb, store);
10766 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
10767 if (cfg->compile_aot)
10768 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
10769 mono_get_got_var (cfg);
10772 if (cfg->method == method && cfg->got_var)
10773 mono_emit_load_got_addr (cfg);
10778 cfg->cbb = init_localsbb;
10780 for (i = 0; i < header->num_locals; ++i) {
10781 MonoType *ptype = header->locals [i];
10782 int t = ptype->type;
10783 dreg = cfg->locals [i]->dreg;
10785 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
10786 t = mono_class_enum_basetype (ptype->data.klass)->type;
10787 if (ptype->byref) {
10788 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10789 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
10790 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
10791 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
10792 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
10793 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
10794 MONO_INST_NEW (cfg, ins, OP_R8CONST);
10795 ins->type = STACK_R8;
10796 ins->inst_p0 = (void*)&r8_0;
10797 ins->dreg = alloc_dreg (cfg, STACK_R8);
10798 MONO_ADD_INS (init_localsbb, ins);
10799 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
10800 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
10801 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
10802 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
10804 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10809 if (cfg->init_ref_vars && cfg->method == method) {
10810 /* Emit initialization for ref vars */
10811 // FIXME: Avoid duplication initialization for IL locals.
10812 for (i = 0; i < cfg->num_varinfo; ++i) {
10813 MonoInst *ins = cfg->varinfo [i];
10815 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
10816 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
10821 MonoBasicBlock *bb;
10824 * Make seq points at backward branch targets interruptable.
10826 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
10827 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
10828 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
10831 /* Add a sequence point for method entry/exit events */
10833 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
10834 MONO_ADD_INS (init_localsbb, ins);
10835 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
10836 MONO_ADD_INS (cfg->bb_exit, ins);
10841 if (cfg->method == method) {
10842 MonoBasicBlock *bb;
10843 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10844 bb->region = mono_find_block_region (cfg, bb->real_offset);
10846 mono_create_spvar_for_region (cfg, bb->region);
10847 if (cfg->verbose_level > 2)
10848 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
10852 g_slist_free (class_inits);
10853 dont_inline = g_list_remove (dont_inline, method);
10855 if (inline_costs < 0) {
10858 /* Method is too large */
10859 mname = mono_method_full_name (method, TRUE);
10860 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
10861 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
10863 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10864 mono_basic_block_free (original_bb);
10868 if ((cfg->verbose_level > 2) && (cfg->method == method))
10869 mono_print_code (cfg, "AFTER METHOD-TO-IR");
10871 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10872 mono_basic_block_free (original_bb);
10873 return inline_costs;
10876 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
10883 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
10887 set_exception_type_from_invalid_il (cfg, method, ip);
10891 g_slist_free (class_inits);
10892 mono_basic_block_free (original_bb);
10893 dont_inline = g_list_remove (dont_inline, method);
10894 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a STORE<size>_MEMBASE_REG opcode to its STORE<size>_MEMBASE_IMM
 * counterpart, so a store of a known constant can encode the value as an
 * immediate instead of going through a register.
 * NOTE(review): this excerpt is missing the return type, the switch header
 * and some surrounding lines; only the visible case/return pairs are
 * documented here.
 */
10899 store_membase_reg_to_store_membase_imm (int opcode)
10902 case OP_STORE_MEMBASE_REG:
10903 return OP_STORE_MEMBASE_IMM;
10904 case OP_STOREI1_MEMBASE_REG:
10905 return OP_STOREI1_MEMBASE_IMM;
10906 case OP_STOREI2_MEMBASE_REG:
10907 return OP_STOREI2_MEMBASE_IMM;
10908 case OP_STOREI4_MEMBASE_REG:
10909 return OP_STOREI4_MEMBASE_IMM;
10910 case OP_STOREI8_MEMBASE_REG:
10911 return OP_STOREI8_MEMBASE_IMM;
/* No immediate form exists for the requested store opcode. */
10913 g_assert_not_reached ();
10919 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 *
 *   Map a reg-reg opcode to its reg-imm variant (e.g. OP_IADD -> OP_IADD_IMM)
 * so a constant operand can be folded directly into the instruction.
 * NOTE(review): the case labels and the default path are not visible in this
 * excerpt; the returns below are grouped by the opcode families they belong to.
 */
10922 mono_op_to_op_imm (int opcode)
/* 32 bit integer ALU and shift opcodes */
10926 return OP_IADD_IMM;
10928 return OP_ISUB_IMM;
10930 return OP_IDIV_IMM;
10932 return OP_IDIV_UN_IMM;
10934 return OP_IREM_IMM;
10936 return OP_IREM_UN_IMM;
10938 return OP_IMUL_IMM;
10940 return OP_IAND_IMM;
10944 return OP_IXOR_IMM;
10946 return OP_ISHL_IMM;
10948 return OP_ISHR_IMM;
10950 return OP_ISHR_UN_IMM;
/* 64 bit (long) ALU and shift opcodes */
10953 return OP_LADD_IMM;
10955 return OP_LSUB_IMM;
10957 return OP_LAND_IMM;
10961 return OP_LXOR_IMM;
10963 return OP_LSHL_IMM;
10965 return OP_LSHR_IMM;
10967 return OP_LSHR_UN_IMM;
/* compare opcodes */
10970 return OP_COMPARE_IMM;
10972 return OP_ICOMPARE_IMM;
10974 return OP_LCOMPARE_IMM;
/* membase stores: store an immediate instead of a register */
10976 case OP_STORE_MEMBASE_REG:
10977 return OP_STORE_MEMBASE_IMM;
10978 case OP_STOREI1_MEMBASE_REG:
10979 return OP_STOREI1_MEMBASE_IMM;
10980 case OP_STOREI2_MEMBASE_REG:
10981 return OP_STOREI2_MEMBASE_IMM;
10982 case OP_STOREI4_MEMBASE_REG:
10983 return OP_STOREI4_MEMBASE_IMM;
/* x86/amd64 specific push/compare forms */
10985 #if defined(TARGET_X86) || defined (TARGET_AMD64)
10987 return OP_X86_PUSH_IMM;
10988 case OP_X86_COMPARE_MEMBASE_REG:
10989 return OP_X86_COMPARE_MEMBASE_IMM;
10991 #if defined(TARGET_AMD64)
10992 case OP_AMD64_ICOMPARE_MEMBASE_REG:
10993 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* calls with a constant target become direct calls */
10995 case OP_VOIDCALL_REG:
10996 return OP_VOIDCALL;
10998 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL CEE_LDIND_* (load indirect) opcode to the corresponding
 * typed OP_LOAD*_MEMBASE IR load. CEE_LDIND_REF uses the pointer-sized
 * OP_LOAD_MEMBASE. Asserts on unknown opcodes.
 * NOTE(review): most case labels are missing from this excerpt; the
 * visible returns follow the usual I1/U1/I2/U2/I4/U4/I/REF/I8/R4/R8 order.
 */
11011 ldind_to_load_membase (int opcode)
11015 return OP_LOADI1_MEMBASE;
11017 return OP_LOADU1_MEMBASE;
11019 return OP_LOADI2_MEMBASE;
11021 return OP_LOADU2_MEMBASE;
11023 return OP_LOADI4_MEMBASE;
11025 return OP_LOADU4_MEMBASE;
11027 return OP_LOAD_MEMBASE;
11028 case CEE_LDIND_REF:
11029 return OP_LOAD_MEMBASE;
11031 return OP_LOADI8_MEMBASE;
11033 return OP_LOADR4_MEMBASE;
11035 return OP_LOADR8_MEMBASE;
11037 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL CEE_STIND_* (store indirect) opcode to the corresponding
 * typed OP_STORE*_MEMBASE_REG IR store. CEE_STIND_REF uses the
 * pointer-sized OP_STORE_MEMBASE_REG. Asserts on unknown opcodes.
 * NOTE(review): most case labels are missing from this excerpt.
 */
11044 stind_to_store_membase (int opcode)
11048 return OP_STOREI1_MEMBASE_REG;
11050 return OP_STOREI2_MEMBASE_REG;
11052 return OP_STOREI4_MEMBASE_REG;
11054 case CEE_STIND_REF:
11055 return OP_STORE_MEMBASE_REG;
11057 return OP_STOREI8_MEMBASE_REG;
11059 return OP_STORER4_MEMBASE_REG;
11061 return OP_STORER8_MEMBASE_REG;
11063 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map an OP_LOAD*_MEMBASE (base register + offset) load to the
 * corresponding OP_LOAD*_MEM (absolute address) load, which only some
 * backends support; per the #ifdef below, only x86/amd64 here.
 * The I8 variant is only mapped on 64 bit registers.
 * NOTE(review): the fallback return for unsupported opcodes/targets is
 * outside this excerpt.
 */
11070 mono_load_membase_to_load_mem (int opcode)
11072 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
11073 #if defined(TARGET_X86) || defined(TARGET_AMD64)
11075 case OP_LOAD_MEMBASE:
11076 return OP_LOAD_MEM;
11077 case OP_LOADU1_MEMBASE:
11078 return OP_LOADU1_MEM;
11079 case OP_LOADU2_MEMBASE:
11080 return OP_LOADU2_MEM;
11081 case OP_LOADI4_MEMBASE:
11082 return OP_LOADI4_MEM;
11083 case OP_LOADU4_MEMBASE:
11084 return OP_LOADU4_MEM;
11085 #if SIZEOF_REGISTER == 8
11086 case OP_LOADI8_MEMBASE:
11087 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Return an opcode which performs the given ALU operation directly on a
 * memory destination (a read-modify-write *_MEMBASE form), fusing
 * load + op + store into one instruction. Callers treat -1 as "no fused
 * form available" (see the `!= -1` checks in mono_spill_global_vars).
 * Only full-width stores are eligible: 4 byte on x86, 4 or 8 byte on amd64.
 * NOTE(review): the case labels between the returns are missing from this
 * excerpt; groups below are annotated by the opcode families they map to.
 */
11096 op_to_op_dest_membase (int store_opcode, int opcode)
11098 #if defined(TARGET_X86)
11099 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
/* x86: reg-source ALU ops on a memory destination */
11104 return OP_X86_ADD_MEMBASE_REG;
11106 return OP_X86_SUB_MEMBASE_REG;
11108 return OP_X86_AND_MEMBASE_REG;
11110 return OP_X86_OR_MEMBASE_REG;
11112 return OP_X86_XOR_MEMBASE_REG;
/* x86: immediate-source ALU ops on a memory destination */
11115 return OP_X86_ADD_MEMBASE_IMM;
11118 return OP_X86_SUB_MEMBASE_IMM;
11121 return OP_X86_AND_MEMBASE_IMM;
11124 return OP_X86_OR_MEMBASE_IMM;
11127 return OP_X86_XOR_MEMBASE_IMM;
11133 #if defined(TARGET_AMD64)
11134 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* amd64, 32 bit stores: reuse the X86_* 32 bit membase forms */
11139 return OP_X86_ADD_MEMBASE_REG;
11141 return OP_X86_SUB_MEMBASE_REG;
11143 return OP_X86_AND_MEMBASE_REG;
11145 return OP_X86_OR_MEMBASE_REG;
11147 return OP_X86_XOR_MEMBASE_REG;
11149 return OP_X86_ADD_MEMBASE_IMM;
11151 return OP_X86_SUB_MEMBASE_IMM;
11153 return OP_X86_AND_MEMBASE_IMM;
11155 return OP_X86_OR_MEMBASE_IMM;
11157 return OP_X86_XOR_MEMBASE_IMM;
/* amd64, 64 bit stores: AMD64_* membase forms */
11159 return OP_AMD64_ADD_MEMBASE_REG;
11161 return OP_AMD64_SUB_MEMBASE_REG;
11163 return OP_AMD64_AND_MEMBASE_REG;
11165 return OP_AMD64_OR_MEMBASE_REG;
11167 return OP_AMD64_XOR_MEMBASE_REG;
11170 return OP_AMD64_ADD_MEMBASE_IMM;
11173 return OP_AMD64_SUB_MEMBASE_IMM;
11176 return OP_AMD64_AND_MEMBASE_IMM;
11179 return OP_AMD64_OR_MEMBASE_IMM;
11182 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a condition-result opcode with the byte store that follows it,
 * producing an x86 SETcc directly into memory. Only valid when the store
 * is a one-byte store (SETcc writes a single byte). Callers treat -1 as
 * "no fused form" (see the `!= -1` check in mono_spill_global_vars).
 * NOTE(review): the case labels selecting between the EQ/NE paths are not
 * visible in this excerpt.
 */
11192 op_to_op_store_membase (int store_opcode, int opcode)
11194 #if defined(TARGET_X86) || defined(TARGET_AMD64)
11197 if (store_opcode == OP_STOREI1_MEMBASE_REG)
11198 return OP_X86_SETEQ_MEMBASE;
11200 if (store_opcode == OP_STOREI1_MEMBASE_REG)
11201 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fuse the load feeding an instruction's FIRST source operand into the
 * instruction itself, producing a *_MEMBASE form (e.g. compare/push with a
 * memory source) and saving one load. Callers treat -1 as "no fused form".
 * Only full-width (or, for the special byte-compare case, U1) loads are
 * eligible; __mono_ilp32__ restricts which loads count as pointer-sized.
 * NOTE(review): several case labels and fallthrough returns are missing
 * from this excerpt.
 */
11209 op_to_op_src1_membase (int load_opcode, int opcode)
/* x86 section */
11212 /* FIXME: This has sign extension issues */
11214 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
11215 return OP_X86_COMPARE_MEMBASE8_IMM;
11218 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
11223 return OP_X86_PUSH_MEMBASE;
11224 case OP_COMPARE_IMM:
11225 case OP_ICOMPARE_IMM:
11226 return OP_X86_COMPARE_MEMBASE_IMM;
11229 return OP_X86_COMPARE_MEMBASE_REG;
/* amd64 section */
11233 #ifdef TARGET_AMD64
11234 /* FIXME: This has sign extension issues */
11236 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
11237 return OP_X86_COMPARE_MEMBASE8_IMM;
11242 #ifdef __mono_ilp32__
11243 if (load_opcode == OP_LOADI8_MEMBASE)
11245 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
11247 return OP_X86_PUSH_MEMBASE;
11249 /* FIXME: This only works for 32 bit immediates
11250 case OP_COMPARE_IMM:
11251 case OP_LCOMPARE_IMM:
11252 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
11253 return OP_AMD64_COMPARE_MEMBASE_IMM;
11255 case OP_ICOMPARE_IMM:
11256 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
11257 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
11261 #ifdef __mono_ilp32__
11262 if (load_opcode == OP_LOAD_MEMBASE)
11263 return OP_AMD64_ICOMPARE_MEMBASE_REG;
11264 if (load_opcode == OP_LOADI8_MEMBASE)
11266 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
11268 return OP_AMD64_COMPARE_MEMBASE_REG;
11271 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
11272 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fuse the load feeding an instruction's SECOND source operand into the
 * instruction itself, producing a REG_MEMBASE form (ALU/compare with a
 * memory source). Callers treat -1 as "no fused form". Under
 * __mono_ilp32__, OP_LOAD_MEMBASE is pointer-sized (32 bit) and is grouped
 * with the 32 bit loads; otherwise it is grouped with the 64 bit loads.
 * NOTE(review): several case labels are missing from this excerpt.
 */
11281 op_to_op_src2_membase (int load_opcode, int opcode)
/* x86: only full-width loads can be fused */
11284 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
11290 return OP_X86_COMPARE_REG_MEMBASE;
11292 return OP_X86_ADD_REG_MEMBASE;
11294 return OP_X86_SUB_REG_MEMBASE;
11296 return OP_X86_AND_REG_MEMBASE;
11298 return OP_X86_OR_REG_MEMBASE;
11300 return OP_X86_XOR_REG_MEMBASE;
11304 #ifdef TARGET_AMD64
11305 #ifdef __mono_ilp32__
11306 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
11308 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
/* amd64, 32 bit loads */
11312 return OP_AMD64_ICOMPARE_REG_MEMBASE;
11314 return OP_X86_ADD_REG_MEMBASE;
11316 return OP_X86_SUB_REG_MEMBASE;
11318 return OP_X86_AND_REG_MEMBASE;
11320 return OP_X86_OR_REG_MEMBASE;
11322 return OP_X86_XOR_REG_MEMBASE;
11324 #ifdef __mono_ilp32__
11325 } else if (load_opcode == OP_LOADI8_MEMBASE) {
11327 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
/* amd64, 64 bit loads */
11332 return OP_AMD64_COMPARE_REG_MEMBASE;
11334 return OP_AMD64_ADD_REG_MEMBASE;
11336 return OP_AMD64_SUB_REG_MEMBASE;
11338 return OP_AMD64_AND_REG_MEMBASE;
11340 return OP_AMD64_OR_REG_MEMBASE;
11342 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuses to convert opcodes that are
 * emulated in software on the current target (per the #ifs below: long
 * shifts on 32 bit registers, and mul/div when the arch emulates them),
 * since folding an immediate into an emulated opcode would not help.
 * NOTE(review): the excluded case labels themselves are missing from this
 * excerpt; only the guards and the fallthrough to mono_op_to_op_imm remain.
 */
11351 mono_op_to_op_imm_noemul (int opcode)
11354 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
11360 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
11368 return mono_op_to_op_imm (opcode);
11372 #ifndef DISABLE_JIT
11375 * mono_handle_global_vregs:
11377 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/*
 * mono_handle_global_vregs:
 *
 *   Make vregs used in more than one bblock 'global', i.e. allocate a
 * MonoInst variable for them; conversely, variables used in only one bblock
 * are demoted to plain local vregs. Runs after IR construction.
 * FIX(review): the use-check inside the AOT liveness scan OR'd the SRC1
 * condition with itself; the second disjunct now tests SRC2/sreg2, so uses
 * of the variable as the second source operand after a call correctly
 * mark it as spilled.
 */
11381 mono_handle_global_vregs (MonoCompile *cfg)
11383 gint32 *vreg_to_bb;
11384 MonoBasicBlock *bb;
/* Maps vreg -> (block_num + 1) of the single bb using it, or -1 if used in several bbs.
 * NOTE(review): sizeof (gint32*) over-allocates for a gint32 array on LP64,
 * and the '+ 1' adds one byte, not one element — harmless but confirm intent. */
11387 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
11389 #ifdef MONO_ARCH_SIMD_INTRINSICS
11390 if (cfg->uses_simd_intrinsics)
11391 mono_simd_simplify_indirection (cfg);
11394 /* Find local vregs used in more than one bb */
11395 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
11396 MonoInst *ins = bb->code;
11397 int block_num = bb->block_num;
11399 if (cfg->verbose_level > 2)
11400 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
11403 for (; ins; ins = ins->next) {
11404 const char *spec = INS_INFO (ins->opcode);
11405 int regtype = 0, regindex;
11408 if (G_UNLIKELY (cfg->verbose_level > 2))
11409 mono_print_ins (ins);
/* By this point all CIL opcodes must have been lowered to machine IR. */
11411 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Visit dreg, sreg1, sreg2, sreg3 of the instruction in turn. */
11413 for (regindex = 0; regindex < 4; regindex ++) {
11416 if (regindex == 0) {
11417 regtype = spec [MONO_INST_DEST];
11418 if (regtype == ' ')
11421 } else if (regindex == 1) {
11422 regtype = spec [MONO_INST_SRC1];
11423 if (regtype == ' ')
11426 } else if (regindex == 2) {
11427 regtype = spec [MONO_INST_SRC2];
11428 if (regtype == ' ')
11431 } else if (regindex == 3) {
11432 regtype = spec [MONO_INST_SRC3];
11433 if (regtype == ' ')
11438 #if SIZEOF_REGISTER == 4
11439 /* In the LLVM case, the long opcodes are not decomposed */
11440 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
11442 * Since some instructions reference the original long vreg,
11443 * and some reference the two component vregs, it is quite hard
11444 * to determine when it needs to be global. So be conservative.
11446 if (!get_vreg_to_inst (cfg, vreg)) {
11447 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
11449 if (cfg->verbose_level > 2)
11450 printf ("LONG VREG R%d made global.\n", vreg);
11454 * Make the component vregs volatile since the optimizations can
11455 * get confused otherwise.
11457 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
11458 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
11462 g_assert (vreg != -1);
11464 prev_bb = vreg_to_bb [vreg];
11465 if (prev_bb == 0) {
11466 /* 0 is a valid block num */
11467 vreg_to_bb [vreg] = block_num + 1;
11468 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hardware registers are never turned into variables. */
11469 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
11472 if (!get_vreg_to_inst (cfg, vreg)) {
11473 if (G_UNLIKELY (cfg->verbose_level > 2))
11474 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Create the variable with a type matching the regtype. */
11478 if (vreg_is_ref (cfg, vreg))
11479 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
11481 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
11484 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
11487 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
11490 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
11493 g_assert_not_reached ();
11497 /* Flag as having been used in more than one bb */
11498 vreg_to_bb [vreg] = -1;
11504 /* If a variable is used in only one bblock, convert it into a local vreg */
11505 for (i = 0; i < cfg->num_varinfo; i++) {
11506 MonoInst *var = cfg->varinfo [i];
11507 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
11509 switch (var->type) {
11515 #if SIZEOF_REGISTER == 8
11518 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
11519 /* Enabling this screws up the fp stack on x86 */
11522 /* Arguments are implicitly global */
11523 /* Putting R4 vars into registers doesn't work currently */
11524 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
11526 * Make that the variable's liveness interval doesn't contain a call, since
11527 * that would cause the lvreg to be spilled, making the whole optimization
11530 /* This is too slow for JIT compilation */
11532 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
11534 int def_index, call_index, ins_index;
11535 gboolean spilled = FALSE;
11540 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
11541 const char *spec = INS_INFO (ins->opcode);
11543 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
11544 def_index = ins_index;
/* A use (as either source operand) after an intervening call means
 * the lvreg would be spilled — bail out of the optimization. */
11546 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
11547 ((spec [MONO_INST_SRC2] != ' ') && (ins->sreg2 == var->dreg))) {
11548 if (call_index > def_index) {
11554 if (MONO_IS_CALL (ins))
11555 call_index = ins_index;
11565 if (G_UNLIKELY (cfg->verbose_level > 2))
11566 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
11567 var->flags |= MONO_INST_IS_DEAD;
11568 cfg->vreg_to_inst [var->dreg] = NULL;
11575 * Compress the varinfo and vars tables so the liveness computation is faster and
11576 * takes up less space.
11579 for (i = 0; i < cfg->num_varinfo; ++i) {
11580 MonoInst *var = cfg->varinfo [i];
11581 if (pos < i && cfg->locals_start == i)
11582 cfg->locals_start = pos;
11583 if (!(var->flags & MONO_INST_IS_DEAD)) {
11585 cfg->varinfo [pos] = cfg->varinfo [i];
11586 cfg->varinfo [pos]->inst_c0 = pos;
11587 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
11588 cfg->vars [pos].idx = pos;
11589 #if SIZEOF_REGISTER == 4
11590 if (cfg->varinfo [pos]->type == STACK_I8) {
11591 /* Modify the two component vars too */
11594 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
11595 var1->inst_c0 = pos;
11596 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
11597 var1->inst_c0 = pos;
11604 cfg->num_varinfo = pos;
11605 if (cfg->locals_start > cfg->num_varinfo)
11606 cfg->locals_start = cfg->num_varinfo;
11610 * mono_spill_global_vars:
11612 * Generate spill code for variables which are not allocated to registers,
11613 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
11614 * code is generated which could be optimized by the local optimization passes.
11617 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
11619 MonoBasicBlock *bb;
11621 int orig_next_vreg;
11622 guint32 *vreg_to_lvreg;
11624 guint32 i, lvregs_len;
11625 gboolean dest_has_lvreg = FALSE;
11626 guint32 stacktypes [128];
11627 MonoInst **live_range_start, **live_range_end;
11628 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
11630 *need_local_opts = FALSE;
11632 memset (spec2, 0, sizeof (spec2));
11634 /* FIXME: Move this function to mini.c */
11635 stacktypes ['i'] = STACK_PTR;
11636 stacktypes ['l'] = STACK_I8;
11637 stacktypes ['f'] = STACK_R8;
11638 #ifdef MONO_ARCH_SIMD_INTRINSICS
11639 stacktypes ['x'] = STACK_VTYPE;
11642 #if SIZEOF_REGISTER == 4
11643 /* Create MonoInsts for longs */
11644 for (i = 0; i < cfg->num_varinfo; i++) {
11645 MonoInst *ins = cfg->varinfo [i];
11647 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
11648 switch (ins->type) {
11653 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
11656 g_assert (ins->opcode == OP_REGOFFSET);
11658 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
11660 tree->opcode = OP_REGOFFSET;
11661 tree->inst_basereg = ins->inst_basereg;
11662 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
11664 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
11666 tree->opcode = OP_REGOFFSET;
11667 tree->inst_basereg = ins->inst_basereg;
11668 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
11678 if (cfg->compute_gc_maps) {
11679 /* registers need liveness info even for !non refs */
11680 for (i = 0; i < cfg->num_varinfo; i++) {
11681 MonoInst *ins = cfg->varinfo [i];
11683 if (ins->opcode == OP_REGVAR)
11684 ins->flags |= MONO_INST_GC_TRACK;
11688 /* FIXME: widening and truncation */
11691 * As an optimization, when a variable allocated to the stack is first loaded into
11692 * an lvreg, we will remember the lvreg and use it the next time instead of loading
11693 * the variable again.
11695 orig_next_vreg = cfg->next_vreg;
11696 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
11697 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
11701 * These arrays contain the first and last instructions accessing a given
11703 * Since we emit bblocks in the same order we process them here, and we
11704 * don't split live ranges, these will precisely describe the live range of
11705 * the variable, i.e. the instruction range where a valid value can be found
11706 * in the variables location.
11707 * The live range is computed using the liveness info computed by the liveness pass.
11708 * We can't use vmv->range, since that is an abstract live range, and we need
11709 * one which is instruction precise.
11710 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
11712 /* FIXME: Only do this if debugging info is requested */
11713 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
11714 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
11715 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
11716 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
11718 /* Add spill loads/stores */
11719 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
11722 if (cfg->verbose_level > 2)
11723 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
11725 /* Clear vreg_to_lvreg array */
11726 for (i = 0; i < lvregs_len; i++)
11727 vreg_to_lvreg [lvregs [i]] = 0;
11731 MONO_BB_FOR_EACH_INS (bb, ins) {
11732 const char *spec = INS_INFO (ins->opcode);
11733 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
11734 gboolean store, no_lvreg;
11735 int sregs [MONO_MAX_SRC_REGS];
11737 if (G_UNLIKELY (cfg->verbose_level > 2))
11738 mono_print_ins (ins);
11740 if (ins->opcode == OP_NOP)
11744 * We handle LDADDR here as well, since it can only be decomposed
11745 * when variable addresses are known.
11747 if (ins->opcode == OP_LDADDR) {
11748 MonoInst *var = ins->inst_p0;
11750 if (var->opcode == OP_VTARG_ADDR) {
11751 /* Happens on SPARC/S390 where vtypes are passed by reference */
11752 MonoInst *vtaddr = var->inst_left;
11753 if (vtaddr->opcode == OP_REGVAR) {
11754 ins->opcode = OP_MOVE;
11755 ins->sreg1 = vtaddr->dreg;
11757 else if (var->inst_left->opcode == OP_REGOFFSET) {
11758 ins->opcode = OP_LOAD_MEMBASE;
11759 ins->inst_basereg = vtaddr->inst_basereg;
11760 ins->inst_offset = vtaddr->inst_offset;
11764 g_assert (var->opcode == OP_REGOFFSET);
11766 ins->opcode = OP_ADD_IMM;
11767 ins->sreg1 = var->inst_basereg;
11768 ins->inst_imm = var->inst_offset;
11771 *need_local_opts = TRUE;
11772 spec = INS_INFO (ins->opcode);
11775 if (ins->opcode < MONO_CEE_LAST) {
11776 mono_print_ins (ins);
11777 g_assert_not_reached ();
11781 * Store opcodes have destbasereg in the dreg, but in reality, it is an
11785 if (MONO_IS_STORE_MEMBASE (ins)) {
11786 tmp_reg = ins->dreg;
11787 ins->dreg = ins->sreg2;
11788 ins->sreg2 = tmp_reg;
11791 spec2 [MONO_INST_DEST] = ' ';
11792 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
11793 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
11794 spec2 [MONO_INST_SRC3] = ' ';
11796 } else if (MONO_IS_STORE_MEMINDEX (ins))
11797 g_assert_not_reached ();
11802 if (G_UNLIKELY (cfg->verbose_level > 2)) {
11803 printf ("\t %.3s %d", spec, ins->dreg);
11804 num_sregs = mono_inst_get_src_registers (ins, sregs);
11805 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
11806 printf (" %d", sregs [srcindex]);
11813 regtype = spec [MONO_INST_DEST];
11814 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
11817 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
11818 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
11819 MonoInst *store_ins;
11821 MonoInst *def_ins = ins;
11822 int dreg = ins->dreg; /* The original vreg */
11824 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
11826 if (var->opcode == OP_REGVAR) {
11827 ins->dreg = var->dreg;
11828 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
11830 * Instead of emitting a load+store, use a _membase opcode.
11832 g_assert (var->opcode == OP_REGOFFSET);
11833 if (ins->opcode == OP_MOVE) {
11837 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
11838 ins->inst_basereg = var->inst_basereg;
11839 ins->inst_offset = var->inst_offset;
11842 spec = INS_INFO (ins->opcode);
11846 g_assert (var->opcode == OP_REGOFFSET);
11848 prev_dreg = ins->dreg;
11850 /* Invalidate any previous lvreg for this vreg */
11851 vreg_to_lvreg [ins->dreg] = 0;
11855 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
11857 store_opcode = OP_STOREI8_MEMBASE_REG;
11860 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
11862 if (regtype == 'l') {
11863 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
11864 mono_bblock_insert_after_ins (bb, ins, store_ins);
11865 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
11866 mono_bblock_insert_after_ins (bb, ins, store_ins);
11867 def_ins = store_ins;
11870 g_assert (store_opcode != OP_STOREV_MEMBASE);
11872 /* Try to fuse the store into the instruction itself */
11873 /* FIXME: Add more instructions */
11874 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
11875 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
11876 ins->inst_imm = ins->inst_c0;
11877 ins->inst_destbasereg = var->inst_basereg;
11878 ins->inst_offset = var->inst_offset;
11879 spec = INS_INFO (ins->opcode);
11880 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
11881 ins->opcode = store_opcode;
11882 ins->inst_destbasereg = var->inst_basereg;
11883 ins->inst_offset = var->inst_offset;
11887 tmp_reg = ins->dreg;
11888 ins->dreg = ins->sreg2;
11889 ins->sreg2 = tmp_reg;
11892 spec2 [MONO_INST_DEST] = ' ';
11893 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
11894 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
11895 spec2 [MONO_INST_SRC3] = ' ';
11897 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
11898 // FIXME: The backends expect the base reg to be in inst_basereg
11899 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
11901 ins->inst_basereg = var->inst_basereg;
11902 ins->inst_offset = var->inst_offset;
11903 spec = INS_INFO (ins->opcode);
11905 /* printf ("INS: "); mono_print_ins (ins); */
11906 /* Create a store instruction */
11907 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
11909 /* Insert it after the instruction */
11910 mono_bblock_insert_after_ins (bb, ins, store_ins);
11912 def_ins = store_ins;
11915 * We can't assign ins->dreg to var->dreg here, since the
11916 * sregs could use it. So set a flag, and do it after
11919 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
11920 dest_has_lvreg = TRUE;
11925 if (def_ins && !live_range_start [dreg]) {
11926 live_range_start [dreg] = def_ins;
11927 live_range_start_bb [dreg] = bb;
11930 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
11933 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
11934 tmp->inst_c1 = dreg;
11935 mono_bblock_insert_after_ins (bb, def_ins, tmp);
11942 num_sregs = mono_inst_get_src_registers (ins, sregs);
11943 for (srcindex = 0; srcindex < 3; ++srcindex) {
11944 regtype = spec [MONO_INST_SRC1 + srcindex];
11945 sreg = sregs [srcindex];
11947 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
11948 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
11949 MonoInst *var = get_vreg_to_inst (cfg, sreg);
11950 MonoInst *use_ins = ins;
11951 MonoInst *load_ins;
11952 guint32 load_opcode;
11954 if (var->opcode == OP_REGVAR) {
11955 sregs [srcindex] = var->dreg;
11956 //mono_inst_set_src_registers (ins, sregs);
11957 live_range_end [sreg] = use_ins;
11958 live_range_end_bb [sreg] = bb;
11960 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
11963 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
11964 /* var->dreg is a hreg */
11965 tmp->inst_c1 = sreg;
11966 mono_bblock_insert_after_ins (bb, ins, tmp);
11972 g_assert (var->opcode == OP_REGOFFSET);
11974 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
11976 g_assert (load_opcode != OP_LOADV_MEMBASE);
11978 if (vreg_to_lvreg [sreg]) {
11979 g_assert (vreg_to_lvreg [sreg] != -1);
11981 /* The variable is already loaded to an lvreg */
11982 if (G_UNLIKELY (cfg->verbose_level > 2))
11983 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
11984 sregs [srcindex] = vreg_to_lvreg [sreg];
11985 //mono_inst_set_src_registers (ins, sregs);
11989 /* Try to fuse the load into the instruction */
11990 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
11991 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
11992 sregs [0] = var->inst_basereg;
11993 //mono_inst_set_src_registers (ins, sregs);
11994 ins->inst_offset = var->inst_offset;
11995 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
11996 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
11997 sregs [1] = var->inst_basereg;
11998 //mono_inst_set_src_registers (ins, sregs);
11999 ins->inst_offset = var->inst_offset;
12001 if (MONO_IS_REAL_MOVE (ins)) {
12002 ins->opcode = OP_NOP;
12005 //printf ("%d ", srcindex); mono_print_ins (ins);
12007 sreg = alloc_dreg (cfg, stacktypes [regtype]);
12009 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
12010 if (var->dreg == prev_dreg) {
12012 * sreg refers to the value loaded by the load
12013 * emitted below, but we need to use ins->dreg
12014 * since it refers to the store emitted earlier.
12018 g_assert (sreg != -1);
12019 vreg_to_lvreg [var->dreg] = sreg;
12020 g_assert (lvregs_len < 1024);
12021 lvregs [lvregs_len ++] = var->dreg;
12025 sregs [srcindex] = sreg;
12026 //mono_inst_set_src_registers (ins, sregs);
12028 if (regtype == 'l') {
12029 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
12030 mono_bblock_insert_before_ins (bb, ins, load_ins);
12031 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
12032 mono_bblock_insert_before_ins (bb, ins, load_ins);
12033 use_ins = load_ins;
12036 #if SIZEOF_REGISTER == 4
12037 g_assert (load_opcode != OP_LOADI8_MEMBASE);
12039 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
12040 mono_bblock_insert_before_ins (bb, ins, load_ins);
12041 use_ins = load_ins;
12045 if (var->dreg < orig_next_vreg) {
12046 live_range_end [var->dreg] = use_ins;
12047 live_range_end_bb [var->dreg] = bb;
12050 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
12053 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
12054 tmp->inst_c1 = var->dreg;
12055 mono_bblock_insert_after_ins (bb, ins, tmp);
12059 mono_inst_set_src_registers (ins, sregs);
12061 if (dest_has_lvreg) {
12062 g_assert (ins->dreg != -1);
12063 vreg_to_lvreg [prev_dreg] = ins->dreg;
12064 g_assert (lvregs_len < 1024);
12065 lvregs [lvregs_len ++] = prev_dreg;
12066 dest_has_lvreg = FALSE;
12070 tmp_reg = ins->dreg;
12071 ins->dreg = ins->sreg2;
12072 ins->sreg2 = tmp_reg;
12075 if (MONO_IS_CALL (ins)) {
12076 /* Clear vreg_to_lvreg array */
12077 for (i = 0; i < lvregs_len; i++)
12078 vreg_to_lvreg [lvregs [i]] = 0;
12080 } else if (ins->opcode == OP_NOP) {
12082 MONO_INST_NULLIFY_SREGS (ins);
12085 if (cfg->verbose_level > 2)
12086 mono_print_ins_index (1, ins);
12089 /* Extend the live range based on the liveness info */
12090 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
12091 for (i = 0; i < cfg->num_varinfo; i ++) {
12092 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
12094 if (vreg_is_volatile (cfg, vi->vreg))
12095 /* The liveness info is incomplete */
12098 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
12099 /* Live from at least the first ins of this bb */
12100 live_range_start [vi->vreg] = bb->code;
12101 live_range_start_bb [vi->vreg] = bb;
12104 if (mono_bitset_test_fast (bb->live_out_set, i)) {
12105 /* Live at least until the last ins of this bb */
12106 live_range_end [vi->vreg] = bb->last_ins;
12107 live_range_end_bb [vi->vreg] = bb;
12113 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
12115 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
12116 * by storing the current native offset into MonoMethodVar->live_range_start/end.
12118 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
12119 for (i = 0; i < cfg->num_varinfo; ++i) {
12120 int vreg = MONO_VARINFO (cfg, i)->vreg;
12123 if (live_range_start [vreg]) {
12124 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
12126 ins->inst_c1 = vreg;
12127 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
12129 if (live_range_end [vreg]) {
12130 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
12132 ins->inst_c1 = vreg;
12133 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
12134 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
12136 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
12142 g_free (live_range_start);
12143 g_free (live_range_end);
12144 g_free (live_range_start_bb);
12145 g_free (live_range_end_bb);
12150 * - use 'iadd' instead of 'int_add'
12151 * - handling ovf opcodes: decompose in method_to_ir.
12152 * - unify iregs/fregs
12153 * -> partly done, the missing parts are:
12154 * - a more complete unification would involve unifying the hregs as well, so
12155 * code wouldn't need if (fp) all over the place. but that would mean the hregs
12156 * would no longer map to the machine hregs, so the code generators would need to
12157 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
12158 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
12159 * fp/non-fp branches speeds it up by about 15%.
12160 * - use sext/zext opcodes instead of shifts
12162 * - get rid of TEMPLOADs if possible and use vregs instead
12163 * - clean up usage of OP_P/OP_ opcodes
12164 * - cleanup usage of DUMMY_USE
12165 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
12167 * - set the stack type and allocate a dreg in the EMIT_NEW macros
12168 * - get rid of all the <foo>2 stuff when the new JIT is ready.
12169 * - make sure handle_stack_args () is called before the branch is emitted
12170 * - when the new IR is done, get rid of all unused stuff
12171 * - COMPARE/BEQ as separate instructions or unify them ?
12172 * - keeping them separate allows specialized compare instructions like
12173 * compare_imm, compare_membase
12174 * - most back ends unify fp compare+branch, fp compare+ceq
12175 * - integrate mono_save_args into inline_method
12176 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
12177 * - handle long shift opts on 32 bit platforms somehow: they require
12178 * 3 sregs (2 for arg1 and 1 for arg2)
12179 * - make byref a 'normal' type.
12180 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
12181 * variable if needed.
12182 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
12183 * like inline_method.
12184 * - remove inlining restrictions
12185 * - fix LNEG and enable cfold of INEG
12186 * - generalize x86 optimizations like ldelema as a peephole optimization
12187 * - add store_mem_imm for amd64
12188 * - optimize the loading of the interruption flag in the managed->native wrappers
12189 * - avoid special handling of OP_NOP in passes
12190 * - move code inserting instructions into one function/macro.
12191 * - try a coalescing phase after liveness analysis
12192 * - add float -> vreg conversion + local optimizations on !x86
12193 * - figure out how to handle decomposed branches during optimizations, ie.
12194 * compare+branch, op_jump_table+op_br etc.
12195 * - promote RuntimeXHandles to vregs
12196 * - vtype cleanups:
12197 * - add a NEW_VARLOADA_VREG macro
12198 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
12199 * accessing vtype fields.
12200 * - get rid of I8CONST on 64 bit platforms
12201 * - dealing with the increase in code size due to branches created during opcode
12203 * - use extended basic blocks
12204 * - all parts of the JIT
12205 * - handle_global_vregs () && local regalloc
12206 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
12207 * - sources of increase in code size:
12210 * - isinst and castclass
12211 * - lvregs not allocated to global registers even if used multiple times
12212 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
12214 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
12215 * - add all micro optimizations from the old JIT
12216 * - put tree optimizations into the deadce pass
12217 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
12218 * specific function.
12219 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
12220 * fcompare + branchCC.
12221 * - create a helper function for allocating a stack slot, taking into account
12222 * MONO_CFG_HAS_SPILLUP.
12224 * - merge the ia64 switch changes.
12225 * - optimize mono_regstate2_alloc_int/float.
12226 * - fix the pessimistic handling of variables accessed in exception handler blocks.
12227 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
12228 * parts of the tree could be separated by other instructions, killing the tree
12229 * arguments, or stores killing loads etc. Also, should we fold loads into other
12230 * instructions if the result of the load is used multiple times ?
12231 * - make the REM_IMM optimization in mini-x86.c arch-independent.
12232 * - LAST MERGE: 108395.
12233 * - when returning vtypes in registers, generate IR and append it to the end of the
12234 * last bb instead of doing it in the epilog.
12235 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
12243 - When to decompose opcodes:
12244 - earlier: this makes some optimizations hard to implement, since the low level IR
12245 no longer contains the necessary information. But it is easier to do.
12246 - later: harder to implement, enables more optimizations.
12247 - Branches inside bblocks:
12248 - created when decomposing complex opcodes.
12249 - branches to another bblock: harmless, but not tracked by the branch
12250 optimizations, so need to branch to a label at the start of the bblock.
12251 - branches to inside the same bblock: very problematic, trips up the local
12252 reg allocator. Can be fixed by splitting the current bblock, but that is a
12253 complex operation, since some local vregs can become global vregs etc.
12254 - Local/global vregs:
12255 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
12256 local register allocator.
12257 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
12258 structure, created by mono_create_var (). Assigned to hregs or the stack by
12259 the global register allocator.
12260 - When to do optimizations like alu->alu_imm:
12261 - earlier -> saves work later on since the IR will be smaller/simpler
12262 - later -> can work on more instructions
12263 - Handling of valuetypes:
12264 - When a vtype is pushed on the stack, a new temporary is created, an
12265 instruction computing its address (LDADDR) is emitted and pushed on
12266 the stack. Need to optimize cases when the vtype is used immediately as in
12267 argument passing, stloc etc.
12268 - Instead of the to_end stuff in the old JIT, simply call the function handling
12269 the values on the stack before emitting the last instruction of the bb.
12272 #endif /* DISABLE_JIT */