2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
24 #ifdef HAVE_SYS_TIME_H
32 #include <mono/utils/memcheck.h>
34 #include <mono/metadata/assembly.h>
35 #include <mono/metadata/attrdefs.h>
36 #include <mono/metadata/loader.h>
37 #include <mono/metadata/tabledefs.h>
38 #include <mono/metadata/class.h>
39 #include <mono/metadata/object.h>
40 #include <mono/metadata/exception.h>
41 #include <mono/metadata/opcodes.h>
42 #include <mono/metadata/mono-endian.h>
43 #include <mono/metadata/tokentype.h>
44 #include <mono/metadata/tabledefs.h>
45 #include <mono/metadata/marshal.h>
46 #include <mono/metadata/debug-helpers.h>
47 #include <mono/metadata/mono-debug.h>
48 #include <mono/metadata/gc-internal.h>
49 #include <mono/metadata/security-manager.h>
50 #include <mono/metadata/threads-types.h>
51 #include <mono/metadata/security-core-clr.h>
52 #include <mono/metadata/monitor.h>
53 #include <mono/metadata/profiler-private.h>
54 #include <mono/metadata/profiler.h>
55 #include <mono/metadata/debug-mono-symfile.h>
56 #include <mono/utils/mono-compiler.h>
57 #include <mono/utils/mono-memory-model.h>
58 #include <mono/metadata/mono-basic-block.h>
65 #include "jit-icalls.h"
67 #include "debugger-agent.h"
69 #define BRANCH_COST 10
70 #define INLINE_LENGTH_LIMIT 20
71 #define INLINE_FAILURE do {\
72 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
75 #define CHECK_CFG_EXCEPTION do {\
76 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
79 #define METHOD_ACCESS_FAILURE do { \
80 char *method_fname = mono_method_full_name (method, TRUE); \
81 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
82 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS); \
83 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
84 g_free (method_fname); \
85 g_free (cil_method_fname); \
86 goto exception_exit; \
88 #define FIELD_ACCESS_FAILURE do { \
89 char *method_fname = mono_method_full_name (method, TRUE); \
90 char *field_fname = mono_field_full_name (field); \
91 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS); \
92 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
93 g_free (method_fname); \
94 g_free (field_fname); \
95 goto exception_exit; \
97 #define GENERIC_SHARING_FAILURE(opcode) do { \
98 if (cfg->generic_sharing_context) { \
99 if (cfg->verbose_level > 2) \
100 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
101 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
102 goto exception_exit; \
105 #define OUT_OF_MEMORY_FAILURE do { \
106 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
107 goto exception_exit; \
109 /* Determine whether 'ins' represents a load of the 'this' argument */
110 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
112 static int ldind_to_load_membase (int opcode);
113 static int stind_to_store_membase (int opcode);
115 int mono_op_to_op_imm (int opcode);
116 int mono_op_to_op_imm_noemul (int opcode);
118 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
119 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
120 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
122 /* helper methods signatures */
123 static MonoMethodSignature *helper_sig_class_init_trampoline = NULL;
124 static MonoMethodSignature *helper_sig_domain_get = NULL;
125 static MonoMethodSignature *helper_sig_generic_class_init_trampoline = NULL;
126 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm = NULL;
127 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline = NULL;
128 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline = NULL;
129 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm = NULL;
132 * Instruction metadata
140 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
141 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
147 #if SIZEOF_REGISTER == 8
152 /* keep in sync with the enum in mini.h */
155 #include "mini-ops.h"
160 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
161 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
163 * This should contain the index of the last sreg + 1. This is not the same
164 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
166 const gint8 ins_sreg_counts[] = {
167 #include "mini-ops.h"
172 #define MONO_INIT_VARINFO(vi,id) do { \
173 (vi)->range.first_use.pos.bid = 0xffff; \
/* Copy the three source registers from REGS into INS; REGS must hold at least 3 entries. */
179 mono_inst_set_src_registers (MonoInst *ins, int *regs)
181 ins->sreg1 = regs [0];
182 ins->sreg2 = regs [1];
183 ins->sreg3 = regs [2];
/* Allocate a fresh integer vreg in CFG (thin exported wrapper around alloc_ireg ()). */
187 mono_alloc_ireg (MonoCompile *cfg)
189 return alloc_ireg (cfg);
/* Allocate a fresh floating-point vreg in CFG. */
193 mono_alloc_freg (MonoCompile *cfg)
195 return alloc_freg (cfg);
/* Allocate a fresh pointer-sized vreg in CFG. */
199 mono_alloc_preg (MonoCompile *cfg)
201 return alloc_preg (cfg);
/* Allocate a destination vreg appropriate for the given evaluation STACK_TYPE. */
205 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
207 return alloc_dreg (cfg, stack_type);
211 * mono_alloc_ireg_ref:
213 * Allocate an IREG, and mark it as holding a GC ref.
216 mono_alloc_ireg_ref (MonoCompile *cfg)
218 return alloc_ireg_ref (cfg);
222 * mono_alloc_ireg_mp:
224 * Allocate an IREG, and mark it as holding a managed pointer.
227 mono_alloc_ireg_mp (MonoCompile *cfg)
229 return alloc_ireg_mp (cfg);
233 * mono_alloc_ireg_copy:
235 * Allocate an IREG with the same GC type as VREG.
238 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
240 if (vreg_is_ref (cfg, vreg))
241 return alloc_ireg_ref (cfg);
242 else if (vreg_is_mp (cfg, vreg))
243 return alloc_ireg_mp (cfg);
/* VREG is neither a GC ref nor a managed pointer: plain integer vreg. */
245 return alloc_ireg (cfg);
/*
 * Return the opcode used to move a value of TYPE between registers.
 * NOTE(review): several interior lines of this function are missing from
 * this excerpt (return statements, OP_* results, goto labels); enum value
 * types and generic instances appear to unwrap to their underlying type and
 * re-dispatch — confirm against the full source.
 */
249 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
255 switch (type->type) {
258 case MONO_TYPE_BOOLEAN:
270 case MONO_TYPE_FNPTR:
272 case MONO_TYPE_CLASS:
273 case MONO_TYPE_STRING:
274 case MONO_TYPE_OBJECT:
275 case MONO_TYPE_SZARRAY:
276 case MONO_TYPE_ARRAY:
280 #if SIZEOF_REGISTER == 8
289 case MONO_TYPE_VALUETYPE:
/* Enums move like their underlying primitive type. */
290 if (type->data.klass->enumtype) {
291 type = mono_class_enum_basetype (type->data.klass);
294 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
297 case MONO_TYPE_TYPEDBYREF:
299 case MONO_TYPE_GENERICINST:
300 type = &type->data.generic_class->container_class->byval_arg;
/* presumably the open VAR/MVAR case: only valid under generic sharing — verify */
304 g_assert (cfg->generic_sharing_context);
307 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/* Debug helper: print BB's in/out edge lists and its instruction list, tagged with MSG. */
313 mono_print_bb (MonoBasicBlock *bb, const char *msg)
318 printf ("\n%s %d: [IN: ", msg, bb->block_num);
319 for (i = 0; i < bb->in_count; ++i)
320 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
322 for (i = 0; i < bb->out_count; ++i)
323 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
325 for (tree = bb->code; tree; tree = tree->next)
326 mono_print_ins_index (-1, tree);
/*
 * Populate the cached icall signatures used when emitting calls to the
 * class-init / rgctx-fetch / monitor enter-exit trampolines.
 * NOTE(review): no locking is visible here — presumably called once during
 * JIT startup before any compilation; confirm against the caller.
 */
330 mono_create_helper_signatures (void)
332 helper_sig_domain_get = mono_create_icall_signature ("ptr");
333 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
334 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
335 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
336 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
337 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
338 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
342 * Can't put this at the beginning, since other files reference stuff from this
347 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
349 #define LOAD_ERROR do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto load_error; } while (0)
351 #define TYPE_LOAD_ERROR(klass) do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else { cfg->exception_ptr = klass; goto load_error; } } while (0)
353 #define GET_BBLOCK(cfg,tblock,ip) do { \
354 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
356 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
357 NEW_BBLOCK (cfg, (tblock)); \
358 (tblock)->cil_code = (ip); \
359 ADD_BBLOCK (cfg, (tblock)); \
363 #if defined(TARGET_X86) || defined(TARGET_AMD64)
364 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
365 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
366 (dest)->dreg = alloc_ireg_mp ((cfg)); \
367 (dest)->sreg1 = (sr1); \
368 (dest)->sreg2 = (sr2); \
369 (dest)->inst_imm = (imm); \
370 (dest)->backend.shift_amount = (shift); \
371 MONO_ADD_INS ((cfg)->cbb, (dest)); \
375 #if SIZEOF_REGISTER == 8
376 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
377 /* FIXME: Need to add many more cases */ \
378 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
380 int dr = alloc_preg (cfg); \
381 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
382 (ins)->sreg2 = widen->dreg; \
386 #define ADD_WIDEN_OP(ins, arg1, arg2)
389 #define ADD_BINOP(op) do { \
390 MONO_INST_NEW (cfg, ins, (op)); \
392 ins->sreg1 = sp [0]->dreg; \
393 ins->sreg2 = sp [1]->dreg; \
394 type_from_op (ins, sp [0], sp [1]); \
396 /* Have to insert a widening op */ \
397 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
398 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
399 MONO_ADD_INS ((cfg)->cbb, (ins)); \
400 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
403 #define ADD_UNOP(op) do { \
404 MONO_INST_NEW (cfg, ins, (op)); \
406 ins->sreg1 = sp [0]->dreg; \
407 type_from_op (ins, sp [0], NULL); \
409 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
410 MONO_ADD_INS ((cfg)->cbb, (ins)); \
411 *sp++ = mono_decompose_opcode (cfg, ins); \
414 #define ADD_BINCOND(next_block) do { \
417 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
418 cmp->sreg1 = sp [0]->dreg; \
419 cmp->sreg2 = sp [1]->dreg; \
420 type_from_op (cmp, sp [0], sp [1]); \
422 type_from_op (ins, sp [0], sp [1]); \
423 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
424 GET_BBLOCK (cfg, tblock, target); \
425 link_bblock (cfg, bblock, tblock); \
426 ins->inst_true_bb = tblock; \
427 if ((next_block)) { \
428 link_bblock (cfg, bblock, (next_block)); \
429 ins->inst_false_bb = (next_block); \
430 start_new_bblock = 1; \
432 GET_BBLOCK (cfg, tblock, ip); \
433 link_bblock (cfg, bblock, tblock); \
434 ins->inst_false_bb = tblock; \
435 start_new_bblock = 2; \
437 if (sp != stack_start) { \
438 handle_stack_args (cfg, stack_start, sp - stack_start); \
439 CHECK_UNVERIFIABLE (cfg); \
441 MONO_ADD_INS (bblock, cmp); \
442 MONO_ADD_INS (bblock, ins); \
446 * link_bblock: Links two basic blocks
448 * links two basic blocks in the control flow graph, the 'from'
449 * argument is the starting block and the 'to' argument is the block
450 * that control flow reaches after leaving 'from'.
453 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
455 MonoBasicBlock **newa;
459 if (from->cil_code) {
461 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
463 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
466 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
468 printf ("edge from entry to exit\n");
/* Bail out if the edge is already present in FROM's successor list. */
473 for (i = 0; i < from->out_count; ++i) {
474 if (to == from->out_bb [i]) {
/* Grow FROM->out_bb by one slot in the compile mempool, copying the old entries. */
480 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
481 for (i = 0; i < from->out_count; ++i) {
482 newa [i] = from->out_bb [i];
/* Same treatment for TO's predecessor list. */
490 for (i = 0; i < to->in_count; ++i) {
491 if (from == to->in_bb [i]) {
497 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
498 for (i = 0; i < to->in_count; ++i) {
499 newa [i] = to->in_bb [i];
/* Exported wrapper around the static link_bblock (). */
508 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
510 link_bblock (cfg, from, to);
514 * mono_find_block_region:
516 * We mark each basic block with a region ID. We use that to avoid BB
517 * optimizations when blocks are in different regions.
520 * A region token that encodes where this region is, and information
521 * about the clause owner for this block.
523 * The region encodes the try/catch/filter clause that owns this block
524 * as well as the type. -1 is a special value that represents a block
525 * that is in none of try/catch/filter.
528 mono_find_block_region (MonoCompile *cfg, int offset)
530 MonoMethodHeader *header = cfg->header;
531 MonoExceptionClause *clause;
534 for (i = 0; i < header->num_clauses; ++i) {
535 clause = &header->clauses [i];
/* A filter's region spans [filter_offset, handler_offset). */
536 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
537 (offset < (clause->handler_offset)))
/* Region token layout: (clause index + 1) << 8, OR-ed with the region kind and clause flags. */
538 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
540 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
541 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
542 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
543 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
544 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
546 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Offset falls inside the try part of the clause. */
549 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
550 return ((i + 1) << 8) | clause->flags;
/*
 * Collect (as a GList) the clauses of kind TYPE whose protected range
 * contains IP but not TARGET — i.e. the handlers that are exited by a
 * branch from IP to TARGET and therefore need special handling.
 */
557 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
559 MonoMethodHeader *header = cfg->header;
560 MonoExceptionClause *clause;
564 for (i = 0; i < header->num_clauses; ++i) {
565 clause = &header->clauses [i];
566 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
567 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
568 if (clause->flags == type)
569 res = g_list_append (res, clause);
/* Get or lazily create the stack-pointer variable associated with exception REGION. */
576 mono_create_spvar_for_region (MonoCompile *cfg, int region)
580 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
584 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
585 /* prevent it from being register allocated */
586 var->flags |= MONO_INST_INDIRECT;
588 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Return the exception variable registered for OFFSET, or NULL if none exists yet. */
592 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
594 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Get or lazily create the (object-typed) exception variable for OFFSET. */
598 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
602 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
606 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
607 /* prevent it from being register allocated */
608 var->flags |= MONO_INST_INDIRECT;
610 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
616 * Returns the type used in the eval stack when @type is loaded.
617 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/*
 * Set INST->type (STACK_*) and INST->klass to match a value of TYPE being
 * loaded onto the evaluation stack.  NOTE(review): interior lines (the
 * byref check feeding STACK_MP, several case labels and breaks) are
 * missing from this excerpt; the fall-through groupings below are
 * therefore partial.
 */
620 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
624 inst->klass = klass = mono_class_from_mono_type (type);
626 inst->type = STACK_MP;
631 switch (type->type) {
633 inst->type = STACK_INV;
637 case MONO_TYPE_BOOLEAN:
643 inst->type = STACK_I4;
648 case MONO_TYPE_FNPTR:
649 inst->type = STACK_PTR;
651 case MONO_TYPE_CLASS:
652 case MONO_TYPE_STRING:
653 case MONO_TYPE_OBJECT:
654 case MONO_TYPE_SZARRAY:
655 case MONO_TYPE_ARRAY:
656 inst->type = STACK_OBJ;
660 inst->type = STACK_I8;
664 inst->type = STACK_R8;
666 case MONO_TYPE_VALUETYPE:
/* Enums are loaded as their underlying primitive type. */
667 if (type->data.klass->enumtype) {
668 type = mono_class_enum_basetype (type->data.klass);
672 inst->type = STACK_VTYPE;
675 case MONO_TYPE_TYPEDBYREF:
676 inst->klass = mono_defaults.typed_reference_class;
677 inst->type = STACK_VTYPE;
679 case MONO_TYPE_GENERICINST:
680 type = &type->data.generic_class->container_class->byval_arg;
683 case MONO_TYPE_MVAR :
684 /* FIXME: all the arguments must be references for now,
685 * later look inside cfg and see if the arg num is
688 g_assert (cfg->generic_sharing_context);
689 inst->type = STACK_OBJ;
692 g_error ("unknown type 0x%02x in eval stack type", type->type);
697 * The following tables are used to quickly validate the IL code in type_from_op ().
700 bin_num_table [STACK_MAX] [STACK_MAX] = {
701 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
702 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
703 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
704 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
705 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
706 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
707 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
708 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
713 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
716 /* reduce the size of this table */
718 bin_int_table [STACK_MAX] [STACK_MAX] = {
719 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
720 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
721 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
722 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
723 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
724 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
725 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
726 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
730 bin_comp_table [STACK_MAX] [STACK_MAX] = {
731 /* Inv i L p F & O vt */
733 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
734 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
735 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
736 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
737 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
738 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
739 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
742 /* reduce the size of this table */
744 shift_table [STACK_MAX] [STACK_MAX] = {
745 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
746 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
747 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
748 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
749 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
750 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
751 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
752 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
756 * Tables to map from the non-specific opcode to the matching
757 * type-specific opcode.
759 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
761 binops_op_map [STACK_MAX] = {
762 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
765 /* handles from CEE_NEG to CEE_CONV_U8 */
767 unops_op_map [STACK_MAX] = {
768 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
771 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
773 ovfops_op_map [STACK_MAX] = {
774 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
777 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
779 ovf2ops_op_map [STACK_MAX] = {
780 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
783 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
785 ovf3ops_op_map [STACK_MAX] = {
786 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
789 /* handles from CEE_BEQ to CEE_BLT_UN */
791 beqops_op_map [STACK_MAX] = {
792 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
795 /* handles from CEE_CEQ to CEE_CLT_UN */
797 ceqops_op_map [STACK_MAX] = {
798 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
802 * Sets ins->type (the type on the eval stack) according to the
803 * type of the opcode and the arguments to it.
804 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
806 * FIXME: this function sets ins->type unconditionally in some cases, but
807 * it should set it to invalid for some types (a conv.x on an object)
810 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
812 switch (ins->opcode) {
819 /* FIXME: check unverifiable args for STACK_MP */
820 ins->type = bin_num_table [src1->type] [src2->type];
821 ins->opcode += binops_op_map [ins->type];
828 ins->type = bin_int_table [src1->type] [src2->type];
829 ins->opcode += binops_op_map [ins->type];
834 ins->type = shift_table [src1->type] [src2->type];
835 ins->opcode += binops_op_map [ins->type];
840 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
841 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
842 ins->opcode = OP_LCOMPARE;
843 else if (src1->type == STACK_R8)
844 ins->opcode = OP_FCOMPARE;
846 ins->opcode = OP_ICOMPARE;
848 case OP_ICOMPARE_IMM:
849 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
850 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
851 ins->opcode = OP_LCOMPARE_IMM;
863 ins->opcode += beqops_op_map [src1->type];
866 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
867 ins->opcode += ceqops_op_map [src1->type];
873 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
874 ins->opcode += ceqops_op_map [src1->type];
878 ins->type = neg_table [src1->type];
879 ins->opcode += unops_op_map [ins->type];
882 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
883 ins->type = src1->type;
885 ins->type = STACK_INV;
886 ins->opcode += unops_op_map [ins->type];
892 ins->type = STACK_I4;
893 ins->opcode += unops_op_map [src1->type];
896 ins->type = STACK_R8;
897 switch (src1->type) {
900 ins->opcode = OP_ICONV_TO_R_UN;
903 ins->opcode = OP_LCONV_TO_R_UN;
907 case CEE_CONV_OVF_I1:
908 case CEE_CONV_OVF_U1:
909 case CEE_CONV_OVF_I2:
910 case CEE_CONV_OVF_U2:
911 case CEE_CONV_OVF_I4:
912 case CEE_CONV_OVF_U4:
913 ins->type = STACK_I4;
914 ins->opcode += ovf3ops_op_map [src1->type];
916 case CEE_CONV_OVF_I_UN:
917 case CEE_CONV_OVF_U_UN:
918 ins->type = STACK_PTR;
919 ins->opcode += ovf2ops_op_map [src1->type];
921 case CEE_CONV_OVF_I1_UN:
922 case CEE_CONV_OVF_I2_UN:
923 case CEE_CONV_OVF_I4_UN:
924 case CEE_CONV_OVF_U1_UN:
925 case CEE_CONV_OVF_U2_UN:
926 case CEE_CONV_OVF_U4_UN:
927 ins->type = STACK_I4;
928 ins->opcode += ovf2ops_op_map [src1->type];
931 ins->type = STACK_PTR;
932 switch (src1->type) {
934 ins->opcode = OP_ICONV_TO_U;
938 #if SIZEOF_VOID_P == 8
939 ins->opcode = OP_LCONV_TO_U;
941 ins->opcode = OP_MOVE;
945 ins->opcode = OP_LCONV_TO_U;
948 ins->opcode = OP_FCONV_TO_U;
954 ins->type = STACK_I8;
955 ins->opcode += unops_op_map [src1->type];
957 case CEE_CONV_OVF_I8:
958 case CEE_CONV_OVF_U8:
959 ins->type = STACK_I8;
960 ins->opcode += ovf3ops_op_map [src1->type];
962 case CEE_CONV_OVF_U8_UN:
963 case CEE_CONV_OVF_I8_UN:
964 ins->type = STACK_I8;
965 ins->opcode += ovf2ops_op_map [src1->type];
969 ins->type = STACK_R8;
970 ins->opcode += unops_op_map [src1->type];
973 ins->type = STACK_R8;
977 ins->type = STACK_I4;
978 ins->opcode += ovfops_op_map [src1->type];
983 ins->type = STACK_PTR;
984 ins->opcode += ovfops_op_map [src1->type];
992 ins->type = bin_num_table [src1->type] [src2->type];
993 ins->opcode += ovfops_op_map [src1->type];
994 if (ins->type == STACK_R8)
995 ins->type = STACK_INV;
997 case OP_LOAD_MEMBASE:
998 ins->type = STACK_PTR;
1000 case OP_LOADI1_MEMBASE:
1001 case OP_LOADU1_MEMBASE:
1002 case OP_LOADI2_MEMBASE:
1003 case OP_LOADU2_MEMBASE:
1004 case OP_LOADI4_MEMBASE:
1005 case OP_LOADU4_MEMBASE:
1006 ins->type = STACK_PTR;
1008 case OP_LOADI8_MEMBASE:
1009 ins->type = STACK_I8;
1011 case OP_LOADR4_MEMBASE:
1012 case OP_LOADR8_MEMBASE:
1013 ins->type = STACK_R8;
1016 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
1020 if (ins->type == STACK_MP)
1021 ins->klass = mono_defaults.object_class;
1026 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1032 param_table [STACK_MAX] [STACK_MAX] = {
1037 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1041 switch (args->type) {
1051 for (i = 0; i < sig->param_count; ++i) {
1052 switch (args [i].type) {
1056 if (!sig->params [i]->byref)
1060 if (sig->params [i]->byref)
1062 switch (sig->params [i]->type) {
1063 case MONO_TYPE_CLASS:
1064 case MONO_TYPE_STRING:
1065 case MONO_TYPE_OBJECT:
1066 case MONO_TYPE_SZARRAY:
1067 case MONO_TYPE_ARRAY:
1074 if (sig->params [i]->byref)
1076 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1085 /*if (!param_table [args [i].type] [sig->params [i]->type])
1093 * When we need a pointer to the current domain many times in a method, we
1094 * call mono_domain_get() once and we store the result in a local variable.
1095 * This function returns the variable that represents the MonoDomain*.
1097 inline static MonoInst *
1098 mono_get_domainvar (MonoCompile *cfg)
/* Lazily create the local caching the MonoDomain* on first use. */
1100 if (!cfg->domainvar)
1101 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1102 return cfg->domainvar;
1106 * The got_var contains the address of the Global Offset Table when AOT
/*
 * Return (creating on demand) the variable holding the Global Offset Table
 * address.  Only meaningful on architectures defining MONO_ARCH_NEED_GOT_VAR
 * and when compiling AOT; the non-AOT early return is among the lines
 * missing from this excerpt — confirm against the full source.
 */
1110 mono_get_got_var (MonoCompile *cfg)
1112 #ifdef MONO_ARCH_NEED_GOT_VAR
1113 if (!cfg->compile_aot)
1115 if (!cfg->got_var) {
1116 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1118 return cfg->got_var;
/* Return the lazily-created rgctx variable; only valid under generic sharing (asserted). */
1125 mono_get_vtable_var (MonoCompile *cfg)
1127 g_assert (cfg->generic_sharing_context);
1129 if (!cfg->rgctx_var) {
1130 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1131 /* force the var to be stack allocated */
1132 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1135 return cfg->rgctx_var;
/* Map an evaluation-stack type back to a MonoType, using ins->klass for the MP/VTYPE cases. */
1139 type_from_stack_type (MonoInst *ins) {
1140 switch (ins->type) {
1141 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1142 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1143 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1144 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1146 return &ins->klass->this_arg;
1147 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1148 case STACK_VTYPE: return &ins->klass->byval_arg;
1150 g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * Map a MonoType to its STACK_* evaluation-stack kind.
 * NOTE(review): the return statements between the case labels are among the
 * lines missing from this excerpt, so the groupings below are partial.
 */
1155 static G_GNUC_UNUSED int
1156 type_to_stack_type (MonoType *t)
/* Strip enum wrappers etc. before dispatching. */
1158 t = mono_type_get_underlying_type (t);
1162 case MONO_TYPE_BOOLEAN:
1165 case MONO_TYPE_CHAR:
1172 case MONO_TYPE_FNPTR:
1174 case MONO_TYPE_CLASS:
1175 case MONO_TYPE_STRING:
1176 case MONO_TYPE_OBJECT:
1177 case MONO_TYPE_SZARRAY:
1178 case MONO_TYPE_ARRAY:
1186 case MONO_TYPE_VALUETYPE:
1187 case MONO_TYPE_TYPEDBYREF:
1189 case MONO_TYPE_GENERICINST:
1190 if (mono_type_generic_inst_is_valuetype (t))
1196 g_assert_not_reached ();
/*
 * Return the element class accessed by an ldelem/stelem CIL opcode.
 * NOTE(review): most case labels are among the lines missing from this
 * excerpt; only the returned classes and the REF cases are visible.
 */
1203 array_access_to_klass (int opcode)
1207 return mono_defaults.byte_class;
1209 return mono_defaults.uint16_class;
1212 return mono_defaults.int_class;
1215 return mono_defaults.sbyte_class;
1218 return mono_defaults.int16_class;
1221 return mono_defaults.int32_class;
1223 return mono_defaults.uint32_class;
1226 return mono_defaults.int64_class;
1229 return mono_defaults.single_class;
1232 return mono_defaults.double_class;
1233 case CEE_LDELEM_REF:
1234 case CEE_STELEM_REF:
1235 return mono_defaults.object_class;
1237 g_assert_not_reached ();
1243 * We try to share variables when possible
/*
 * Return a variable to hold a cross-basic-block stack value for SLOT,
 * reusing a previously created one when slot and stack type match.
 */
1246 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1251 /* inlining can result in deeper stacks */
1252 if (slot >= cfg->header->max_stack)
1253 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Cache key: one entry per (stack slot, stack type) pair. */
1255 pos = ins->type - 1 + slot * STACK_MAX;
1257 switch (ins->type) {
1264 if ((vnum = cfg->intvars [pos]))
1265 return cfg->varinfo [vnum];
1266 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1267 cfg->intvars [pos] = res->inst_c0;
1270 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Record IMAGE+TOKEN under KEY so AOT compilation can resolve the item from metadata alone. */
1276 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1279 * Don't use this if a generic_context is set, since that means AOT can't
1280 * look up the method using just the image+token.
1281 * table == 0 means this is a reference made from a wrapper.
1283 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1284 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1285 jump_info_token->image = image;
1286 jump_info_token->token = token;
1287 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1292 * This function is called to handle items that are left on the evaluation stack
1293 * at basic block boundaries. What happens is that we save the values to local variables
1294 * and we reload them later when first entering the target basic block (with the
1295 * handle_loaded_temps () function).
1296 * A single joint point will use the same variables (stored in the array bb->out_stack or
1297 * bb->in_stack, if the basic block is before or after the joint point).
1299 * This function needs to be called _before_ emitting the last instruction of
1300 * the bb (i.e. before emitting a branch).
1301 * If the stack merge fails at a join point, cfg->unverifiable is set.
1304 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1307 MonoBasicBlock *bb = cfg->cbb;
1308 MonoBasicBlock *outb;
1309 MonoInst *inst, **locals;
1314 if (cfg->verbose_level > 3)
1315 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
1316 if (!bb->out_scount) {
1317 bb->out_scount = count;
1318 //printf ("bblock %d has out:", bb->block_num);
1320 for (i = 0; i < bb->out_count; ++i) {
1321 outb = bb->out_bb [i];
1322 /* exception handlers are linked, but they should not be considered for stack args */
1323 if (outb->flags & BB_EXCEPTION_HANDLER)
1325 //printf (" %d", outb->block_num);
1326 if (outb->in_stack) {
1328 bb->out_stack = outb->in_stack;
1334 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1335 for (i = 0; i < count; ++i) {
1337 * try to reuse temps already allocated for this purpose, if they occupy the same
1338 * stack slot and if they are of the same type.
1339 * This won't cause conflicts since if 'local' is used to
1340 * store one of the values in the in_stack of a bblock, then
1341 * the same variable will be used for the same outgoing stack
1343 * This doesn't work when inlining methods, since the bblocks
1344 * in the inlined methods do not inherit their in_stack from
1345 * the bblock they are inlined to. See bug #58863 for an
1348 if (cfg->inlined_method)
1349 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1351 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
1356 for (i = 0; i < bb->out_count; ++i) {
1357 outb = bb->out_bb [i];
1358 /* exception handlers are linked, but they should not be considered for stack args */
1359 if (outb->flags & BB_EXCEPTION_HANDLER)
1361 if (outb->in_scount) {
1362 if (outb->in_scount != bb->out_scount) {
1363 cfg->unverifiable = TRUE;
1366 continue; /* check they are the same locals */
1368 outb->in_scount = count;
1369 outb->in_stack = bb->out_stack;
1372 locals = bb->out_stack;
1374 for (i = 0; i < count; ++i) {
1375 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1376 inst->cil_code = sp [i]->cil_code;
1377 sp [i] = locals [i];
1378 if (cfg->verbose_level > 3)
1379 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1383 * It is possible that the out bblocks already have in_stack assigned, and
1384 * the in_stacks differ. In this case, we will store to all the different
1391 /* Find a bblock which has a different in_stack */
1393 while (bindex < bb->out_count) {
1394 outb = bb->out_bb [bindex];
1395 /* exception handlers are linked, but they should not be considered for stack args */
1396 if (outb->flags & BB_EXCEPTION_HANDLER) {
1400 if (outb->in_stack != locals) {
1401 for (i = 0; i < count; ++i) {
1402 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1403 inst->cil_code = sp [i]->cil_code;
1404 sp [i] = locals [i];
1405 if (cfg->verbose_level > 3)
1406 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1408 locals = outb->in_stack;
1417 /* Emit code which loads interface_offsets [klass->interface_id]
1418 * The array is stored in memory before vtable.
/*
 * mini_emit_load_intf_reg_vtable:
 *
 *   Load into INTF_REG the interface offset entry for KLASS, stored at a
 *   negative offset from VTABLE_REG. Under AOT the (adjusted) interface
 *   id is obtained at runtime through a MONO_PATCH_INFO_ADJUSTED_IID
 *   patch; otherwise a constant offset is used.
 */
1421 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1423 if (cfg->compile_aot) {
1424 int ioffset_reg = alloc_preg (cfg);
1425 int iid_reg = alloc_preg (cfg);
1427 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1428 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1429 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* Non-AOT: interface_id is a compile-time constant, address it directly. */
1432 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_interface_bitmap_check:
 *
 *   Emit code which sets INTF_BIT_REG to a nonzero value if bit
 *   klass->interface_id is set in the interface bitmap located at
 *   BASE_REG + OFFSET. With COMPRESSED_INTERFACE_BITMAP the test is
 *   done by the mono_class_interface_match jit icall; otherwise the
 *   byte at (iid >> 3) is loaded and masked with (1 << (iid & 7)).
 */
1437 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1439 int ibitmap_reg = alloc_preg (cfg);
1440 #ifdef COMPRESSED_INTERFACE_BITMAP
1442 MonoInst *res, *ins;
1443 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1444 MONO_ADD_INS (cfg->cbb, ins);
1446 if (cfg->compile_aot)
1447 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1449 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1450 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1451 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1453 int ibitmap_byte_reg = alloc_preg (cfg);
1455 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
/* AOT: the interface id is only known at runtime, so compute the
 * byte index (iid >> 3) and the bit mask (1 << (iid & 7)) in code. */
1457 if (cfg->compile_aot) {
1458 int iid_reg = alloc_preg (cfg);
1459 int shifted_iid_reg = alloc_preg (cfg);
1460 int ibitmap_byte_address_reg = alloc_preg (cfg);
1461 int masked_iid_reg = alloc_preg (cfg);
1462 int iid_one_bit_reg = alloc_preg (cfg);
1463 int iid_bit_reg = alloc_preg (cfg);
1464 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1465 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1466 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1467 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1468 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1469 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1470 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1471 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* Non-AOT: byte index and mask are compile-time constants. */
1473 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1474 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1480 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1481 * stored in "klass_reg" implements the interface "klass".
1484 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
/* Delegates to the bitmap check against MonoClass->interface_bitmap. */
1486 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1490 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1491 * stored in "vtable_reg" implements the interface "klass".
1494 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
/* Delegates to the bitmap check against MonoVTable->interface_bitmap. */
1496 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1500 * Emit code which checks whenever the interface id of @klass is smaller than
1501 * than the value given by max_iid_reg.
/*
 * NOTE(review): on failure the emitted code either branches to
 * FALSE_TARGET (OP_PBLT_UN) or raises InvalidCastException (LT_UN cond
 * exc) — presumably selected by whether FALSE_TARGET is NULL; the guard
 * is not visible here, confirm against the full source.
 */
1504 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1505 MonoBasicBlock *false_target)
1507 if (cfg->compile_aot) {
/* AOT: load the interface id through a MONO_PATCH_INFO_IID patch. */
1508 int iid_reg = alloc_preg (cfg);
1509 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1510 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1513 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1515 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1517 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1520 /* Same as above, but obtains max_iid from a vtable */
1522 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1523 MonoBasicBlock *false_target)
1525 int max_iid_reg = alloc_preg (cfg);
/* Load MonoVTable->max_interface_id, then run the shared iid check. */
1527 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1528 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1531 /* Same as above, but obtains max_iid from a klass */
1533 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1534 MonoBasicBlock *false_target)
1536 int max_iid_reg = alloc_preg (cfg);
/* Load MonoClass->max_interface_id, then run the shared iid check. */
1538 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1539 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 *
 *   Emit an isinst-style subclass test: load the class in KLASS_REG's
 *   supertypes table at depth klass->idepth - 1 and compare it against
 *   KLASS (given as KLASS_INS->dreg, an AOT class constant, or an
 *   immediate). Branches to TRUE_TARGET on a match; branches to
 *   FALSE_TARGET when the candidate's idepth is too small (only checked
 *   when klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE).
 */
1543 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1545 int idepth_reg = alloc_preg (cfg);
1546 int stypes_reg = alloc_preg (cfg);
1547 int stype = alloc_preg (cfg);
/* Make sure klass->supertypes/idepth are initialized before use. */
1549 mono_class_setup_supertypes (klass);
1551 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1552 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1553 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1554 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1556 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1557 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1559 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1560 } else if (cfg->compile_aot) {
1561 int const_reg = alloc_preg (cfg);
1562 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1563 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1565 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1567 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst test without an explicit class instruction. */
1571 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1573 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 *
 *   Emit an interface cast check on the vtable in VTABLE_REG: verify the
 *   max interface id, then test KLASS's bit in the interface bitmap.
 *   NOTE(review): nonzero result branches to TRUE_TARGET or, alternatively,
 *   a zero result raises InvalidCastException — the guard choosing between
 *   the two is not visible here; confirm against the full source.
 */
1577 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1579 int intf_reg = alloc_preg (cfg);
1581 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1582 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1583 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1585 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1587 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1591 * Variant of the above that takes a register to the class, not the vtable.
1594 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1596 int intf_bit_reg = alloc_preg (cfg);
/* Same shape as mini_emit_iface_cast, but reads MonoClass fields. */
1598 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1599 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1600 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1602 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1604 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 *
 *   Emit an exact class identity check: compare KLASS_REG against KLASS
 *   (runtime instruction, AOT class constant, or immediate) and raise
 *   InvalidCastException on inequality (NE_UN).
 */
1608 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1611 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1612 } else if (cfg->compile_aot) {
1613 int const_reg = alloc_preg (cfg);
1614 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1615 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1617 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1619 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact class check without an explicit class instruction. */
1623 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1625 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 *
 *   Compare KLASS_REG against KLASS (AOT class constant or immediate)
 *   and branch to TARGET with the caller-supplied BRANCH_OP.
 */
1629 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1631 if (cfg->compile_aot) {
1632 int const_reg = alloc_preg (cfg);
1633 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1634 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1636 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1638 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1642 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 *
 *   Emit a castclass check of the object in OBJ_REG (whose MonoClass is
 *   in KLASS_REG) against KLASS, raising InvalidCastException on failure.
 *   For array classes: checks the rank, then the element (cast) class,
 *   with special-cased handling for object/enum element types; for
 *   SZARRAY also verifies the object has no bounds (is a vector).
 *   Otherwise walks the supertypes table and does an exact class check
 *   at klass->idepth - 1. OBJ_REG may be -1 to skip the vector check
 *   (used for arrays of arrays).
 */
1645 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1648 int rank_reg = alloc_preg (cfg);
1649 int eclass_reg = alloc_preg (cfg);
/* Shared-class instruction not supported on the array path. */
1651 g_assert (!klass_inst);
/* Rank must match exactly. */
1652 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1653 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1654 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1655 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1656 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
1657 if (klass->cast_class == mono_defaults.object_class) {
1658 int parent_reg = alloc_preg (cfg);
1659 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1660 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1661 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1662 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1663 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1664 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1665 } else if (klass->cast_class == mono_defaults.enum_class) {
1666 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1667 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1668 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1670 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1671 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1674 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1675 /* Check that the object is a vector too */
1676 int bounds_reg = alloc_preg (cfg);
1677 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1678 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1679 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array case: walk the supertypes table. */
1682 int idepth_reg = alloc_preg (cfg);
1683 int stypes_reg = alloc_preg (cfg);
1684 int stype = alloc_preg (cfg);
1686 mono_class_setup_supertypes (klass);
1688 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1689 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1690 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1691 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1693 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1694 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1695 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass check without an explicit class instruction. */
1700 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1702 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 *
 *   Emit an inline, unrolled memset of SIZE bytes at DESTREG + OFFSET.
 *   Only VAL == 0 is supported (asserted). Small aligned sizes use
 *   immediate stores; larger sizes store a zeroed register in 8/4/2/1
 *   byte chunks, respecting ALIGN (and NO_UNALIGNED_ACCESS).
 */
1706 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1710 g_assert (val == 0);
1715 if ((size <= 4) && (size <= align)) {
1718 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1721 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1724 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1726 #if SIZEOF_REGISTER == 8
1728 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General case: materialize the (zero) value in a register. */
1734 val_reg = alloc_preg (cfg);
1736 if (SIZEOF_REGISTER == 8)
1737 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1739 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Byte-by-byte fallback for unaligned destinations. */
1742 /* This could be optimized further if necessary */
1744 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1751 #if !NO_UNALIGNED_ACCESS
1752 if (SIZEOF_REGISTER == 8) {
1754 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1759 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* Remaining tail: 4-, 2-, then 1-byte stores. */
1767 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1772 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1777 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 *
 *   Emit an inline, unrolled copy of SIZE bytes from SRCREG + SOFFSET to
 *   DESTREG + DOFFSET, in 8/4/2/1 byte chunks depending on ALIGN (and
 *   NO_UNALIGNED_ACCESS). Asserts SIZE < 10000 to bound code expansion.
 */
1784 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1791 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1792 g_assert (size < 10000);
/* Byte-by-byte fallback for unaligned buffers. */
1795 /* This could be optimized further if necessary */
1797 cur_reg = alloc_preg (cfg);
1798 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1799 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1806 #if !NO_UNALIGNED_ACCESS
1807 if (SIZEOF_REGISTER == 8) {
/* 8-byte chunks on 64-bit targets that allow unaligned access. */
1809 cur_reg = alloc_preg (cfg);
1810 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1811 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Remaining tail: 4-, 2-, then 1-byte copies. */
1820 cur_reg = alloc_preg (cfg);
1821 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1822 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1828 cur_reg = alloc_preg (cfg);
1829 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1830 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1836 cur_reg = alloc_preg (cfg);
1837 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1838 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * ret_type_to_call_opcode:
 *
 *   Map a method's return TYPE to the matching call opcode, selecting
 *   the indirect (CALLI) or virtual (VIRT) variant as requested.
 *   Enums are resolved to their base type; generic instances to their
 *   container class; GSCTX is used to resolve shared generic types.
 */
1846 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
1849 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1852 type = mini_get_basic_type_from_generic (gsctx, type);
1853 switch (type->type) {
1854 case MONO_TYPE_VOID:
1855 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1858 case MONO_TYPE_BOOLEAN:
1861 case MONO_TYPE_CHAR:
1864 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1868 case MONO_TYPE_FNPTR:
1869 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1870 case MONO_TYPE_CLASS:
1871 case MONO_TYPE_STRING:
1872 case MONO_TYPE_OBJECT:
1873 case MONO_TYPE_SZARRAY:
1874 case MONO_TYPE_ARRAY:
1875 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1878 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1881 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1882 case MONO_TYPE_VALUETYPE:
/* Enums are handled as their underlying integral type. */
1883 if (type->data.klass->enumtype) {
1884 type = mono_class_enum_basetype (type->data.klass);
1887 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1888 case MONO_TYPE_TYPEDBYREF:
1889 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1890 case MONO_TYPE_GENERICINST:
1891 type = &type->data.generic_class->container_class->byval_arg;
1894 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1900 * target_type_is_incompatible:
1901 * @cfg: MonoCompile context
1903 * Check that the item @arg on the evaluation stack can be stored
1904 * in the target type (can be a local, or field, etc).
1905 * The cfg arg can be used to check if we need verification or just
1908 * Returns: non-0 value if arg can't be stored on a target.
1911 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1913 MonoType *simple_type;
1916 if (target->byref) {
1917 /* FIXME: check that the pointed to types match */
1918 if (arg->type == STACK_MP)
1919 return arg->klass != mono_class_from_mono_type (target);
1920 if (arg->type == STACK_PTR)
/* Resolve enums and other aliases before the type switch. */
1925 simple_type = mono_type_get_underlying_type (target);
1926 switch (simple_type->type) {
1927 case MONO_TYPE_VOID:
1931 case MONO_TYPE_BOOLEAN:
1934 case MONO_TYPE_CHAR:
1937 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1941 /* STACK_MP is needed when setting pinned locals */
1942 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1947 case MONO_TYPE_FNPTR:
1949 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
1950 * in native int. (#688008).
1952 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1955 case MONO_TYPE_CLASS:
1956 case MONO_TYPE_STRING:
1957 case MONO_TYPE_OBJECT:
1958 case MONO_TYPE_SZARRAY:
1959 case MONO_TYPE_ARRAY:
1960 if (arg->type != STACK_OBJ)
1962 /* FIXME: check type compatibility */
1966 if (arg->type != STACK_I8)
1971 if (arg->type != STACK_R8)
1974 case MONO_TYPE_VALUETYPE:
/* Value types additionally require an exact class match. */
1975 if (arg->type != STACK_VTYPE)
1977 klass = mono_class_from_mono_type (simple_type);
1978 if (klass != arg->klass)
1981 case MONO_TYPE_TYPEDBYREF:
1982 if (arg->type != STACK_VTYPE)
1984 klass = mono_class_from_mono_type (simple_type);
1985 if (klass != arg->klass)
1988 case MONO_TYPE_GENERICINST:
1989 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1990 if (arg->type != STACK_VTYPE)
1992 klass = mono_class_from_mono_type (simple_type);
1993 if (klass != arg->klass)
1997 if (arg->type != STACK_OBJ)
1999 /* FIXME: check type compatibility */
2003 case MONO_TYPE_MVAR:
2004 /* FIXME: all the arguments must be references for now,
2005 * later look inside cfg and see if the arg num is
2006 * really a reference
2008 g_assert (cfg->generic_sharing_context);
2009 if (arg->type != STACK_OBJ)
2013 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2019 * Prepare arguments for passing to a function call.
2020 * Return a non-zero value if the arguments can't be passed to the given
2022 * The type checks are not yet complete and some conversions may need
2023 * casts on 32 or 64 bit architectures.
2025 * FIXME: implement this using target_type_is_incompatible ()
2028 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2030 MonoType *simple_type;
/* The implicit 'this' argument must be a reference or pointer. */
2034 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2038 for (i = 0; i < sig->param_count; ++i) {
2039 if (sig->params [i]->byref) {
2040 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2044 simple_type = sig->params [i];
2045 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
2047 switch (simple_type->type) {
2048 case MONO_TYPE_VOID:
2053 case MONO_TYPE_BOOLEAN:
2056 case MONO_TYPE_CHAR:
2059 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2065 case MONO_TYPE_FNPTR:
2066 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2069 case MONO_TYPE_CLASS:
2070 case MONO_TYPE_STRING:
2071 case MONO_TYPE_OBJECT:
2072 case MONO_TYPE_SZARRAY:
2073 case MONO_TYPE_ARRAY:
2074 if (args [i]->type != STACK_OBJ)
2079 if (args [i]->type != STACK_I8)
2084 if (args [i]->type != STACK_R8)
2087 case MONO_TYPE_VALUETYPE:
/* Enums re-enter the switch with their underlying type. */
2088 if (simple_type->data.klass->enumtype) {
2089 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2092 if (args [i]->type != STACK_VTYPE)
2095 case MONO_TYPE_TYPEDBYREF:
2096 if (args [i]->type != STACK_VTYPE)
2099 case MONO_TYPE_GENERICINST:
2100 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2104 g_error ("unknown type 0x%02x in check_call_signature",
/* Map a *CALLVIRT opcode to the corresponding direct *CALL opcode
 * (used when a virtual call can be statically dispatched). */
2112 callvirt_to_call (int opcode)
2117 case OP_VOIDCALLVIRT:
2126 g_assert_not_reached ();
/* Map a *CALLVIRT opcode to the corresponding *CALL_MEMBASE opcode
 * (call through a slot loaded from memory, e.g. a vtable entry). */
2133 callvirt_to_call_membase (int opcode)
2137 return OP_CALL_MEMBASE;
2138 case OP_VOIDCALLVIRT:
2139 return OP_VOIDCALL_MEMBASE;
2141 return OP_FCALL_MEMBASE;
2143 return OP_LCALL_MEMBASE;
2145 return OP_VCALL_MEMBASE;
2147 g_assert_not_reached ();
2153 #ifdef MONO_ARCH_HAVE_IMT
/*
 * emit_imt_argument:
 *
 *   Set up the hidden IMT argument for an interface call: either the
 *   caller-supplied IMT_ARG or the target method itself (as an AOT
 *   method constant or a raw pointer). Under LLVM the register is
 *   recorded in call->imt_arg_reg; otherwise it is bound to
 *   MONO_ARCH_IMT_REG (or passed to mono_arch_emit_imt_argument when
 *   no dedicated register exists).
 */
2155 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2159 if (COMPILE_LLVM (cfg)) {
2160 method_reg = alloc_preg (cfg);
2163 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2164 } else if (cfg->compile_aot) {
2165 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2168 MONO_INST_NEW (cfg, ins, OP_PCONST);
2169 ins->inst_p0 = call->method;
2170 ins->dreg = method_reg;
2171 MONO_ADD_INS (cfg->cbb, ins);
2175 call->imt_arg_reg = method_reg;
2177 #ifdef MONO_ARCH_IMT_REG
2178 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2180 /* Need this to keep the IMT arg alive */
2181 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
/* Non-LLVM path. */
2186 #ifdef MONO_ARCH_IMT_REG
2187 method_reg = alloc_preg (cfg);
2190 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2191 } else if (cfg->compile_aot) {
2192 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2195 MONO_INST_NEW (cfg, ins, OP_PCONST);
2196 ins->inst_p0 = call->method;
2197 ins->dreg = method_reg;
2198 MONO_ADD_INS (cfg->cbb, ins);
2201 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2203 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/* Allocate a MonoJumpInfo from MP and fill in its type and target. */
2208 static MonoJumpInfo *
2209 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2211 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2215 ji->data.target = target;
/*
 * mono_emit_call_args:
 *
 *   Create and lower a MonoCallInst for a call with signature SIG and
 *   arguments ARGS. CALLI selects an indirect call, VIRTUAL a virtual
 *   one, TAIL a tailcall, RGCTX whether an rgctx argument is passed.
 *   Handles the vtype return value (via cfg->vret_addr or an
 *   OP_OUTARG_VTRETADDR temporary), converts R4 arguments under
 *   soft-float, and runs the arch/LLVM calling-convention lowering.
 */
2220 inline static MonoCallInst *
2221 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2222 MonoInst **args, int calli, int virtual, int tail, int rgctx)
2225 #ifdef MONO_ARCH_SOFT_FLOAT
2230 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2232 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2235 call->signature = sig;
2236 call->rgctx_reg = rgctx;
2238 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
2241 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2242 call->vret_var = cfg->vret_addr;
2243 //g_assert_not_reached ();
2245 } else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2246 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2249 temp->backend.is_pinvoke = sig->pinvoke;
2252 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2253 * address of return value to increase optimization opportunities.
2254 * Before vtype decomposition, the dreg of the call ins itself represents the
2255 * fact the call modifies the return value. After decomposition, the call will
2256 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2257 * will be transformed into an LDADDR.
2259 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2260 loada->dreg = alloc_preg (cfg);
2261 loada->inst_p0 = temp;
2262 /* We reference the call too since call->dreg could change during optimization */
2263 loada->inst_p1 = call;
2264 MONO_ADD_INS (cfg->cbb, loada);
2266 call->inst.dreg = temp->dreg;
2268 call->vret_var = loada;
2269 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2270 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2272 #ifdef MONO_ARCH_SOFT_FLOAT
2273 if (COMPILE_SOFT_FLOAT (cfg)) {
2275 * If the call has a float argument, we would need to do an r8->r4 conversion using
2276 * an icall, but that cannot be done during the call sequence since it would clobber
2277 * the call registers + the stack. So we do it before emitting the call.
2279 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2281 MonoInst *in = call->args [i];
2283 if (i >= sig->hasthis)
2284 t = sig->params [i - sig->hasthis];
2286 t = &mono_defaults.int_class->byval_arg;
2287 t = mono_type_get_underlying_type (t);
2289 if (!t->byref && t->type == MONO_TYPE_R4) {
2290 MonoInst *iargs [1];
2294 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2296 /* The result will be in an int vreg */
2297 call->args [i] = conv;
/* Lower the call for the selected backend. */
2304 if (COMPILE_LLVM (cfg))
2305 mono_llvm_emit_call (cfg, call);
2307 mono_arch_emit_call (cfg, call);
2309 mono_arch_emit_call (cfg, call);
/* Track the maximum outgoing parameter area used by any call. */
2312 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2313 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *
 *   Attach the rgctx argument in RGCTX_REG to CALL: bound to
 *   MONO_ARCH_RGCTX_REG when the arch defines one, otherwise recorded
 *   in call->rgctx_arg_reg.
 */
2319 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2321 #ifdef MONO_ARCH_RGCTX_REG
2322 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2323 cfg->uses_rgctx_reg = TRUE;
2324 call->rgctx_reg = TRUE;
2326 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 *
 *   Emit an indirect call through ADDR with signature SIG and arguments
 *   ARGS, optionally passing RGCTX_ARG (copied to a fresh preg so the
 *   value survives until the call is emitted).
 */
2333 inline static MonoInst*
2334 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2340 rgctx_reg = mono_alloc_preg (cfg);
2341 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2344 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE);
2346 call->inst.sreg1 = addr->dreg;
2348 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2351 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2353 return (MonoInst*)call;
2357 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2359 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *
 *   Emit a call to METHOD with signature SIG and arguments ARGS.
 *   THIS != NULL selects virtual dispatch. Handles: string ctors
 *   (signature duplicated with a string return type), transparent-proxy
 *   remoting wrappers, the delegate Invoke fast path (call through
 *   MonoDelegate->invoke_impl), devirtualization of non-virtual or
 *   final methods, vtable/IMT-based virtual dispatch, and an optional
 *   rgctx argument (RGCTX_ARG) and IMT argument (IMT_ARG).
 */
2362 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2363 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2365 gboolean might_be_remote;
2366 gboolean virtual = this != NULL;
2367 gboolean enable_for_aot = TRUE;
/* Copy the rgctx value to a fresh preg so it survives until the call. */
2373 rgctx_reg = mono_alloc_preg (cfg);
2374 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2377 if (method->string_ctor) {
2378 /* Create the real signature */
2379 /* FIXME: Cache these */
2380 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2381 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2386 context_used = mono_method_check_context_used (method);
/* Calls on MarshalByRef/object instances may go through a remoting proxy. */
2388 might_be_remote = this && sig->hasthis &&
2389 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2390 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
2392 if (might_be_remote && context_used) {
2395 g_assert (cfg->generic_sharing_context);
/* gshared: fetch the remoting-invoke wrapper address via the rgctx. */
2397 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2399 return mono_emit_calli (cfg, sig, args, addr, NULL);
2402 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE, rgctx_arg ? TRUE : FALSE);
2404 if (might_be_remote)
2405 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2407 call->method = method;
2408 call->inst.flags |= MONO_INST_HAS_METHOD;
2409 call->inst.inst_left = this;
2412 int vtable_reg, slot_reg, this_reg;
2414 this_reg = this->dreg;
2416 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
/* Fast path: delegate Invoke goes through MonoDelegate->invoke_impl. */
2417 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2418 MonoInst *dummy_use;
2420 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2422 /* Make a call to delegate->invoke_impl */
2423 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2424 call->inst.inst_basereg = this_reg;
2425 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2426 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2428 /* We must emit a dummy use here because the delegate trampoline will
2429 replace the 'this' argument with the delegate target making this activation
2430 no longer a root for the delegate.
2431 This is an issue for delegates that target collectible code such as dynamic
2432 methods of GC'able assemblies.
2434 For a test case look into #667921.
2436 FIXME: a dummy use is not the best way to do it as the local register allocator
2437 will put it on a caller save register and spil it around the call.
2438 Ideally, we would either put it on a callee save register or only do the store part.
2440 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2442 return (MonoInst*)call;
/* Devirtualize: non-virtual methods (or final ones) can be called directly. */
2446 if ((!cfg->compile_aot || enable_for_aot) &&
2447 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2448 (MONO_METHOD_IS_FINAL (method) &&
2449 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2450 !(method->klass->marshalbyref && context_used)) {
2452 * the method is not virtual, we just need to ensure this is not null
2453 * and then we can call the method directly.
2455 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2457 * The check above ensures method is not gshared, this is needed since
2458 * gshared methods can't have wrappers.
2460 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2463 if (!method->string_ctor)
2464 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2466 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2467 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2469 * the method is virtual, but we can statically dispatch since either
2470 * it's class or the method itself are sealed.
2471 * But first we need to ensure it's not a null reference.
2473 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2475 call->inst.opcode = callvirt_to_call (call->inst.opcode);
/* True virtual dispatch: call through a vtable (or IMT) slot. */
2477 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2479 vtable_reg = alloc_preg (cfg);
2480 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2481 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2483 #ifdef MONO_ARCH_HAVE_IMT
/* Interface call via the IMT: slot is at a negative vtable offset. */
2485 guint32 imt_slot = mono_method_get_imt_slot (method);
2486 emit_imt_argument (cfg, call, imt_arg);
2487 slot_reg = vtable_reg;
2488 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2491 if (slot_reg == -1) {
/* No IMT: resolve the interface offset from the vtable instead. */
2492 slot_reg = alloc_preg (cfg);
2493 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2494 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
/* Class method: direct vtable slot. */
2497 slot_reg = vtable_reg;
2498 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2499 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2500 #ifdef MONO_ARCH_HAVE_IMT
2502 g_assert (mono_method_signature (method)->generic_param_count);
2503 emit_imt_argument (cfg, call, imt_arg);
2508 call->inst.sreg1 = slot_reg;
2509 call->virtual = TRUE;
2513 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2516 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2518 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *
 * Convenience wrapper: emit a call to METHOD with arguments ARGS and receiver
 * THIS, using the method's own signature and no imt/rgctx arguments
 * (the trailing NULL, NULL passed to mono_emit_method_call_full ()).
 */
2522 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2524 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL, NULL);
/*
 * mono_emit_native_call:
 *
 * Emit a direct call to the native function FUNC with signature SIG.
 * The call instruction is appended to the current basic block (cfg->cbb)
 * and returned so callers can use its dreg as the call result.
 */
2528 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2535 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE);
2538 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2540 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *
 * Emit a call to the JIT icall registered for the C function address FUNC.
 * The icall info (signature, wrapper) is looked up by address, and the call
 * goes through the icall's wrapper rather than FUNC directly.
 */
2544 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2546 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2550 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2554  * mono_emit_abs_call:
2556  * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2558 inline static MonoInst*
2559 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2560 MonoMethodSignature *sig, MonoInst **args)
2562 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2566  * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2569 if (cfg->abs_patches == NULL)
2570 cfg->abs_patches = g_hash_table_new (NULL, NULL);
/* Register ji so the ABS patch resolver can map the fake call address back
 * to the patch info describing the real target. */
2571 g_hash_table_insert (cfg->abs_patches, ji, ji);
2572 ins = mono_emit_native_call (cfg, ji, sig, args);
/* Mark the call so later passes know the "address" is a patch, not code. */
2573 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *
 * Widen the result of the call INS when the callee might return a
 * small integer without the upper bits initialized (pinvoke calls, or when
 * LLVM is enabled).  Emits an explicit sign/zero extension matching the
 * return type's load opcode and returns the widened value.
 */
2578 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2580 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2581 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2585  * Native code might return non register sized integers
2586  * without initializing the upper bits.
/* Map the return type's load opcode to the matching conversion opcode. */
2588 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2589 case OP_LOADI1_MEMBASE:
2590 widen_op = OP_ICONV_TO_I1;
2592 case OP_LOADU1_MEMBASE:
2593 widen_op = OP_ICONV_TO_U1;
2595 case OP_LOADI2_MEMBASE:
2596 widen_op = OP_ICONV_TO_I2;
2598 case OP_LOADU2_MEMBASE:
2599 widen_op = OP_ICONV_TO_U2;
2605 if (widen_op != -1) {
2606 int dreg = alloc_preg (cfg);
2609 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
/* Preserve the stack type of the original call result. */
2610 widen->type = ins->type;
/*
 * get_memcpy_method:
 *
 * Return the managed String.memcpy helper from corlib, caching it in a
 * static on first use.  Aborts if a corlib without the helper is installed.
 */
2620 get_memcpy_method (void)
2622 static MonoMethod *memcpy_method = NULL;
2623 if (!memcpy_method) {
2624 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2626 g_error ("Old corlib found. Install a new one");
2628 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 * Fill WB_BITMAP with one bit per pointer-sized slot of KLASS (starting at
 * OFFSET) for every instance field that holds a reference.  Recurses into
 * embedded valuetypes that contain references; static fields are skipped.
 */
2632 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
2634 MonoClassField *field;
2635 gpointer iter = NULL;
2637 while ((field = mono_class_get_fields (klass, &iter))) {
2640 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* For valuetypes, field offsets include the (absent) MonoObject header. */
2642 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2643 if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
/* Reference fields must be pointer aligned. */
2644 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2645 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2647 MonoClass *field_class = mono_class_from_mono_type (field->type);
2648 if (field_class->has_references)
2649 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *
 * Emit a GC write barrier for a reference store through PTR.  The stored
 * value is either VALUE (an MonoInst*) or, when VALUE is NULL, the register
 * VALUE_REG.  No-op unless cfg->gen_write_barriers is set.
 *
 * Three strategies, in order of preference:
 *   1. an arch-specific OP_CARD_TABLE_WBARRIER instruction (non-AOT only),
 *   2. inline card marking: shift the address, mask, and store 1 into the
 *      card table,
 *   3. a call to the GC-provided managed write barrier method.
 * A dummy use keeps the stored value alive across the barrier.
 */
2655 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value, int value_reg)
2657 int card_table_shift_bits;
2658 gpointer card_table_mask;
2660 MonoInst *dummy_use;
2661 int nursery_shift_bits;
2662 size_t nursery_size;
2663 gboolean has_card_table_wb = FALSE;
2665 if (!cfg->gen_write_barriers)
2668 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
2670 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
2672 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
2673 has_card_table_wb = TRUE;
2676 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0) {
2679 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2680 wbarrier->sreg1 = ptr->dreg;
2682 wbarrier->sreg2 = value->dreg;
2684 wbarrier->sreg2 = value_reg;
2685 MONO_ADD_INS (cfg->cbb, wbarrier);
2686 } else if (card_table) {
2687 int offset_reg = alloc_preg (cfg);
2688 int card_reg = alloc_preg (cfg);
/* card index = address >> shift (optionally masked). */
2691 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
2692 if (card_table_mask)
2693 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
2695 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
2696  * IMM's larger than 32bits.
2698 if (cfg->compile_aot) {
2699 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
2701 MONO_INST_NEW (cfg, ins, OP_PCONST);
2702 ins->inst_p0 = card_table;
2703 ins->dreg = card_reg;
2704 MONO_ADD_INS (cfg->cbb, ins);
/* Mark the card: *(card_table + index) = 1. */
2707 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
2708 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
2710 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2711 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep the stored value alive until after the barrier. */
2715 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
2717 MONO_INST_NEW (cfg, dummy_use, OP_DUMMY_USE);
2718 dummy_use->sreg1 = value_reg;
2719 MONO_ADD_INS (cfg->cbb, dummy_use);
/*
 * mono_emit_wb_aware_memcpy:
 *
 * Try to emit an inline, write-barrier-aware copy of a valuetype of type
 * KLASS described by IARGS (dest, src) with the given SIZE and ALIGN.
 * Returns failure (caller falls back to an icall/memcpy path) when the
 * alignment is below pointer size or the size exceeds the 32-slot bitmap.
 * Large copies (> 5 pointer slots) go through the
 * mono_gc_wbarrier_value_copy_bitmap icall; small ones are unrolled into
 * load/store pairs with an explicit write barrier on reference slots.
 */
2724 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
2726 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
2727 unsigned need_wb = 0;
2732 /*types with references can't have alignment smaller than sizeof(void*) */
2733 if (align < SIZEOF_VOID_P)
2736 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
2737 if (size > 32 * SIZEOF_VOID_P)
2740 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
2742 /* We don't unroll more than 5 stores to avoid code bloat. */
2743 if (size > 5 * SIZEOF_VOID_P) {
2744 /*This is harmless and simplifies mono_gc_wbarrier_value_copy_bitmap */
2745 size += (SIZEOF_VOID_P - 1);
2746 size &= ~(SIZEOF_VOID_P - 1);
2748 EMIT_NEW_ICONST (cfg, iargs [2], size);
2749 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
2750 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
2754 destreg = iargs [0]->dreg;
2755 srcreg = iargs [1]->dreg;
2758 dest_ptr_reg = alloc_preg (cfg);
2759 tmp_reg = alloc_preg (cfg);
/* Running destination pointer, advanced one slot per iteration. */
2762 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
2764 while (size >= SIZEOF_VOID_P) {
2765 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, tmp_reg, srcreg, offset);
2766 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
/* Barrier only the slots the bitmap marked as references. */
2769 emit_write_barrier (cfg, iargs [0], NULL, tmp_reg);
2771 offset += SIZEOF_VOID_P;
2772 size -= SIZEOF_VOID_P;
2775 /*tmp += sizeof (void*)*/
2776 if (size >= SIZEOF_VOID_P) {
2777 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
2778 MONO_ADD_INS (cfg->cbb, iargs [0]);
2782 /* Those cannot be references since size < sizeof (void*) */
2784 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
2785 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
2791 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
2792 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
2798 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
2799 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
2808  * Emit code to copy a valuetype of type @klass whose address is stored in
2809  * @src->dreg to memory whose address is stored in @dest->dreg.
2812 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2814 MonoInst *iargs [4];
2817 MonoMethod *memcpy_method;
2821  * This check breaks with spilled vars... need to handle it during verification anyway.
2822  * g_assert (klass && klass == src->klass && klass == dest->klass);
/* Size/alignment differ between the native (marshalled) and managed layouts. */
2826 n = mono_class_native_size (klass, &align);
2828 n = mono_class_value_size (klass, &align);
2830 /* if native is true there should be no references in the struct */
2831 if (cfg->gen_write_barriers && klass->has_references && !native) {
2832 /* Avoid barriers when storing to the stack */
2833 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2834 (dest->opcode == OP_LDADDR))) {
2835 int context_used = 0;
2840 if (cfg->generic_sharing_context)
2841 context_used = mono_class_check_context_used (klass);
2843 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
2844 if ((cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
2846 } else if (context_used) {
/* Shared code: load the class from the rgctx at runtime. */
2847 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2849 if (cfg->compile_aot) {
2850 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2852 EMIT_NEW_PCONST (cfg, iargs [2], klass);
2853 mono_class_compute_gc_descriptor (klass);
/* Barrier-aware copy performed by the runtime. */
2857 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No references (or stack store): a plain memcpy is safe. */
2862 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2863 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2864 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2868 EMIT_NEW_ICONST (cfg, iargs [2], n);
2870 memcpy_method = get_memcpy_method ();
2871 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 * Return the managed String.memset helper from corlib, caching it in a
 * static on first use.  Aborts if a corlib without the helper is installed.
 */
2876 get_memset_method (void)
2878 static MonoMethod *memset_method = NULL;
2879 if (!memset_method) {
2880 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2882 g_error ("Old corlib found. Install a new one");
2884 return memset_method;
/*
 * mini_emit_initobj:
 *
 * Emit IR to zero-initialize a valuetype of type KLASS at the address in
 * DEST->dreg.  Small types are memset inline; larger ones call the managed
 * memset helper.
 */
2888 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2890 MonoInst *iargs [3];
2893 MonoMethod *memset_method;
2895 /* FIXME: Optimize this for the case when dest is an LDADDR */
2897 mono_class_init (klass);
2898 n = mono_class_value_size (klass, &align);
/* Inline threshold: up to 5 pointer-sized slots. */
2900 if (n <= sizeof (gpointer) * 5) {
2901 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2904 memset_method = get_memset_method ();
2906 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2907 EMIT_NEW_ICONST (cfg, iargs [2], n);
2908 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 * Emit IR to load the runtime generic context for METHOD, depending on how
 * the generic context is reached: via a method rgctx variable, via a vtable
 * variable (static methods / valuetypes), or through the 'this' argument's
 * vtable.  Only valid under generic sharing (asserted below).
 */
2913 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2915 MonoInst *this = NULL;
2917 g_assert (cfg->generic_sharing_context);
/* Load 'this' only when the context does not come from the method itself. */
2919 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2920 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2921 !method->klass->valuetype)
2922 EMIT_NEW_ARGLOAD (cfg, this, 0);
2924 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2925 MonoInst *mrgctx_loc, *mrgctx_var;
2928 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2930 mrgctx_loc = mono_get_vtable_var (cfg);
2931 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2934 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2935 MonoInst *vtable_loc, *vtable_var;
2939 vtable_loc = mono_get_vtable_var (cfg);
2940 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2942 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
/* The variable holds an mrgctx; dereference to get the class vtable. */
2943 MonoInst *mrgctx_var = vtable_var;
2946 vtable_reg = alloc_preg (cfg);
2947 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2948 vtable_var->type = STACK_PTR;
/* Fallback: load the vtable from 'this'. */
2956 vtable_reg = alloc_preg (cfg);
2957 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 * Allocate (from MP) and initialize an rgctx-entry patch descriptor: which
 * METHOD's context to use, whether it is an mrgctx, and the patch
 * type/data/info-type identifying the slot to fetch.
 */
2962 static MonoJumpInfoRgctxEntry *
2963 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
2965 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2966 res->method = method;
2967 res->in_mrgctx = in_mrgctx;
2968 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2969 res->data->type = patch_type;
2970 res->data->data.target = patch_data;
2971 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *
 * Emit a call to the lazy rgctx fetch trampoline which resolves ENTRY
 * against the runtime generic context RGCTX and returns the slot value.
 */
2976 static inline MonoInst*
2977 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2979 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 * Emit IR to load the rgctx slot of type RGCTX_TYPE for KLASS from the
 * current method's runtime generic context.
 */
2983 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2984 MonoClass *klass, MonoRgctxInfoType rgctx_type)
2986 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2987 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2989 return emit_rgctx_fetch (cfg, rgctx, entry);
2993  * emit_get_rgctx_method:
2995  * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2996  * normal constants, else emit a load from the rgctx.
2999 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3000 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3002 if (!context_used) {
3005 switch (rgctx_type) {
3006 case MONO_RGCTX_INFO_METHOD:
3007 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3009 case MONO_RGCTX_INFO_METHOD_RGCTX:
3010 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3013 g_assert_not_reached ();
3016 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3017 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3019 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 * Emit IR to load the rgctx slot of type RGCTX_TYPE for FIELD from the
 * current method's runtime generic context.
 */
3024 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3025 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3027 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3028 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3030 return emit_rgctx_fetch (cfg, rgctx, entry);
3034  * On return the caller must check @klass for load errors.
/*
 * emit_generic_class_init:
 *
 * Emit a call to the generic class init trampoline for KLASS.  Under
 * sharing the vtable argument is fetched from the rgctx; otherwise a
 * vtable constant is used.  Where the arch supports it, the vtable is
 * passed in MONO_ARCH_VTABLE_REG rather than as a normal argument.
 */
3037 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
3039 MonoInst *vtable_arg;
3041 int context_used = 0;
3043 if (cfg->generic_sharing_context)
3044 context_used = mono_class_check_context_used (klass);
3047 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3048 klass, MONO_RGCTX_INFO_VTABLE);
3050 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3054 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* LLVM uses a variant of the trampoline with a different signature. */
3057 if (COMPILE_LLVM (cfg))
3058 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3060 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3061 #ifdef MONO_ARCH_VTABLE_REG
3062 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3063 cfg->uses_vtable_reg = TRUE;
/*
 * emit_seq_point:
 *
 * Emit a debugger sequence point at IL offset IP for METHOD, but only when
 * sequence points are enabled and METHOD is the method being compiled
 * (i.e. not inlined code).  INTR_LOC marks an interruptible location.
 */
3070 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc)
3074 if (cfg->gen_seq_points && cfg->method == method) {
3075 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3076 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *
 * When the --debug=casts option is active, emit IR that records the source
 * class (read from OBJ_REG's vtable) and the target KLASS into the JIT TLS
 * data, so a failed cast can report both types.  No-op otherwise.
 * If the platform has no JIT TLS intrinsic, prints an error instead.
 */
3081 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
3083 if (mini_get_debug_options ()->better_cast_details) {
3084 int to_klass_reg = alloc_preg (cfg);
3085 int vtable_reg = alloc_preg (cfg);
3086 int klass_reg = alloc_preg (cfg);
3087 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
/* Fixed stray "." after the newline in the error message. */
3090 fprintf (stderr, "error: --debug=casts not supported on this platform.\n");
3094 MONO_ADD_INS (cfg->cbb, tls_get);
/* obj->vtable->klass is the runtime source class of the cast. */
3095 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3096 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3098 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3099 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3100 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * reset_cast_details:
 *
 * Counterpart of save_cast_details (): clear the saved cast information in
 * the JIT TLS data once the cast has succeeded.  Only the 'from' field
 * needs clearing (see comment below).
 */
3105 reset_cast_details (MonoCompile *cfg)
3107 /* Reset the variables holding the cast details */
3108 if (mini_get_debug_options ()->better_cast_details) {
3109 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3111 MONO_ADD_INS (cfg->cbb, tls_get);
3112 /* It is enough to reset the from field */
3113 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3118  * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 * Emit IR that checks the runtime type of OBJ is exactly ARRAY_CLASS and
 * throws ArrayTypeMismatchException otherwise.  The comparison strategy
 * depends on MONO_OPT_SHARED, generic sharing and AOT: compare classes,
 * compare against an rgctx-fetched vtable, or compare vtable pointers.
 */
3121 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3123 int vtable_reg = alloc_preg (cfg);
3124 int context_used = 0;
3126 if (cfg->generic_sharing_context)
3127 context_used = mono_class_check_context_used (array_class);
3129 save_cast_details (cfg, array_class, obj->dreg);
/* Loading the vtable also null-checks OBJ (FAULT variant). */
3131 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3133 if (cfg->opt & MONO_OPT_SHARED) {
3134 int class_reg = alloc_preg (cfg);
3135 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3136 if (cfg->compile_aot) {
3137 int klass_reg = alloc_preg (cfg);
3138 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3139 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3141 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3143 } else if (context_used) {
3144 MonoInst *vtable_ins;
3146 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3147 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3149 if (cfg->compile_aot) {
3153 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3155 vt_reg = alloc_preg (cfg);
3156 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3157 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3160 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3162 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3166 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3168 reset_cast_details (cfg);
3172  * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3173  * generic code is generated.
3176 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3178 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3181 MonoInst *rgctx, *addr;
3183 /* FIXME: What if the class is shared? We might not
3184    have to get the address of the method from the
/* Shared case: resolve the Unbox method's code address through the rgctx
 * and call indirectly. */
3186 addr = emit_get_rgctx_method (cfg, context_used, method,
3187 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3189 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3191 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3193 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *
 * Emit IR for unboxing SP[0] to valuetype KLASS: verify the object is not
 * an array (rank 0) and its element class matches KLASS, throwing
 * InvalidCastException otherwise, then return the address of the boxed
 * payload (object pointer + sizeof (MonoObject)).
 */
3198 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3202 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3203 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3204 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3205 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3207 obj_reg = sp [0]->dreg;
/* Loading the vtable also null-checks the object (FAULT variant). */
3208 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3209 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3211 /* FIXME: generics */
3212 g_assert (klass->rank == 0);
/* Arrays can never unbox to a valuetype. */
3215 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3216 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3218 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3219 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
3222 MonoInst *element_class;
3224 /* This assertion is from the unboxcast insn */
3225 g_assert (klass->rank == 0);
/* Shared code: compare against the element class fetched from the rgctx. */
3227 element_class = emit_get_rgctx_klass (cfg, context_used,
3228 klass->element_class, MONO_RGCTX_INFO_KLASS);
3230 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3231 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3233 save_cast_details (cfg, klass->element_class, obj_reg);
3234 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3235 reset_cast_details (cfg);
/* Address of the unboxed payload: skip the MonoObject header. */
3238 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3239 MONO_ADD_INS (cfg->cbb, add);
3240 add->type = STACK_MP;
3247  * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 * Emit IR to allocate an instance of KLASS.  FOR_BOX selects a boxing
 * allocator where the GC provides one.  The allocation path depends on
 * sharing (rgctx-fetched class/vtable), MONO_OPT_SHARED (domain passed
 * explicitly), AOT (specialized corlib helper for out-of-line throw
 * paths), and the availability of a managed allocator.
 */
3250 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3252 MonoInst *iargs [2];
3258 MonoInst *iargs [2];
3261 FIXME: we cannot get managed_alloc here because we can't get
3262 the class's vtable (because it's not a closed class)
3264 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3265 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3268 if (cfg->opt & MONO_OPT_SHARED)
3269 rgctx_info = MONO_RGCTX_INFO_KLASS;
3271 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3272 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3274 if (cfg->opt & MONO_OPT_SHARED) {
3275 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3277 alloc_ftn = mono_object_new;
3280 alloc_ftn = mono_object_new_specific;
3283 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3286 if (cfg->opt & MONO_OPT_SHARED) {
3287 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3288 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3290 alloc_ftn = mono_object_new;
3291 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3292 /* This happens often in argument checking code, eg. throw new FooException... */
3293 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3294 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3295 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3297 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3298 MonoMethod *managed_alloc = NULL;
/* vtable creation failed: report a type load error (see comment above). */
3302 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3303 cfg->exception_ptr = klass;
3307 #ifndef MONO_CROSS_COMPILE
3308 managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3311 if (managed_alloc) {
3312 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3313 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3315 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocators want the instance size in pointer words as arg 0. */
3317 guint32 lw = vtable->klass->instance_size;
3318 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3319 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3320 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3323 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3327 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3331  * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 * Emit IR to box VAL as KLASS.  Nullable<T> goes through the managed
 * Nullable Box method (via rgctx under sharing); everything else
 * allocates with handle_alloc () and stores VAL into the payload just
 * past the MonoObject header.
 */
3334 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
3336 MonoInst *alloc, *ins;
3338 if (mono_class_is_nullable (klass)) {
3339 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3342 /* FIXME: What if the class is shared? We might not
3343    have to get the method address from the RGCTX. */
3344 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3345 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3346 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3348 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3350 return mono_emit_method_call (cfg, method, &val, NULL);
3354 alloc = handle_alloc (cfg, klass, TRUE, context_used);
/* Store the value into the freshly allocated box, after the header. */
3358 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 * Return whether KLASS has a covariant or contravariant generic parameter
 * instantiated with a reference type.  Used to decide if casts need the
 * variance-aware (cached icall) path.
 */
3365 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
3368 MonoGenericContainer *container;
3369 MonoGenericInst *ginst;
3371 if (klass->generic_class) {
3372 container = klass->generic_class->container_class->generic_container;
3373 ginst = klass->generic_class->context.class_inst;
3374 } else if (klass->generic_container && context_used) {
3375 container = klass->generic_container;
3376 ginst = container->context.class_inst;
3381 for (i = 0; i < container->type_argc; ++i) {
/* Skip invariant parameters; only variant ones affect cast semantics. */
3383 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
3385 type = ginst->type_argv [i];
3386 if (mini_type_is_reference (cfg, type))
3392 // FIXME: This doesn't work yet (class libs tests fail?)
/* Decide whether an isinst/castclass against KLASS needs the icall path.
 * NOTE: the leading TRUE currently forces every class onto the complex
 * path (see FIXME above); the rest of the expression is the intended
 * condition (interfaces, arrays, nullables, MBR, sealed, type variables). */
3393 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
3396  * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 *
 * Emit IR for 'castclass SRC to KLASS'.  Variant generic interfaces use
 * the cached castclass wrapper; complex classes (see is_complex_isinst)
 * go through the mono_object_castclass icall; the remaining simple cases
 * are expanded inline (null check, then interface or class hierarchy
 * check), throwing InvalidCastException on mismatch.
 */
3399 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3401 MonoBasicBlock *is_null_bb;
3402 int obj_reg = src->dreg;
3403 int vtable_reg = alloc_preg (cfg);
3404 MonoInst *klass_inst = NULL;
3409 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
3410 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
3411 MonoInst *cache_ins;
3413 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3418 /* klass - it's the second element of the cache entry*/
3419 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3422 args [2] = cache_ins;
3424 return mono_emit_method_call (cfg, mono_castclass, args, NULL);
3427 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3429 if (is_complex_isinst (klass)) {
3430 /* Complex case, handle by an icall */
3436 args [1] = klass_inst;
3438 return mono_emit_jit_icall (cfg, mono_object_castclass, args);
3440 /* Simple case, handled by the code below */
/* null always passes castclass: branch straight to the end. */
3444 NEW_BBLOCK (cfg, is_null_bb);
3446 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3447 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3449 save_cast_details (cfg, klass, obj_reg);
3451 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3452 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3453 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3455 int klass_reg = alloc_preg (cfg);
3457 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3459 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3460 /* the remoting code is broken, access the class for now */
3461 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3462 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3464 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3465 cfg->exception_ptr = klass;
3468 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3470 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3471 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3473 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3475 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3476 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3480 MONO_START_BB (cfg, is_null_bb);
3482 reset_cast_details (cfg);
3488 * Returns NULL and set the cfg exception on error.
3491 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3494 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3495 int obj_reg = src->dreg;
3496 int vtable_reg = alloc_preg (cfg);
3497 int res_reg = alloc_ireg_ref (cfg);
3498 MonoInst *klass_inst = NULL;
3503 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
3504 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
3505 MonoInst *cache_ins;
3507 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3512 /* klass - it's the second element of the cache entry*/
3513 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3516 args [2] = cache_ins;
3518 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
3521 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3523 if (is_complex_isinst (klass)) {
3524 /* Complex case, handle by an icall */
3530 args [1] = klass_inst;
3532 return mono_emit_jit_icall (cfg, mono_object_isinst, args);
3534 /* Simple case, the code below can handle it */
3538 NEW_BBLOCK (cfg, is_null_bb);
3539 NEW_BBLOCK (cfg, false_bb);
3540 NEW_BBLOCK (cfg, end_bb);
3542 /* Do the assignment at the beginning, so the other assignment can be if converted */
3543 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3544 ins->type = STACK_OBJ;
3547 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3548 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3550 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3552 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3553 g_assert (!context_used);
3554 /* the is_null_bb target simply copies the input register to the output */
3555 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3557 int klass_reg = alloc_preg (cfg);
3560 int rank_reg = alloc_preg (cfg);
3561 int eclass_reg = alloc_preg (cfg);
3563 g_assert (!context_used);
3564 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3565 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3566 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3567 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3568 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
3569 if (klass->cast_class == mono_defaults.object_class) {
3570 int parent_reg = alloc_preg (cfg);
3571 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3572 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3573 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3574 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3575 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3576 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3577 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3578 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3579 } else if (klass->cast_class == mono_defaults.enum_class) {
3580 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3581 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3582 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3583 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3585 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3586 /* Check that the object is a vector too */
3587 int bounds_reg = alloc_preg (cfg);
3588 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3589 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3590 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3593 /* the is_null_bb target simply copies the input register to the output */
3594 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3596 } else if (mono_class_is_nullable (klass)) {
3597 g_assert (!context_used);
3598 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3599 /* the is_null_bb target simply copies the input register to the output */
3600 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
3602 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3603 g_assert (!context_used);
3604 /* the remoting code is broken, access the class for now */
3605 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3606 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3608 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3609 cfg->exception_ptr = klass;
3612 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3614 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3615 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3617 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3618 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3620 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3621 /* the is_null_bb target simply copies the input register to the output */
3622 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
3627 MONO_START_BB (cfg, false_bb);
3629 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3630 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3632 MONO_START_BB (cfg, is_null_bb);
3634 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the CEE_MONO_CISINST opcode: an isinst check which is aware
 * of transparent (remoting) proxies.  SRC holds the object reference to test
 * against KLASS; the result is an integer whose meaning is documented in the
 * comment below, materialized in a fresh integer vreg.
 */
3640 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3642 /* This opcode takes as input an object reference and a class, and returns:
3643 0) if the object is an instance of the class,
3644 1) if the object is not instance of the class,
3645 2) if the object is a proxy whose type cannot be determined */
3648 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3649 int obj_reg = src->dreg;
3650 int dreg = alloc_ireg (cfg);
3652 int klass_reg = alloc_preg (cfg);
3654 NEW_BBLOCK (cfg, true_bb);
3655 NEW_BBLOCK (cfg, false_bb);
3656 NEW_BBLOCK (cfg, false2_bb);
3657 NEW_BBLOCK (cfg, end_bb);
3658 NEW_BBLOCK (cfg, no_proxy_bb);
/* A null reference is never an instance of anything -> 1 */
3660 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3661 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3663 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3664 NEW_BBLOCK (cfg, interface_fail_bb);
/* Fast path: interface bitmap check against the vtable */
3666 tmp_reg = alloc_preg (cfg);
3667 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3668 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
3669 MONO_START_BB (cfg, interface_fail_bb);
3670 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Bitmap check failed: if it is not a transparent proxy either, answer 1 */
3672 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
/* A proxy carrying custom type info cannot be decided here -> result 2 */
3674 tmp_reg = alloc_preg (cfg);
3675 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3676 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3677 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
/* Non-interface case: load the object's class from its vtable */
3679 tmp_reg = alloc_preg (cfg);
3680 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3681 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3683 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Transparent proxy: test against the remote class it stands in for */
3684 tmp_reg = alloc_preg (cfg);
3685 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3686 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
/* Proxies without custom type info take the ordinary (no_proxy) path */
3688 tmp_reg = alloc_preg (cfg);
3689 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3690 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3691 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
/* Cast failure on a proxy with custom type info yields 2, not 1 */
3693 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3694 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3696 MONO_START_BB (cfg, no_proxy_bb);
3698 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* false_bb: not an instance of KLASS -> 1 */
3701 MONO_START_BB (cfg, false_bb);
3703 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3704 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* false2_bb: proxy whose type cannot be determined at JIT time -> 2 */
3706 MONO_START_BB (cfg, false2_bb);
3708 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3709 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* true_bb: the object is an instance of KLASS -> 0 */
3711 MONO_START_BB (cfg, true_bb);
3713 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3715 MONO_START_BB (cfg, end_bb);
/* Materialize the result as an ICONST typed as I4 for the evaluation stack */
3718 MONO_INST_NEW (cfg, ins, OP_ICONST);
3720 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the CEE_MONO_CCASTCLASS opcode: a castclass check which is
 * aware of transparent (remoting) proxies.  SRC is the object reference to
 * cast to KLASS; the integer result is described in the comment below.
 */
3726 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3728 /* This opcode takes as input an object reference and a class, and returns:
3729 0) if the object is an instance of the class,
3730 1) if the object is a proxy whose type cannot be determined
3731 an InvalidCastException exception is thrown otherwise */
3734 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3735 int obj_reg = src->dreg;
3736 int dreg = alloc_ireg (cfg);
3737 int tmp_reg = alloc_preg (cfg);
3738 int klass_reg = alloc_preg (cfg);
3740 NEW_BBLOCK (cfg, end_bb);
3741 NEW_BBLOCK (cfg, ok_result_bb);
/* Casting a null reference always succeeds -> 0 */
3743 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3744 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record the klass/object pair so a failing cast can produce a detailed message */
3746 save_cast_details (cfg, klass, obj_reg);
3748 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3749 NEW_BBLOCK (cfg, interface_fail_bb);
/* Fast path: interface bitmap check against the vtable */
3751 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3752 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3753 MONO_START_BB (cfg, interface_fail_bb);
3754 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Only a transparent proxy may still pass; anything else throws here */
3756 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
/* A proxy without custom type info cannot satisfy the cast -> throw */
3758 tmp_reg = alloc_preg (cfg);
3759 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3760 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3761 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Proxy with custom type info: defer the decision to the runtime -> 1 */
3763 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3764 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3767 NEW_BBLOCK (cfg, no_proxy_bb);
3769 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3770 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3771 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Transparent proxy: test against the remote class it stands in for */
3773 tmp_reg = alloc_preg (cfg);
3774 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3775 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
/* Proxies without custom type info take the ordinary (no_proxy) path */
3777 tmp_reg = alloc_preg (cfg);
3778 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3779 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3780 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3782 NEW_BBLOCK (cfg, fail_1_bb);
3784 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
/* fail_1_bb: proxy whose type cannot be determined at JIT time -> 1 */
3786 MONO_START_BB (cfg, fail_1_bb);
3788 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3789 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* no_proxy_bb: ordinary object, normal castclass (throws on failure) */
3791 MONO_START_BB (cfg, no_proxy_bb);
3793 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
/* ok_result_bb: the cast succeeded -> 0 */
3796 MONO_START_BB (cfg, ok_result_bb);
3798 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3800 MONO_START_BB (cfg, end_bb);
/* Materialize the result as an ICONST typed as I4 for the evaluation stack */
3803 MONO_INST_NEW (cfg, ins, OP_ICONST);
3805 ins->type = STACK_I4;
3811 * Returns NULL and sets the cfg exception on error.
3813 static G_GNUC_UNUSED MonoInst*
3814 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
/*
 * Allocate a delegate object of class KLASS and fill in its target, method,
 * method_code and invoke_impl fields inline, instead of calling
 * mono_delegate_ctor ().  TARGET is the 'this' of the delegate (possibly a
 * null constant); METHOD is the method the delegate will invoke;
 * CONTEXT_USED is passed through to the rgctx lookups for shared code.
 */
3818 gpointer *trampoline;
3819 MonoInst *obj, *method_ins, *tramp_ins;
3823 obj = handle_alloc (cfg, klass, FALSE, 0);
3827 /* Inline the contents of mono_delegate_ctor */
3829 /* Set target field */
3830 /* Optimize away setting of NULL target */
3831 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
3832 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* target holds an object reference, so the GC needs a write barrier */
3833 if (cfg->gen_write_barriers) {
3834 dreg = alloc_preg (cfg);
3835 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target));
3836 emit_write_barrier (cfg, ptr, target, 0);
3840 /* Set method field */
3841 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
3842 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3843 if (cfg->gen_write_barriers) {
3844 dreg = alloc_preg (cfg);
3845 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method));
3846 emit_write_barrier (cfg, ptr, method_ins, 0);
3849 * To avoid looking up the compiled code belonging to the target method
3850 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3851 * store it, and we fill it after the method has been compiled.
3853 if (!cfg->compile_aot && !method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
3854 MonoInst *code_slot_ins;
3857 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Look up (lazily creating) the per-domain code slot for METHOD, under the domain lock */
3859 domain = mono_domain_get ();
3860 mono_domain_lock (domain);
3861 if (!domain_jit_info (domain)->method_code_hash)
3862 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3863 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3865 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3866 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3868 mono_domain_unlock (domain);
3870 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3872 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3875 /* Set invoke_impl field */
3876 if (cfg->compile_aot) {
/* Under AOT the trampoline address is resolved through a patch at load time */
3877 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3879 trampoline = mono_create_delegate_trampoline (cfg->domain, klass);
3880 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3882 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3884 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a native call to the registered mono_array_new_va () icall wrapper
 * to allocate a RANK-dimensional array; the dimension arguments are in SP.
 */
3890 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3892 MonoJitICallInfo *info;
3894 /* Need to register the icall so it gets an icall wrapper */
3895 info = mono_get_array_new_va_icall (rank);
3897 cfg->flags |= MONO_CFG_HAS_VARARGS;
3899 /* mono_array_new_va () needs a vararg calling convention */
3900 cfg->disable_llvm = TRUE;
3902 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3903 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Emit the OP_LOAD_GOTADDR instruction initializing cfg->got_var at the
 * start of the entry bblock, plus a dummy use in the exit bblock which keeps
 * the variable alive for the whole method.  A no-op if there is no got var
 * or it was already allocated.
 */
3907 mono_emit_load_got_addr (MonoCompile *cfg)
3909 MonoInst *getaddr, *dummy_use;
3911 if (!cfg->got_var || cfg->got_var_allocated)
3914 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3915 getaddr->dreg = cfg->got_var->dreg;
3917 /* Add it to the start of the first bblock */
3918 if (cfg->bb_entry->code) {
/* Prepend before any existing code so the got var is set before use */
3919 getaddr->next = cfg->bb_entry->code;
3920 cfg->bb_entry->code = getaddr;
3923 MONO_ADD_INS (cfg->bb_entry, getaddr);
3925 cfg->got_var_allocated = TRUE;
3928 * Add a dummy use to keep the got_var alive, since real uses might
3929 * only be generated by the back ends.
3930 * Add it to end_bblock, so the variable's lifetime covers the whole
3932 * It would be better to make the usage of the got var explicit in all
3933 * cases when the backend needs it (i.e. calls, throw etc.), so this
3934 * wouldn't be needed.
3936 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3937 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Maximum IL code size for inlining candidates; initialized lazily from the
 * MONO_INLINELIMIT env var, defaulting to INLINE_LENGTH_LIMIT (see
 * mono_method_check_inlining ()). */
3940 static int inline_limit;
3941 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled in
 * CFG.  Rejects, among others: generic code sharing, deep inline nesting,
 * NoInlining/synchronized/marshalbyref methods, IL bodies at or above the
 * inline size limit (unless marked AggressiveInlining), classes whose cctor
 * has not run yet, methods with declarative security, and (under soft float)
 * R4 parameters or return values.
 */
3944 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3946 MonoMethodHeaderSummary header;
3948 #ifdef MONO_ARCH_SOFT_FLOAT
3949 MonoMethodSignature *sig = mono_method_signature (method);
/* No inlining while sharing generic code */
3953 if (cfg->generic_sharing_context)
/* Cap recursion of the inliner itself */
3956 if (cfg->inline_depth > 10)
3959 #ifdef MONO_ARCH_HAVE_LMF_OPS
3960 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3961 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3962 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3967 if (!mono_method_get_header_summary (method, &header))
3970 /*runtime, icall and pinvoke are checked by summary call*/
3971 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3972 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3973 (method->klass->marshalbyref) ||
3977 /* also consider num_locals? */
3978 /* Do the size check early to avoid creating vtables */
3979 if (!inline_limit_inited) {
3980 if (getenv ("MONO_INLINELIMIT"))
3981 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3983 inline_limit = INLINE_LENGTH_LIMIT;
3984 inline_limit_inited = TRUE;
/* [MethodImpl (AggressiveInlining)] overrides the size limit */
3986 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
3990 * if we can initialize the class of the method right away, we do,
3991 * otherwise we don't allow inlining if the class needs initialization,
3992 * since it would mean inserting a call to mono_runtime_class_init()
3993 * inside the inlined code
3995 if (!(cfg->opt & MONO_OPT_SHARED)) {
3996 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3997 if (cfg->run_cctors && method->klass->has_cctor) {
3998 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
3999 if (!method->klass->runtime_info)
4000 /* No vtable created yet */
4002 vtable = mono_class_vtable (cfg->domain, method->klass);
4005 /* This makes so that inline cannot trigger */
4006 /* .cctors: too many apps depend on them */
4007 /* running with a specific order... */
4008 if (! vtable->initialized)
4010 mono_runtime_class_init (vtable);
4012 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
4013 if (!method->klass->runtime_info)
4014 /* No vtable created yet */
4016 vtable = mono_class_vtable (cfg->domain, method->klass);
4019 if (!vtable->initialized)
4024 * If we're compiling for shared code
4025 * the cctor will need to be run at aot method load time, for example,
4026 * or at the end of the compilation of the inlining method.
4028 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
4033 * CAS - do not inline methods with declarative security
4034 * Note: this has to be before any possible return TRUE;
4036 if (mono_method_has_declsec (method))
4039 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft float: do not inline methods taking or returning R4 values */
4041 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4043 for (i = 0; i < sig->param_count; ++i)
4044 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static field access from METHOD requires emitting a
 * class-initialization check for VTABLE's class.
 */
4052 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
/* Already initialized; under AOT the init state cannot be baked into the code */
4054 if (vtable->initialized && !cfg->compile_aot)
/* beforefieldinit classes allow field access before the cctor runs */
4057 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
4060 if (!mono_class_needs_cctor_run (vtable->klass, method))
4063 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
4064 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR whose elements have class KLASS.  When BCHECK is set a bounds
 * check against MonoArray.max_length is emitted first.  Returns the address
 * instruction (stack type STACK_MP).
 */
4071 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4075 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
4077 mono_class_init (klass);
4078 size = mono_class_array_element_size (klass);
4080 mult_reg = alloc_preg (cfg);
4081 array_reg = arr->dreg;
4082 index_reg = index->dreg;
4084 #if SIZEOF_REGISTER == 8
4085 /* The array reg is 64 bits but the index reg is only 32 */
4086 if (COMPILE_LLVM (cfg)) {
/* LLVM handles the widening itself */
4088 index2_reg = index_reg;
4090 index2_reg = alloc_preg (cfg);
4091 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
4094 if (index->type == STACK_I8) {
4095 index2_reg = alloc_preg (cfg);
4096 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4098 index2_reg = index_reg;
4103 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
4105 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Power-of-two element sizes fold the scale into a single LEA */
4106 if (size == 1 || size == 2 || size == 4 || size == 8) {
4107 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4109 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
4110 ins->klass = mono_class_get_element_class (klass);
4111 ins->type = STACK_MP;
/* Generic path: addr = arr + index * size + offsetof (MonoArray, vector) */
4117 add_reg = alloc_ireg_mp (cfg);
4119 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
4120 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4121 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4122 ins->klass = mono_class_get_element_class (klass);
4123 ins->type = STACK_MP;
4124 MONO_ADD_INS (cfg->cbb, ins);
4129 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [INDEX_INS1, INDEX_INS2] of the
 * two-dimensional array ARR with element class KLASS, including per-dimension
 * range checks against the lower_bound/length pairs stored in the bounds
 * array.  Only compiled when the arch does not emulate mul/div, since the
 * address computation uses OP_PMUL.
 */
4131 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4133 int bounds_reg = alloc_preg (cfg);
4134 int add_reg = alloc_ireg_mp (cfg);
4135 int mult_reg = alloc_preg (cfg);
4136 int mult2_reg = alloc_preg (cfg);
4137 int low1_reg = alloc_preg (cfg);
4138 int low2_reg = alloc_preg (cfg);
4139 int high1_reg = alloc_preg (cfg);
4140 int high2_reg = alloc_preg (cfg);
4141 int realidx1_reg = alloc_preg (cfg);
4142 int realidx2_reg = alloc_preg (cfg);
4143 int sum_reg = alloc_preg (cfg);
4148 mono_class_init (klass);
4149 size = mono_class_array_element_size (klass);
4151 index1 = index_ins1->dreg;
4152 index2 = index_ins2->dreg;
4154 /* range checking */
4155 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4156 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: realidx = index - lower_bound, then an unsigned compare
 * against length catches both negative and too-large indexes at once */
4158 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4159 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4160 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4161 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4162 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4163 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4164 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: same check against the second MonoArrayBounds entry */
4166 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4167 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4168 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4169 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4170 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
4171 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4172 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + (realidx1 * length2 + realidx2) * size + offsetof (MonoArray, vector) */
4174 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4175 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4176 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4177 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4178 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4180 ins->type = STACK_MP;
4182 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit IR computing an element address for the Get/Set/Address methods of
 * the array class CMETHOD->klass; the array and indexes are in SP.  Uses the
 * fast rank-1/rank-2 paths when possible, otherwise falls back to a
 * marshalling wrapper which computes the address.
 */
4189 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4193 MonoMethod *addr_method;
/* For setters the last parameter is the value, not an index */
4196 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4199 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
4201 #ifndef MONO_ARCH_EMULATE_MUL_DIV
4202 /* emit_ldelema_2 depends on OP_LMUL */
4203 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
4204 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* Generic case: call the managed array-address wrapper for this rank/size */
4208 element_size = mono_class_array_element_size (cmethod->klass->element_class);
4209 addr_method = mono_marshal_get_array_address (rank, element_size);
4210 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint (see mono_set_break_policy ()) */
4215 static MonoBreakPolicy
4216 always_insert_breakpoint (MonoMethod *method)
4218 return MONO_BREAK_POLICY_ALWAYS;
/* Callback deciding whether break instructions are honored; replaceable by
 * embedders through mono_set_break_policy (). */
4221 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4224 * mono_set_break_policy:
4225 * policy_callback: the new callback function
4227 * Allow embedders to decide whether to actually obey breakpoint instructions
4228 * (both break IL instructions and Debugger.Break () method calls), for example
4229 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4230 * untrusted or semi-trusted code.
4232 * @policy_callback will be called every time a break point instruction needs to
4233 * be inserted with the method argument being the method that calls Debugger.Break()
4234 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4235 * if it wants the breakpoint to not be effective in the given method.
4236 * #MONO_BREAK_POLICY_ALWAYS is the default.
4239 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
/* A NULL callback restores the default always-break policy */
4241 if (policy_callback)
4242 break_policy_func = policy_callback;
4244 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:
 *
 *   Ask the registered break policy callback whether a breakpoint should
 * actually be emitted for METHOD.  (The historical typo in the name is kept
 * because callers reference it.)
 */
4248 should_insert_brekpoint (MonoMethod *method) {
4249 switch (break_policy_func (method)) {
4250 case MONO_BREAK_POLICY_ALWAYS:
4252 case MONO_BREAK_POLICY_NEVER:
4254 case MONO_BREAK_POLICY_ON_DBG:
/* Break only when running under the Mono debugger */
4255 return mono_debug_using_mono_debugger ();
4257 g_warning ("Incorrect value returned from break policy callback");
4262 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Emit an inline element load (IS_SET == 0) or store (IS_SET != 0) through
 * the element address computed by mini_emit_ldelema_1_ins ().  args [0] is
 * the array, args [1] the index, args [2] points at the value location.
 */
4264 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4266 MonoInst *addr, *store, *load;
4267 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4269 /* the bounds check is already done by the callers */
4270 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* set: copy *args [2] into the array slot */
4272 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4273 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
/* get: copy the array slot into *args [2] */
4275 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4276 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/*
 * mini_emit_inst_for_ctor:
 *
 *   Give the SIMD intrinsics code a chance to replace a call to the
 * constructor CMETHOD with inline IR; ins stays NULL when SIMD optimization
 * is disabled or no intrinsic matches.
 */
4282 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4284 MonoInst *ins = NULL;
4285 #ifdef MONO_ARCH_SIMD_INTRINSICS
4286 if (cfg->opt & MONO_OPT_SIMD) {
4287 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/*
 * emit_memory_barrier:
 *
 *   Emit an OP_MEMORY_BARRIER instruction of the given KIND (e.g.
 * FullBarrier) into the current bblock and return it.
 */
4297 emit_memory_barrier (MonoCompile *cfg, int kind)
4299 MonoInst *ins = NULL;
4300 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4301 MONO_ADD_INS (cfg->cbb, ins);
4302 ins->backend.memory_barrier_kind = kind;
4308 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4310 MonoInst *ins = NULL;
4312 static MonoClass *runtime_helpers_class = NULL;
4313 if (! runtime_helpers_class)
4314 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
4315 "System.Runtime.CompilerServices", "RuntimeHelpers");
4317 if (cmethod->klass == mono_defaults.string_class) {
4318 if (strcmp (cmethod->name, "get_Chars") == 0) {
4319 int dreg = alloc_ireg (cfg);
4320 int index_reg = alloc_preg (cfg);
4321 int mult_reg = alloc_preg (cfg);
4322 int add_reg = alloc_preg (cfg);
4324 #if SIZEOF_REGISTER == 8
4325 /* The array reg is 64 bits but the index reg is only 32 */
4326 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
4328 index_reg = args [1]->dreg;
4330 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
4332 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4333 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
4334 add_reg = ins->dreg;
4335 /* Avoid a warning */
4337 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4340 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
4341 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4342 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4343 add_reg, G_STRUCT_OFFSET (MonoString, chars));
4345 type_from_op (ins, NULL, NULL);
4347 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4348 int dreg = alloc_ireg (cfg);
4349 /* Decompose later to allow more optimizations */
4350 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
4351 ins->type = STACK_I4;
4352 ins->flags |= MONO_INST_FAULT;
4353 cfg->cbb->has_array_access = TRUE;
4354 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
4357 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
4358 int mult_reg = alloc_preg (cfg);
4359 int add_reg = alloc_preg (cfg);
4361 /* The corlib functions check for oob already. */
4362 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
4363 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4364 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
4365 return cfg->cbb->last_ins;
4368 } else if (cmethod->klass == mono_defaults.object_class) {
4370 if (strcmp (cmethod->name, "GetType") == 0) {
4371 int dreg = alloc_ireg_ref (cfg);
4372 int vt_reg = alloc_preg (cfg);
4373 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4374 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
4375 type_from_op (ins, NULL, NULL);
4378 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
4379 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
4380 int dreg = alloc_ireg (cfg);
4381 int t1 = alloc_ireg (cfg);
4383 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
4384 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
4385 ins->type = STACK_I4;
4389 } else if (strcmp (cmethod->name, ".ctor") == 0) {
4390 MONO_INST_NEW (cfg, ins, OP_NOP);
4391 MONO_ADD_INS (cfg->cbb, ins);
4395 } else if (cmethod->klass == mono_defaults.array_class) {
4396 if (strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
4397 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
4399 #ifndef MONO_BIG_ARRAYS
4401 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
4404 if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
4405 int dreg = alloc_ireg (cfg);
4406 int bounds_reg = alloc_ireg_mp (cfg);
4407 MonoBasicBlock *end_bb, *szarray_bb;
4408 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
4410 NEW_BBLOCK (cfg, end_bb);
4411 NEW_BBLOCK (cfg, szarray_bb);
4413 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
4414 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
4415 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4416 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
4417 /* Non-szarray case */
4419 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4420 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4422 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4423 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4424 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4425 MONO_START_BB (cfg, szarray_bb);
4428 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4429 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4431 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4432 MONO_START_BB (cfg, end_bb);
4434 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
4435 ins->type = STACK_I4;
4441 if (cmethod->name [0] != 'g')
4444 if (strcmp (cmethod->name, "get_Rank") == 0) {
4445 int dreg = alloc_ireg (cfg);
4446 int vtable_reg = alloc_preg (cfg);
4447 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
4448 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4449 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
4450 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4451 type_from_op (ins, NULL, NULL);
4454 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4455 int dreg = alloc_ireg (cfg);
4457 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4458 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4459 type_from_op (ins, NULL, NULL);
4464 } else if (cmethod->klass == runtime_helpers_class) {
4466 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
4467 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
4471 } else if (cmethod->klass == mono_defaults.thread_class) {
4472 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
4473 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
4474 MONO_ADD_INS (cfg->cbb, ins);
4476 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
4477 return emit_memory_barrier (cfg, FullBarrier);
4479 } else if (cmethod->klass == mono_defaults.monitor_class) {
4481 /* FIXME this should be integrated to the check below once we support the trampoline version */
4482 #if defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4483 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) {
4484 MonoMethod *fast_method = NULL;
4486 /* Avoid infinite recursion */
4487 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN && !strcmp (cfg->method->name, "FastMonitorEnterV4"))
4490 fast_method = mono_monitor_get_fast_path (cmethod);
4494 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
4498 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
4499 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
4502 if (COMPILE_LLVM (cfg)) {
4504 * Pass the argument normally, the LLVM backend will handle the
4505 * calling convention problems.
4507 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4509 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
4510 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4511 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4512 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4515 return (MonoInst*)call;
4516 } else if (strcmp (cmethod->name, "Exit") == 0) {
4519 if (COMPILE_LLVM (cfg)) {
4520 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4522 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
4523 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4524 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4525 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4528 return (MonoInst*)call;
4530 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4532 MonoMethod *fast_method = NULL;
4534 /* Avoid infinite recursion */
4535 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
4536 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
4537 strcmp (cfg->method->name, "FastMonitorExit") == 0))
4540 if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) ||
4541 strcmp (cmethod->name, "Exit") == 0)
4542 fast_method = mono_monitor_get_fast_path (cmethod);
4546 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
4549 } else if (cmethod->klass->image == mono_defaults.corlib &&
4550 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
4551 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
4554 #if SIZEOF_REGISTER == 8
4555 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
4556 /* 64 bit reads are already atomic */
4557 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
4558 ins->dreg = mono_alloc_preg (cfg);
4559 ins->inst_basereg = args [0]->dreg;
4560 ins->inst_offset = 0;
4561 MONO_ADD_INS (cfg->cbb, ins);
4565 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
4566 if (strcmp (cmethod->name, "Increment") == 0) {
4567 MonoInst *ins_iconst;
4570 if (fsig->params [0]->type == MONO_TYPE_I4)
4571 opcode = OP_ATOMIC_ADD_NEW_I4;
4572 #if SIZEOF_REGISTER == 8
4573 else if (fsig->params [0]->type == MONO_TYPE_I8)
4574 opcode = OP_ATOMIC_ADD_NEW_I8;
4577 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4578 ins_iconst->inst_c0 = 1;
4579 ins_iconst->dreg = mono_alloc_ireg (cfg);
4580 MONO_ADD_INS (cfg->cbb, ins_iconst);
4582 MONO_INST_NEW (cfg, ins, opcode);
4583 ins->dreg = mono_alloc_ireg (cfg);
4584 ins->inst_basereg = args [0]->dreg;
4585 ins->inst_offset = 0;
4586 ins->sreg2 = ins_iconst->dreg;
4587 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4588 MONO_ADD_INS (cfg->cbb, ins);
4590 } else if (strcmp (cmethod->name, "Decrement") == 0) {
4591 MonoInst *ins_iconst;
4594 if (fsig->params [0]->type == MONO_TYPE_I4)
4595 opcode = OP_ATOMIC_ADD_NEW_I4;
4596 #if SIZEOF_REGISTER == 8
4597 else if (fsig->params [0]->type == MONO_TYPE_I8)
4598 opcode = OP_ATOMIC_ADD_NEW_I8;
4601 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4602 ins_iconst->inst_c0 = -1;
4603 ins_iconst->dreg = mono_alloc_ireg (cfg);
4604 MONO_ADD_INS (cfg->cbb, ins_iconst);
4606 MONO_INST_NEW (cfg, ins, opcode);
4607 ins->dreg = mono_alloc_ireg (cfg);
4608 ins->inst_basereg = args [0]->dreg;
4609 ins->inst_offset = 0;
4610 ins->sreg2 = ins_iconst->dreg;
4611 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4612 MONO_ADD_INS (cfg->cbb, ins);
4614 } else if (strcmp (cmethod->name, "Add") == 0) {
4617 if (fsig->params [0]->type == MONO_TYPE_I4)
4618 opcode = OP_ATOMIC_ADD_NEW_I4;
4619 #if SIZEOF_REGISTER == 8
4620 else if (fsig->params [0]->type == MONO_TYPE_I8)
4621 opcode = OP_ATOMIC_ADD_NEW_I8;
4625 MONO_INST_NEW (cfg, ins, opcode);
4626 ins->dreg = mono_alloc_ireg (cfg);
4627 ins->inst_basereg = args [0]->dreg;
4628 ins->inst_offset = 0;
4629 ins->sreg2 = args [1]->dreg;
4630 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4631 MONO_ADD_INS (cfg->cbb, ins);
4634 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
4636 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
4637 if (strcmp (cmethod->name, "Exchange") == 0) {
4639 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
4641 if (fsig->params [0]->type == MONO_TYPE_I4)
4642 opcode = OP_ATOMIC_EXCHANGE_I4;
4643 #if SIZEOF_REGISTER == 8
4644 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
4645 (fsig->params [0]->type == MONO_TYPE_I))
4646 opcode = OP_ATOMIC_EXCHANGE_I8;
4648 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
4649 opcode = OP_ATOMIC_EXCHANGE_I4;
4654 MONO_INST_NEW (cfg, ins, opcode);
4655 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
4656 ins->inst_basereg = args [0]->dreg;
4657 ins->inst_offset = 0;
4658 ins->sreg2 = args [1]->dreg;
4659 MONO_ADD_INS (cfg->cbb, ins);
4661 switch (fsig->params [0]->type) {
4663 ins->type = STACK_I4;
4667 ins->type = STACK_I8;
4669 case MONO_TYPE_OBJECT:
4670 ins->type = STACK_OBJ;
4673 g_assert_not_reached ();
4676 if (cfg->gen_write_barriers && is_ref)
4677 emit_write_barrier (cfg, args [0], args [1], -1);
4679 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4681 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
4682 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
4684 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
4685 if (fsig->params [1]->type == MONO_TYPE_I4)
4687 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
4688 size = sizeof (gpointer);
4689 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
4692 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
4693 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
4694 ins->sreg1 = args [0]->dreg;
4695 ins->sreg2 = args [1]->dreg;
4696 ins->sreg3 = args [2]->dreg;
4697 ins->type = STACK_I4;
4698 MONO_ADD_INS (cfg->cbb, ins);
4699 } else if (size == 8) {
4700 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
4701 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
4702 ins->sreg1 = args [0]->dreg;
4703 ins->sreg2 = args [1]->dreg;
4704 ins->sreg3 = args [2]->dreg;
4705 ins->type = STACK_I8;
4706 MONO_ADD_INS (cfg->cbb, ins);
4708 /* g_assert_not_reached (); */
4710 if (cfg->gen_write_barriers && is_ref)
4711 emit_write_barrier (cfg, args [0], args [1], -1);
4713 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
4717 } else if (cmethod->klass->image == mono_defaults.corlib) {
4718 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
4719 && strcmp (cmethod->klass->name, "Debugger") == 0) {
4720 if (should_insert_brekpoint (cfg->method)) {
4721 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
4723 MONO_INST_NEW (cfg, ins, OP_NOP);
4724 MONO_ADD_INS (cfg->cbb, ins);
4728 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
4729 && strcmp (cmethod->klass->name, "Environment") == 0) {
4731 EMIT_NEW_ICONST (cfg, ins, 1);
4733 EMIT_NEW_ICONST (cfg, ins, 0);
4737 } else if (cmethod->klass == mono_defaults.math_class) {
4739 * There is general branches code for Min/Max, but it does not work for
4741 * http://everything2.com/?node_id=1051618
4745 #ifdef MONO_ARCH_SIMD_INTRINSICS
4746 if (cfg->opt & MONO_OPT_SIMD) {
4747 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
4753 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
4757 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Give the JIT a chance to replace a managed call with an internal
 * equivalent.  The only redirection visible in this chunk is
 * String.InternalAllocateStr: when the profiler is not tracking allocations
 * and MONO_OPT_SHARED is off, the call is rewritten to the GC-provided
 * managed allocator for System.String.
 * NOTE(review): interior lines are elided in this chunk (e.g. the #else /
 * fall-through path and the non-redirected return) — confirm against the
 * full source.
 */
4760 inline static MonoInst*
4761 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4762 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4764 if (method->klass == mono_defaults.string_class) {
4765 /* managed string allocation support */
4766 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
4767 MonoInst *iargs [2];
4768 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4769 MonoMethod *managed_alloc = NULL;
4771 g_assert (vtable); /* Should not fail since it is System.String */
4772 #ifndef MONO_CROSS_COMPILE
4773 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
/* iargs: [0] = System.String vtable constant, [1] = the original call's
 * first argument, passed through to the managed allocator. */
4777 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4778 iargs [1] = args [0];
4779 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   Create an OP_LOCAL variable for each call argument in SP and emit an
 * ARGSTORE saving the stack value into it (used when setting up inlined
 * arguments).  For instance methods, slot 0 is the receiver and its type is
 * taken from the stack entry itself rather than from the signature.
 * NOTE(review): the declaration of the loop index and the function's
 * opening/closing lines are elided in this chunk.
 */
4786 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4788 MonoInst *store, *temp;
4791 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4792 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4795 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4796 * would be different than the MonoInst's used to represent arguments, and
4797 * the ldelema implementation can't deal with that.
4798 * Solution: When ldelema is used on an inline argument, create a var for
4799 * it, emit ldelema on that var, and emit the saving code below in
4800 * inline_method () if needed.
4802 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4803 cfg->args [i] = temp;
4804 /* This uses cfg->args [i] which is set by the preceding line */
4805 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4806 store->cil_code = sp [0]->cil_code;
4811 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4812 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4814 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debug filter: only permit inlining of callees whose full name matches
 * the prefix given in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT environment
 * variable.  The value is read once and cached; an empty (or unset) limit
 * disables the filter.  Returns TRUE when inlining is allowed.
 */
4816 check_inline_called_method_name_limit (MonoMethod *called_method)
4819 static char *limit = NULL;
/* Lazily read and cache the env var; "" means "no limit configured". */
4821 if (limit == NULL) {
4822 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4824 if (limit_string != NULL)
4825 limit = limit_string;
4827 limit = (char *) "";
4830 if (limit [0] != '\0') {
4831 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* Prefix comparison: only the first strlen (limit) chars are checked. */
4833 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4834 g_free (called_method_name);
4836 //return (strncmp_result <= 0);
4837 return (strncmp_result == 0);
4844 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debug filter mirroring check_inline_called_method_name_limit (), but
 * keyed on the CALLER's name via the MONO_INLINE_CALLER_METHOD_NAME_LIMIT
 * environment variable.  Returns TRUE when inlining into CALLER_METHOD is
 * allowed.
 */
4846 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4849 static char *limit = NULL;
/* Lazily read and cache the env var; "" means "no limit configured". */
4851 if (limit == NULL) {
4852 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4853 if (limit_string != NULL) {
4854 limit = limit_string;
4856 limit = (char *) "";
4860 if (limit [0] != '\0') {
4861 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
/* Prefix comparison: only the first strlen (limit) chars are checked. */
4863 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4864 g_free (caller_method_name);
4866 //return (strncmp_result <= 0);
4867 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *
 *   Initialize the inline return variable RVAR to a zero/default value of
 * type RTYPE.  Used when an inlined method can exit without setting the
 * return value (e.g. a path ending in a throw) so later reads see a defined
 * value.
 * NOTE(review): the switch's case labels are elided in this chunk.
 */
4875 emit_init_rvar (MonoCompile *cfg, MonoInst *rvar, MonoType *rtype)
/* Static storage backing the 0.0 loaded by OP_R8CONST below. */
4877 static double r8_0 = 0.0;
4880 switch (rvar->type) {
4882 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4885 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4890 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4893 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4894 ins->type = STACK_R8;
4895 ins->inst_p0 = (void*)&r8_0;
4896 ins->dreg = rvar->dreg;
4897 MONO_ADD_INS (cfg->cbb, ins);
/* Value types: zero the whole variable. */
4900 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (rtype));
4903 g_assert_not_reached ();
/*
 * inline_method:
 *
 *   Try to inline CMETHOD at the current emission point.  Saves the pieces
 * of per-method state on CFG that the recursive mono_method_to_ir () call
 * will overwrite, compiles the callee's IL into fresh bblocks bracketed by
 * sbblock/ebblock, and then either links the new code into the caller (when
 * the reported cost is acceptable, or INLINE_ALWAYS is set) or discards the
 * new bblocks.  RVAR, if the callee returns a value, receives the result.
 * NOTE(review): many interior lines (returns, #endif, labels, some
 * declarations) are elided in this chunk — consult the full source for the
 * exact control flow.
 */
4908 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4909 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_always)
4911 MonoInst *ins, *rvar = NULL;
4912 MonoMethodHeader *cheader;
4913 MonoBasicBlock *ebblock, *sbblock;
4915 MonoMethod *prev_inlined_method;
4916 MonoInst **prev_locals, **prev_args;
4917 MonoType **prev_arg_types;
4918 guint prev_real_offset;
4919 GHashTable *prev_cbb_hash;
4920 MonoBasicBlock **prev_cil_offset_to_bb;
4921 MonoBasicBlock *prev_cbb;
4922 unsigned char* prev_cil_start;
4923 guint32 prev_cil_offset_to_bb_len;
4924 MonoMethod *prev_current_method;
4925 MonoGenericContext *prev_generic_context;
4926 gboolean ret_var_set, prev_ret_var_set, virtual = FALSE;
4928 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional debug-time name filters (see the helpers above). */
4930 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4931 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
4934 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4935 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
4939 if (cfg->verbose_level > 2)
4940 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
/* Count each distinct method as "inlineable" only once. */
4942 if (!cmethod->inline_info) {
4943 cfg->stat_inlineable_methods++;
4944 cmethod->inline_info = 1;
4947 /* allocate local variables */
4948 cheader = mono_method_get_header (cmethod);
4950 if (cheader == NULL || mono_loader_get_last_error ()) {
4951 MonoLoaderError *error = mono_loader_get_last_error ();
4954 mono_metadata_free_mh (cheader);
/* Only surface the loader error when inlining was mandatory. */
4955 if (inline_always && error)
4956 mono_cfg_set_exception (cfg, error->exception_type);
4958 mono_loader_clear_error ();
4962 /*Must verify before creating locals as it can cause the JIT to assert.*/
4963 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
4964 mono_metadata_free_mh (cheader);
4968 /* allocate space to store the return value */
4969 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4970 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* Swap in the callee's locals; the caller's set is restored below. */
4973 prev_locals = cfg->locals;
4974 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4975 for (i = 0; i < cheader->num_locals; ++i)
4976 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4978 /* allocate start and end blocks */
4979 /* This is needed so if the inline is aborted, we can clean up */
4980 NEW_BBLOCK (cfg, sbblock);
4981 sbblock->real_offset = real_offset;
4983 NEW_BBLOCK (cfg, ebblock);
4984 ebblock->block_num = cfg->num_bblocks++;
4985 ebblock->real_offset = real_offset;
/* Save everything mono_method_to_ir () clobbers while compiling the
 * callee's body. */
4987 prev_args = cfg->args;
4988 prev_arg_types = cfg->arg_types;
4989 prev_inlined_method = cfg->inlined_method;
4990 cfg->inlined_method = cmethod;
4991 cfg->ret_var_set = FALSE;
4992 cfg->inline_depth ++;
4993 prev_real_offset = cfg->real_offset;
4994 prev_cbb_hash = cfg->cbb_hash;
4995 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4996 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4997 prev_cil_start = cfg->cil_start;
4998 prev_cbb = cfg->cbb;
4999 prev_current_method = cfg->current_method;
5000 prev_generic_context = cfg->generic_context;
5001 prev_ret_var_set = cfg->ret_var_set;
/* callvirt on a non-static method requires a virtual-call null check. */
5003 if (*ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
5006 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, virtual);
5008 ret_var_set = cfg->ret_var_set;
/* Restore the caller's saved state. */
5010 cfg->inlined_method = prev_inlined_method;
5011 cfg->real_offset = prev_real_offset;
5012 cfg->cbb_hash = prev_cbb_hash;
5013 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
5014 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
5015 cfg->cil_start = prev_cil_start;
5016 cfg->locals = prev_locals;
5017 cfg->args = prev_args;
5018 cfg->arg_types = prev_arg_types;
5019 cfg->current_method = prev_current_method;
5020 cfg->generic_context = prev_generic_context;
5021 cfg->ret_var_set = prev_ret_var_set;
5022 cfg->inline_depth --;
/* Accept the inline when the reported cost is small enough, or forced. */
5024 if ((costs >= 0 && costs < 60) || inline_always) {
5025 if (cfg->verbose_level > 2)
5026 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
5028 cfg->stat_inlined_methods++;
5030 /* always add some code to avoid block split failures */
5031 MONO_INST_NEW (cfg, ins, OP_NOP);
5032 MONO_ADD_INS (prev_cbb, ins);
5034 prev_cbb->next_bb = sbblock;
5035 link_bblock (cfg, prev_cbb, sbblock);
5038 * Get rid of the begin and end bblocks if possible to aid local
5041 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
5043 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
5044 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
5046 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
5047 MonoBasicBlock *prev = ebblock->in_bb [0];
5048 mono_merge_basic_blocks (cfg, prev, ebblock);
5050 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
5051 mono_merge_basic_blocks (cfg, prev_cbb, prev);
5052 cfg->cbb = prev_cbb;
5056 * It's possible that the rvar is set in some prev bblock, but not in others.
5062 for (i = 0; i < ebblock->in_count; ++i) {
5063 bb = ebblock->in_bb [i];
5065 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
5068 emit_init_rvar (cfg, rvar, fsig->ret);
5078 * If the inlined method contains only a throw, then the ret var is not
5079 * set, so set it to a dummy value.
5082 emit_init_rvar (cfg, rvar, fsig->ret);
5084 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
5087 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Inline rejected: reset any exception recorded while compiling the
 * callee and drop the newly created bblocks. */
5090 if (cfg->verbose_level > 2)
5091 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
5092 cfg->exception_type = MONO_EXCEPTION_NONE;
5093 mono_loader_clear_error ();
5095 /* This gets rid of the newly added bblocks */
5096 cfg->cbb = prev_cbb;
5098 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
5103 * Some of these comments may well be out-of-date.
5104 * Design decisions: we do a single pass over the IL code (and we do bblock
5105 * splitting/merging in the few cases when it's required: a back jump to an IL
5106 * address that was not already seen as bblock starting point).
5107 * Code is validated as we go (full verification is still better left to metadata/verify.c).
5108 * Complex operations are decomposed in simpler ones right away. We need to let the
5109 * arch-specific code peek and poke inside this process somehow (except when the
5110 * optimizations can take advantage of the full semantic info of coarse opcodes).
5111 * All the opcodes of the form opcode.s are 'normalized' to opcode.
5112 * MonoInst->opcode initially is the IL opcode or some simplification of that
5113 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
5114 * opcode with value bigger than OP_LAST.
5115 * At this point the IR can be handed over to an interpreter, a dumb code generator
5116 * or to the optimizing code generator that will translate it to SSA form.
5118 * Profiling directed optimizations.
5119 * We may compile by default with few or no optimizations and instrument the code
5120 * or the user may indicate what methods to optimize the most either in a config file
5121 * or through repeated runs where the compiler applies offline the optimizations to
5122 * each method and then decides if it was worth it.
5125 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
5126 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
5127 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
5128 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
5129 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
5130 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
5131 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
5132 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; LOAD_ERROR;}
5134 /* offset from br.s -> br like opcodes */
5135 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return TRUE if the CIL address IP belongs to basic block BB, i.e. no
 * different bblock has been registered as starting at that offset.
 */
5138 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
5140 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
5142 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Scan the IL stream in [START, END) once, creating a basic block (via
 * GET_BBLOCK) at every branch target, every switch target, and the
 * fall-through address following each branch/switch.  Basic blocks that
 * contain a CEE_THROW are flagged out_of_line so they can be moved off the
 * hot path.
 * NOTE(review): several case labels and pointer-advance statements are
 * elided in this chunk.
 */
5146 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
5148 unsigned char *ip = start;
5149 unsigned char *target;
5152 MonoBasicBlock *bblock;
5153 const MonoOpcode *opcode;
5156 cli_addr = ip - start;
5157 i = mono_opcode_value ((const guint8 **)&ip, end);
5160 opcode = &mono_opcodes [i];
/* Dispatch on operand kind: skip the operand, record branch targets. */
5161 switch (opcode->argument) {
5162 case MonoInlineNone:
5165 case MonoInlineString:
5166 case MonoInlineType:
5167 case MonoInlineField:
5168 case MonoInlineMethod:
5171 case MonoShortInlineR:
5178 case MonoShortInlineVar:
5179 case MonoShortInlineI:
5182 case MonoShortInlineBrTarget:
/* 1-byte signed displacement, relative to the next instruction. */
5183 target = start + cli_addr + 2 + (signed char)ip [1];
5184 GET_BBLOCK (cfg, bblock, target);
5187 GET_BBLOCK (cfg, bblock, ip);
5189 case MonoInlineBrTarget:
/* 4-byte signed displacement, relative to the next instruction. */
5190 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
5191 GET_BBLOCK (cfg, bblock, target);
5194 GET_BBLOCK (cfg, bblock, ip);
5196 case MonoInlineSwitch: {
5197 guint32 n = read32 (ip + 1);
/* The instruction after the jump table starts a new block. */
5200 cli_addr += 5 + 4 * n;
5201 target = start + cli_addr;
5202 GET_BBLOCK (cfg, bblock, target);
5204 for (j = 0; j < n; ++j) {
5205 target = start + cli_addr + (gint32)read32 (ip);
5206 GET_BBLOCK (cfg, bblock, target);
5216 g_assert_not_reached ();
5219 if (i == CEE_THROW) {
5220 unsigned char *bb_start = ip - 1;
5222 /* Find the start of the bblock containing the throw */
5224 while ((bb_start >= start) && !bblock) {
5225 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
5229 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN to a MonoMethod in the context of method M.  For wrapper
 * methods the token indexes the wrapper's own data table rather than image
 * metadata.  "Open" constructed methods (unbound type variables) are
 * accepted; callers wanting closed types use mini_get_method () instead.
 */
5238 static inline MonoMethod *
5239 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5243 if (m->wrapper_type != MONO_WRAPPER_NONE)
5244 return mono_method_get_wrapper_data (m, token);
5246 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when generic sharing is not in
 * effect a method whose declaring type is still an open constructed type is
 * rejected (the elided line presumably returns NULL in that case — confirm
 * against the full source).
 */
5251 static inline MonoMethod *
5252 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5254 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
5256 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass in METHOD's context.  Wrapper methods
 * store the class directly in their wrapper data; otherwise the token is
 * looked up in the image with the given generic CONTEXT.  The class is
 * initialized before being returned.
 */
5262 static inline MonoClass*
5263 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
5267 if (method->wrapper_type != MONO_WRAPPER_NONE)
5268 klass = mono_method_get_wrapper_data (method, token);
5270 klass = mono_class_get_full (method->klass->image, token, context);
5272 mono_class_init (klass);
5277 * Returns TRUE if the JIT should abort inlining because "callee"
5278 * is influenced by security attributes.
/*
 * check_linkdemand:
 *
 *   Evaluate declarative-security link demands for CALLER invoking CALLEE
 * (only when compiling an inlined body, i.e. cfg->method != caller).  An
 * ECMA link demand failure emits a call that throws a SecurityException at
 * the call site; other failures record a SECURITY_LINKDEMAND exception on
 * CFG unless an earlier exception is already pending.
 */
5281 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
5285 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
5289 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
5290 if (result == MONO_JIT_SECURITY_OK)
5293 if (result == MONO_JIT_LINKDEMAND_ECMA) {
5294 /* Generate code to throw a SecurityException before the actual call/link */
5295 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5298 NEW_ICONST (cfg, args [0], 4);
5299 NEW_METHODCONST (cfg, args [1], caller);
5300 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
5301 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
5302 /* don't hide previous results */
5303 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
5304 cfg->exception_data = result;
/*
 * throw_exception:
 *
 *   Return the managed SecurityManager.ThrowException (exception) method,
 * looked up once and cached in a static for subsequent calls.
 */
5312 throw_exception (void)
5314 static MonoMethod *method = NULL;
5317 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5318 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit a call to SecurityManager.ThrowException with the preallocated
 * exception object EX, so the exception is raised when the code runs.
 */
5325 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
5327 MonoMethod *thrower = throw_exception ();
5330 EMIT_NEW_PCONST (cfg, args [0], ex);
5331 mono_emit_method_call (cfg, thrower, args, NULL);
5335 * Return the original method if a wrapper is specified. We can only access
5336 * the custom attributes from the original method.
5339 get_original_method (MonoMethod *method)
5341 if (method->wrapper_type == MONO_WRAPPER_NONE)
5344 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
5345 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
5348 /* in other cases we need to find the original method */
5349 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: if the (unwrapped) CALLER may not access FIELD,
 * emit code that throws the exception produced by the security layer.
 */
5353 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
5354 MonoBasicBlock *bblock, unsigned char *ip)
5356 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5357 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
5359 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: if the (unwrapped) CALLER may not call CALLEE,
 * emit code that throws the exception produced by the security layer.
 */
5363 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
5364 MonoBasicBlock *bblock, unsigned char *ip)
5366 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5367 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
5369 emit_throw_exception (cfg, ex);
5373 * Check that the IL instructions at ip are the array initialization
5374 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *
 *   Recognize the "dup; ldtoken <field>; call RuntimeHelpers.InitializeArray"
 * pattern that follows a newarr and, when the element type permits a raw
 * memory copy, return a pointer to the field's static data (or the RVA
 * itself, for AOT, so the lookup happens at load time).  *OUT_SIZE and
 * *OUT_FIELD_TOKEN are filled in for the caller.
 * NOTE(review): several lines (early returns, most case labels) are elided
 * in this chunk.
 */
5377 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
5380 * newarr[System.Int32]
5382 * ldtoken field valuetype ...
5383 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
5385 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
5386 guint32 token = read32 (ip + 7);
5387 guint32 field_token = read32 (ip + 2);
5388 guint32 field_index = field_token & 0xffffff;
5390 const char *data_ptr;
5392 MonoMethod *cmethod;
5393 MonoClass *dummy_class;
5394 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
5400 *out_field_token = field_token;
5402 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* The callee must really be corlib's RuntimeHelpers.InitializeArray. */
5405 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
5407 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
5408 case MONO_TYPE_BOOLEAN:
5412 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
5413 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
5414 case MONO_TYPE_CHAR:
5424 return NULL; /* stupid ARM FP swapped format */
/* The blob must be at least as large as the array being initialized. */
5434 if (size > mono_type_size (field->type, &dummy_align))
5437 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
5438 if (!method->klass->image->dynamic) {
5439 field_index = read32 (ip + 2) & 0xffffff;
5440 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
5441 data_ptr = mono_image_rva_map (method->klass->image, rva);
5442 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
5443 /* for aot code we do the lookup on load */
5444 if (aot && data_ptr)
5445 return GUINT_TO_POINTER (rva);
5447 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
5449 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG whose message names METHOD and
 * includes a one-instruction disassembly at IP (or a note that the method
 * body is empty).  The method header is queued on headers_to_free for
 * later release.
 */
5457 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
5459 char *method_fname = mono_method_full_name (method, TRUE);
5461 MonoMethodHeader *header = mono_method_get_header (method);
5463 if (header->code_size == 0)
5464 method_code = g_strdup ("method body is empty.");
5466 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
5467 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
5468 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
5469 g_free (method_fname);
5470 g_free (method_code);
5471 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *
 *   Record a preallocated MonoException object on CFG, registering
 * cfg->exception_ptr as a GC root so the object stays alive until the
 * failure is reported.
 */
5475 set_exception_object (MonoCompile *cfg, MonoException *exception)
5477 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
5478 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
5479 cfg->exception_ptr = exception;
/* Whether KLASS is treated as a reference type, taking CFG's generic
 * sharing into account (delegates to mini_type_is_reference). */
5483 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5485 return mini_type_is_reference (cfg, &klass->byval_arg);
/*
 * emit_stloc_ir:
 *
 *   Emit a store of the stack value *SP into local N.  When the value on
 * top is a constant that is the last instruction emitted into cfg->cbb and
 * the store would be a plain register move, skip the move by retargeting
 * the constant's dreg directly at the local's register.
 */
5489 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5492 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5493 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5494 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5495 /* Optimize reg-reg moves away */
5497 * Can't optimize other opcodes, since sp[0] might point to
5498 * the last ins of a decomposed opcode.
5500 sp [0]->dreg = (cfg)->locals [n]->dreg;
5502 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5507 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *
 *   Peephole for the "ldloca N; initobj T" pair: instead of taking the
 * local's address, store NULL directly into reference-typed locals or emit
 * a VZERO for struct-typed ones, avoiding the address-taken local.
 * NOTE(review): the return statements and several declarations are elided
 * in this chunk.
 */
5510 static inline unsigned char *
5511 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
5520 local = read16 (ip + 2);
/* Only fire when the following initobj is in the same basic block. */
5524 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5525 gboolean skip = FALSE;
5527 /* From the INITOBJ case */
5528 token = read32 (ip + 2);
5529 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5530 CHECK_TYPELOAD (klass);
5531 if (mini_type_is_reference (cfg, &klass->byval_arg)) {
5532 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5533 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5534 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/* Walk up the parent chain: TRUE if CLASS is System.Exception or derives
 * from it.  NOTE(review): the loop header/footer lines are elided here. */
5547 is_exception_class (MonoClass *class)
5550 if (class == mono_defaults.exception_class)
5552 class = class->parent;
5558 * is_jit_optimizer_disabled:
5560 * Determine whether M's assembly has a DebuggableAttribute with the
5561 * IsJITOptimizerDisabled flag set.
5564 is_jit_optimizer_disabled (MonoMethod *m)
5566 MonoAssembly *ass = m->klass->image->assembly;
5567 MonoCustomAttrInfo* attrs;
5568 static MonoClass *klass;
5570 gboolean val = FALSE;
/* Cached per-assembly; the memory barrier orders the cached value before
 * the "inited" flag so racing readers never see a stale value. */
5573 if (ass->jit_optimizer_disabled_inited)
5574 return ass->jit_optimizer_disabled;
5577 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
/* No DebuggableAttribute class at all: optimizer stays enabled. */
5580 ass->jit_optimizer_disabled = FALSE;
5581 mono_memory_barrier ();
5582 ass->jit_optimizer_disabled_inited = TRUE;
5586 attrs = mono_custom_attrs_from_assembly (ass);
5588 for (i = 0; i < attrs->num_attrs; ++i) {
5589 MonoCustomAttrEntry *attr = &attrs->attrs [i];
5592 MonoMethodSignature *sig;
5594 if (!attr->ctor || attr->ctor->klass != klass)
5596 /* Decode the attribute. See reflection.c */
5597 len = attr->data_size;
5598 p = (const char*)attr->data;
/* Custom attribute blobs begin with a 0x0001 prolog (ECMA-335 II.23.3). */
5599 g_assert (read16 (p) == 0x0001);
5602 // FIXME: Support named parameters
5603 sig = mono_method_signature (attr->ctor);
/* Only the (bool, bool) constructor overload is handled. */
5604 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
5606 /* Two boolean arguments */
5610 mono_custom_attrs_free (attrs);
5613 ass->jit_optimizer_disabled = val;
5614 mono_memory_barrier ();
5615 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Decide whether a tail call from METHOD to CMETHOD (call signature FSIG)
 * can be honoured.  The base check is signature compatibility — via the
 * arch hook when MONO_ARCH_USE_OP_TAIL_CALL is defined, otherwise exact
 * signature equality with a non-struct return.  The call is then rejected
 * if any argument or the receiver could point into the current frame, the
 * callee is a pinvoke or a (non-dynamic-method) wrapper, or the caller must
 * save an LMF.
 */
5621 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig)
5623 gboolean supported_tail_call;
5626 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
5627 supported_tail_call = MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
5629 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
5632 for (i = 0; i < fsig->param_count; ++i) {
5633 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
5634 /* These can point to the current method's stack */
5635 supported_tail_call = FALSE;
/* A value-type receiver is passed by reference into the frame. */
5637 if (fsig->hasthis && cmethod->klass->valuetype)
5638 /* this might point to the current method's stack */
5639 supported_tail_call = FALSE;
5640 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
5641 supported_tail_call = FALSE;
5642 if (cfg->method->save_lmf)
5643 supported_tail_call = FALSE;
5644 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
5645 supported_tail_call = FALSE;
5647 /* Debugging support */
/* mono_debug_count () lets tail calls be bisected when debugging. */
5649 if (supported_tail_call) {
5650 if (!mono_debug_count ())
5651 supported_tail_call = FALSE;
5655 return supported_tail_call;
5658 /* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
5659 * it to the thread local value based on the tls_offset field. Every other kind of access to
5660 * the field causes an assert.
/*
 * is_magic_tls_access:
 *
 *   Return whether FIELD is corlib's ThreadLocal`1.tlsdata field, the one
 * field whose ldflda accesses the JIT intercepts and redirects to real TLS
 * (see the comment above).  All three tests must pass: field name, declaring
 * class name, and declaring image.
 * NOTE(review): the FALSE-returning lines after each strcmp check are not
 * visible in this listing — presumably each mismatch returns FALSE; confirm
 * against the full file.
 */
5663 is_magic_tls_access (MonoClassField *field)
/* strcmp () != 0 means the name does not match */
5665 if (strcmp (field->name, "tlsdata"))
5667 if (strcmp (field->parent->name, "ThreadLocal`1"))
/* Only the corlib-defined ThreadLocal`1 is magic; user types with the same name are not. */
5669 return field->parent->image == mono_defaults.corlib;
5672 /* emits the code needed to access a managed tls var (like ThreadStatic)
5673 * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
5674 * pointer for the current thread.
5675 * Returns the MonoInst* representing the address of the tls var.
/*
 * emit_managed_static_data_access:
 *
 *   Emit the IR that computes the address of a managed TLS variable (such as
 * a [ThreadStatic] field) for the current thread.  THREAD_INS holds the
 * MonoInternalThread pointer; OFFSET_REG holds the encoded tls offset whose
 * top byte is a 1-based index into thread->static_data and whose low 24 bits
 * are the byte offset inside that chunk (see the pseudo-code comments below).
 * NOTE(review): the declaration of `addr` and the trailing `return addr;` are
 * not visible in this listing — confirm against the full file.
 */
5678 emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
5681 int static_data_reg, array_reg, dreg;
5682 int offset2_reg, idx_reg;
5683 // inlined access to the tls data
5684 // idx = (offset >> 24) - 1;
5685 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
/* static_data_reg = thread->static_data */
5686 static_data_reg = alloc_ireg (cfg);
5687 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
/* idx_reg = (offset >> 24) - 1, then scaled to a pointer-sized array index */
5688 idx_reg = alloc_ireg (cfg);
5689 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
5690 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
/* shift by 3 (x8) on 64-bit, by 2 (x4) on 32-bit: index -> byte offset into the pointer array */
5691 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
5692 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
/* array_reg = static_data [idx] (load the chunk base pointer) */
5693 array_reg = alloc_ireg (cfg);
5694 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
/* offset2_reg = offset & 0xffffff (byte offset within the chunk) */
5695 offset2_reg = alloc_ireg (cfg);
5696 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
/* addr = chunk base + intra-chunk offset */
5697 dreg = alloc_ireg (cfg);
5698 EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
5703 * redirect access to the tlsdata field to the tls var given by the tls_offset field.
5704 * this address is cached per-method in cached_tls_addr.
/*
 * create_magic_tls_access:
 *
 *   Emit (or reuse) the IR computing the address of the thread-local value
 * behind TLS_FIELD (ThreadLocal`1.tlsdata, see is_magic_tls_access ()).
 * The computed address is cached per-method in *CACHED_TLS_ADDR so repeated
 * accesses in the same method reuse one temporary instead of recomputing.
 * THREAD_LOCAL is the ThreadLocal`1 instance whose tls_offset field is read.
 * NOTE(review): several control-flow lines (the early return on a cache hit,
 * the if/else around the thread intrinsic, the final return) are not visible
 * in this listing — confirm against the full file.
 */
5707 create_magic_tls_access (MonoCompile *cfg, MonoClassField *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
5709 MonoInst *load, *addr, *temp, *store, *thread_ins;
5710 MonoClassField *offset_field;
/* Cache hit: reload the previously stored address temporary. */
5712 if (*cached_tls_addr) {
5713 EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
/* Obtain the current MonoInternalThread, preferably via an arch intrinsic. */
5716 thread_ins = mono_get_thread_intrinsic (cfg);
5717 offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");
/* load = thread_local->tls_offset (the encoded offset consumed below) */
5719 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
5721 MONO_ADD_INS (cfg->cbb, thread_ins);
/* No intrinsic available: fall back to an icall-style managed method call. */
5723 MonoMethod *thread_method;
5724 thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
5725 thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
/* Compute the actual TLS slot address from the thread and the encoded offset. */
5727 addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
5728 addr->klass = mono_class_from_mono_type (tls_field->type);
5729 addr->type = STACK_MP;
/* Store the address into a method-local temp and remember it for later accesses. */
5730 *cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
5731 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);
5733 EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
5738 * mono_method_to_ir:
5740 * Translate the .net IL into linear IR.
5743 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5744 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5745 guint inline_offset, gboolean is_virtual_call)
5748 MonoInst *ins, **sp, **stack_start;
5749 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5750 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
5751 MonoMethod *cmethod, *method_definition;
5752 MonoInst **arg_array;
5753 MonoMethodHeader *header;
5755 guint32 token, ins_flag;
5757 MonoClass *constrained_call = NULL;
5758 unsigned char *ip, *end, *target, *err_pos;
5759 static double r8_0 = 0.0;
5760 MonoMethodSignature *sig;
5761 MonoGenericContext *generic_context = NULL;
5762 MonoGenericContainer *generic_container = NULL;
5763 MonoType **param_types;
5764 int i, n, start_new_bblock, dreg;
5765 int num_calls = 0, inline_costs = 0;
5766 int breakpoint_id = 0;
5768 MonoBoolean security, pinvoke;
5769 MonoSecurityManager* secman = NULL;
5770 MonoDeclSecurityActions actions;
5771 GSList *class_inits = NULL;
5772 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5774 gboolean init_locals, seq_points, skip_dead_blocks;
5775 gboolean disable_inline, sym_seq_points = FALSE;
5776 MonoInst *cached_tls_addr = NULL;
5777 MonoDebugMethodInfo *minfo;
5778 MonoBitSet *seq_point_locs = NULL;
5780 disable_inline = is_jit_optimizer_disabled (method);
5782 /* serialization and xdomain stuff may need access to private fields and methods */
5783 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5784 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5785 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5786 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5787 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5788 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5790 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5792 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5793 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5794 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5795 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5796 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
5798 image = method->klass->image;
5799 header = mono_method_get_header (method);
5801 MonoLoaderError *error;
5803 if ((error = mono_loader_get_last_error ())) {
5804 mono_cfg_set_exception (cfg, error->exception_type);
5806 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
5807 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
5809 goto exception_exit;
5811 generic_container = mono_method_get_generic_container (method);
5812 sig = mono_method_signature (method);
5813 num_args = sig->hasthis + sig->param_count;
5814 ip = (unsigned char*)header->code;
5815 cfg->cil_start = ip;
5816 end = ip + header->code_size;
5817 cfg->stat_cil_code_size += header->code_size;
5818 init_locals = header->init_locals;
5820 seq_points = cfg->gen_seq_points && cfg->method == method;
5822 if (cfg->gen_seq_points && cfg->method == method) {
5823 minfo = mono_debug_lookup_method (method);
5825 int i, n_il_offsets;
5829 mono_debug_symfile_get_line_numbers_full (minfo, NULL, NULL, &n_il_offsets, &il_offsets, &line_numbers, NULL, NULL);
5830 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
5831 sym_seq_points = TRUE;
5832 for (i = 0; i < n_il_offsets; ++i) {
5833 if (il_offsets [i] < header->code_size)
5834 mono_bitset_set_fast (seq_point_locs, il_offsets [i]);
5840 * Methods without init_locals set could cause asserts in various passes
5845 method_definition = method;
5846 while (method_definition->is_inflated) {
5847 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5848 method_definition = imethod->declaring;
5851 /* SkipVerification is not allowed if core-clr is enabled */
5852 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5854 dont_verify_stloc = TRUE;
5857 if (mono_debug_using_mono_debugger ())
5858 cfg->keep_cil_nops = TRUE;
5860 if (sig->is_inflated)
5861 generic_context = mono_method_get_context (method);
5862 else if (generic_container)
5863 generic_context = &generic_container->context;
5864 cfg->generic_context = generic_context;
5866 if (!cfg->generic_sharing_context)
5867 g_assert (!sig->has_type_parameters);
5869 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5870 g_assert (method->is_inflated);
5871 g_assert (mono_method_get_context (method)->method_inst);
5873 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5874 g_assert (sig->generic_param_count);
5876 if (cfg->method == method) {
5877 cfg->real_offset = 0;
5879 cfg->real_offset = inline_offset;
5882 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5883 cfg->cil_offset_to_bb_len = header->code_size;
5885 cfg->current_method = method;
5887 if (cfg->verbose_level > 2)
5888 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5890 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5892 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5893 for (n = 0; n < sig->param_count; ++n)
5894 param_types [n + sig->hasthis] = sig->params [n];
5895 cfg->arg_types = param_types;
5897 dont_inline = g_list_prepend (dont_inline, method);
5898 if (cfg->method == method) {
5900 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5901 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5904 NEW_BBLOCK (cfg, start_bblock);
5905 cfg->bb_entry = start_bblock;
5906 start_bblock->cil_code = NULL;
5907 start_bblock->cil_length = 0;
5908 #if defined(__native_client_codegen__)
5909 MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
5910 ins->dreg = alloc_dreg (cfg, STACK_I4);
5911 MONO_ADD_INS (start_bblock, ins);
5915 NEW_BBLOCK (cfg, end_bblock);
5916 cfg->bb_exit = end_bblock;
5917 end_bblock->cil_code = NULL;
5918 end_bblock->cil_length = 0;
5919 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
5920 g_assert (cfg->num_bblocks == 2);
5922 arg_array = cfg->args;
5924 if (header->num_clauses) {
5925 cfg->spvars = g_hash_table_new (NULL, NULL);
5926 cfg->exvars = g_hash_table_new (NULL, NULL);
5928 /* handle exception clauses */
5929 for (i = 0; i < header->num_clauses; ++i) {
5930 MonoBasicBlock *try_bb;
5931 MonoExceptionClause *clause = &header->clauses [i];
5932 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5933 try_bb->real_offset = clause->try_offset;
5934 try_bb->try_start = TRUE;
5935 try_bb->region = ((i + 1) << 8) | clause->flags;
5936 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5937 tblock->real_offset = clause->handler_offset;
5938 tblock->flags |= BB_EXCEPTION_HANDLER;
5940 link_bblock (cfg, try_bb, tblock);
5942 if (*(ip + clause->handler_offset) == CEE_POP)
5943 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5945 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5946 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5947 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5948 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5949 MONO_ADD_INS (tblock, ins);
5951 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) {
5952 /* finally clauses already have a seq point */
5953 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
5954 MONO_ADD_INS (tblock, ins);
5957 /* todo: is a fault block unsafe to optimize? */
5958 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5959 tblock->flags |= BB_EXCEPTION_UNSAFE;
5963 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5965 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5967 /* catch and filter blocks get the exception object on the stack */
5968 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5969 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5970 MonoInst *dummy_use;
5972 /* mostly like handle_stack_args (), but just sets the input args */
5973 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5974 tblock->in_scount = 1;
5975 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5976 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5979 * Add a dummy use for the exvar so its liveness info will be
5983 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5985 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5986 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5987 tblock->flags |= BB_EXCEPTION_HANDLER;
5988 tblock->real_offset = clause->data.filter_offset;
5989 tblock->in_scount = 1;
5990 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5991 /* The filter block shares the exvar with the handler block */
5992 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5993 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5994 MONO_ADD_INS (tblock, ins);
5998 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5999 clause->data.catch_class &&
6000 cfg->generic_sharing_context &&
6001 mono_class_check_context_used (clause->data.catch_class)) {
6003 * In shared generic code with catch
6004 * clauses containing type variables
6005 * the exception handling code has to
6006 * be able to get to the rgctx.
6007 * Therefore we have to make sure that
6008 * the vtable/mrgctx argument (for
6009 * static or generic methods) or the
6010 * "this" argument (for non-static
6011 * methods) are live.
6013 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
6014 mini_method_get_context (method)->method_inst ||
6015 method->klass->valuetype) {
6016 mono_get_vtable_var (cfg);
6018 MonoInst *dummy_use;
6020 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
6025 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
6026 cfg->cbb = start_bblock;
6027 cfg->args = arg_array;
6028 mono_save_args (cfg, sig, inline_args);
6031 /* FIRST CODE BLOCK */
6032 NEW_BBLOCK (cfg, bblock);
6033 bblock->cil_code = ip;
6037 ADD_BBLOCK (cfg, bblock);
6039 if (cfg->method == method) {
6040 breakpoint_id = mono_debugger_method_has_breakpoint (method);
6041 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
6042 MONO_INST_NEW (cfg, ins, OP_BREAK);
6043 MONO_ADD_INS (bblock, ins);
6047 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
6048 secman = mono_security_manager_get_methods ();
6050 security = (secman && mono_method_has_declsec (method));
6051 /* at this point having security doesn't mean we have any code to generate */
6052 if (security && (cfg->method == method)) {
6053 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
6054 * And we do not want to enter the next section (with allocation) if we
6055 * have nothing to generate */
6056 security = mono_declsec_get_demands (method, &actions);
6059 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
6060 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
6062 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
6063 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6064 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
6066 /* unless the method or its class has the [SuppressUnmanagedCodeSecurity] attribute */
6067 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
6071 mono_custom_attrs_free (custom);
6074 custom = mono_custom_attrs_from_class (wrapped->klass);
6075 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
6079 mono_custom_attrs_free (custom);
6082 /* not a P/Invoke after all */
6087 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
6088 /* we use a separate basic block for the initialization code */
6089 NEW_BBLOCK (cfg, init_localsbb);
6090 cfg->bb_init = init_localsbb;
6091 init_localsbb->real_offset = cfg->real_offset;
6092 start_bblock->next_bb = init_localsbb;
6093 init_localsbb->next_bb = bblock;
6094 link_bblock (cfg, start_bblock, init_localsbb);
6095 link_bblock (cfg, init_localsbb, bblock);
6097 cfg->cbb = init_localsbb;
6099 start_bblock->next_bb = bblock;
6100 link_bblock (cfg, start_bblock, bblock);
6103 /* at this point we know, if security is TRUE, that some code needs to be generated */
6104 if (security && (cfg->method == method)) {
6107 cfg->stat_cas_demand_generation++;
6109 if (actions.demand.blob) {
6110 /* Add code for SecurityAction.Demand */
6111 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
6112 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
6113 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
6114 mono_emit_method_call (cfg, secman->demand, args, NULL);
6116 if (actions.noncasdemand.blob) {
6117 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
6118 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
6119 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
6120 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
6121 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
6122 mono_emit_method_call (cfg, secman->demand, args, NULL);
6124 if (actions.demandchoice.blob) {
6125 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
6126 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
6127 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
6128 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
6129 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
6133 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
6135 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
6138 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
6139 /* check if this is native code, e.g. an icall or a p/invoke */
6140 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
6141 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
6143 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
6144 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
6146 /* if this is a native call then it can only be JITted from platform code */
6147 if ((icall || pinvk) && method->klass && method->klass->image) {
6148 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
6149 MonoException *ex = icall ? mono_get_exception_security () :
6150 mono_get_exception_method_access ();
6151 emit_throw_exception (cfg, ex);
6158 if (header->code_size == 0)
6161 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
6166 if (cfg->method == method)
6167 mono_debug_init_method (cfg, bblock, breakpoint_id);
6169 for (n = 0; n < header->num_locals; ++n) {
6170 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
6175 /* We force the vtable variable here for all shared methods
6176 for the possibility that they might show up in a stack
6177 trace where their exact instantiation is needed. */
6178 if (cfg->generic_sharing_context && method == cfg->method) {
6179 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
6180 mini_method_get_context (method)->method_inst ||
6181 method->klass->valuetype) {
6182 mono_get_vtable_var (cfg);
6184 /* FIXME: Is there a better way to do this?
6185 We need the variable live for the duration
6186 of the whole method. */
6187 cfg->args [0]->flags |= MONO_INST_INDIRECT;
6191 /* add a check for this != NULL to inlined methods */
6192 if (is_virtual_call) {
6195 NEW_ARGLOAD (cfg, arg_ins, 0);
6196 MONO_ADD_INS (cfg->cbb, arg_ins);
6197 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
6200 skip_dead_blocks = !dont_verify;
6201 if (skip_dead_blocks) {
6202 original_bb = bb = mono_basic_block_split (method, &error);
6203 if (!mono_error_ok (&error)) {
6204 mono_error_cleanup (&error);
6210 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
6211 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
6214 start_new_bblock = 0;
6217 if (cfg->method == method)
6218 cfg->real_offset = ip - header->code;
6220 cfg->real_offset = inline_offset;
6225 if (start_new_bblock) {
6226 bblock->cil_length = ip - bblock->cil_code;
6227 if (start_new_bblock == 2) {
6228 g_assert (ip == tblock->cil_code);
6230 GET_BBLOCK (cfg, tblock, ip);
6232 bblock->next_bb = tblock;
6235 start_new_bblock = 0;
6236 for (i = 0; i < bblock->in_scount; ++i) {
6237 if (cfg->verbose_level > 3)
6238 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
6239 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
6243 g_slist_free (class_inits);
6246 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
6247 link_bblock (cfg, bblock, tblock);
6248 if (sp != stack_start) {
6249 handle_stack_args (cfg, stack_start, sp - stack_start);
6251 CHECK_UNVERIFIABLE (cfg);
6253 bblock->next_bb = tblock;
6256 for (i = 0; i < bblock->in_scount; ++i) {
6257 if (cfg->verbose_level > 3)
6258 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
6259 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
6262 g_slist_free (class_inits);
6267 if (skip_dead_blocks) {
6268 int ip_offset = ip - header->code;
6270 if (ip_offset == bb->end)
6274 int op_size = mono_opcode_size (ip, end);
6275 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
6277 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
6279 if (ip_offset + op_size == bb->end) {
6280 MONO_INST_NEW (cfg, ins, OP_NOP);
6281 MONO_ADD_INS (bblock, ins);
6282 start_new_bblock = 1;
6290 * Sequence points are points where the debugger can place a breakpoint.
6291 * Currently, we generate these automatically at points where the IL
6294 if (seq_points && ((sp == stack_start) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
6296 * Make methods interruptable at the beginning, and at the targets of
6297 * backward branches.
6298 * Also, do this at the start of every bblock in methods with clauses too,
6299 * to be able to handle instructions with imprecise control flow like
6301 * Backward branches are handled at the end of method-to-ir ().
6303 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
6305 /* Avoid sequence points on empty IL like .volatile */
6306 // FIXME: Enable this
6307 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
6308 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
6309 MONO_ADD_INS (cfg->cbb, ins);
6312 bblock->real_offset = cfg->real_offset;
6314 if ((cfg->method == method) && cfg->coverage_info) {
6315 guint32 cil_offset = ip - header->code;
6316 cfg->coverage_info->data [cil_offset].cil_code = ip;
6318 /* TODO: Use an increment here */
6319 #if defined(TARGET_X86)
6320 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
6321 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
6323 MONO_ADD_INS (cfg->cbb, ins);
6325 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
6326 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
6330 if (cfg->verbose_level > 3)
6331 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
6335 if (seq_points && !sym_seq_points && sp != stack_start) {
6337 * The C# compiler uses these nops to notify the JIT that it should
6338 * insert seq points.
6340 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
6341 MONO_ADD_INS (cfg->cbb, ins);
6343 if (cfg->keep_cil_nops)
6344 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
6346 MONO_INST_NEW (cfg, ins, OP_NOP);
6348 MONO_ADD_INS (bblock, ins);
6351 if (should_insert_brekpoint (cfg->method)) {
6352 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6354 MONO_INST_NEW (cfg, ins, OP_NOP);
6357 MONO_ADD_INS (bblock, ins);
6363 CHECK_STACK_OVF (1);
6364 n = (*ip)-CEE_LDARG_0;
6366 EMIT_NEW_ARGLOAD (cfg, ins, n);
6374 CHECK_STACK_OVF (1);
6375 n = (*ip)-CEE_LDLOC_0;
6377 EMIT_NEW_LOCLOAD (cfg, ins, n);
6386 n = (*ip)-CEE_STLOC_0;
6389 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
6391 emit_stloc_ir (cfg, sp, header, n);
6398 CHECK_STACK_OVF (1);
6401 EMIT_NEW_ARGLOAD (cfg, ins, n);
6407 CHECK_STACK_OVF (1);
6410 NEW_ARGLOADA (cfg, ins, n);
6411 MONO_ADD_INS (cfg->cbb, ins);
6421 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
6423 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
6428 CHECK_STACK_OVF (1);
6431 EMIT_NEW_LOCLOAD (cfg, ins, n);
6435 case CEE_LDLOCA_S: {
6436 unsigned char *tmp_ip;
6438 CHECK_STACK_OVF (1);
6439 CHECK_LOCAL (ip [1]);
6441 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
6447 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
6456 CHECK_LOCAL (ip [1]);
6457 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
6459 emit_stloc_ir (cfg, sp, header, ip [1]);
6464 CHECK_STACK_OVF (1);
6465 EMIT_NEW_PCONST (cfg, ins, NULL);
6466 ins->type = STACK_OBJ;
6471 CHECK_STACK_OVF (1);
6472 EMIT_NEW_ICONST (cfg, ins, -1);
6485 CHECK_STACK_OVF (1);
6486 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
6492 CHECK_STACK_OVF (1);
6494 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
6500 CHECK_STACK_OVF (1);
6501 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
6507 CHECK_STACK_OVF (1);
6508 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6509 ins->type = STACK_I8;
6510 ins->dreg = alloc_dreg (cfg, STACK_I8);
6512 ins->inst_l = (gint64)read64 (ip);
6513 MONO_ADD_INS (bblock, ins);
6519 gboolean use_aotconst = FALSE;
6521 #ifdef TARGET_POWERPC
6522 /* FIXME: Clean this up */
6523 if (cfg->compile_aot)
6524 use_aotconst = TRUE;
6527 /* FIXME: we should really allocate this only late in the compilation process */
6528 f = mono_domain_alloc (cfg->domain, sizeof (float));
6530 CHECK_STACK_OVF (1);
6536 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
6538 dreg = alloc_freg (cfg);
6539 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
6540 ins->type = STACK_R8;
6542 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6543 ins->type = STACK_R8;
6544 ins->dreg = alloc_dreg (cfg, STACK_R8);
6546 MONO_ADD_INS (bblock, ins);
6556 gboolean use_aotconst = FALSE;
6558 #ifdef TARGET_POWERPC
6559 /* FIXME: Clean this up */
6560 if (cfg->compile_aot)
6561 use_aotconst = TRUE;
6564 /* FIXME: we should really allocate this only late in the compilation process */
6565 d = mono_domain_alloc (cfg->domain, sizeof (double));
6567 CHECK_STACK_OVF (1);
6573 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
6575 dreg = alloc_freg (cfg);
6576 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
6577 ins->type = STACK_R8;
6579 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6580 ins->type = STACK_R8;
6581 ins->dreg = alloc_dreg (cfg, STACK_R8);
6583 MONO_ADD_INS (bblock, ins);
6592 MonoInst *temp, *store;
6594 CHECK_STACK_OVF (1);
6598 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
6599 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
6601 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6604 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6617 if (sp [0]->type == STACK_R8)
6618 /* we need to pop the value from the x86 FP stack */
6619 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
6628 if (stack_start != sp)
6630 token = read32 (ip + 1);
6631 /* FIXME: check the signature matches */
6632 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6634 if (!cmethod || mono_loader_get_last_error ())
6637 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
6638 GENERIC_SHARING_FAILURE (CEE_JMP);
6640 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
6641 CHECK_CFG_EXCEPTION;
6643 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6645 MonoMethodSignature *fsig = mono_method_signature (cmethod);
6648 /* Handle tail calls similarly to calls */
6649 n = fsig->param_count + fsig->hasthis;
6651 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
6652 call->method = cmethod;
6653 call->tail_call = TRUE;
6654 call->signature = mono_method_signature (cmethod);
6655 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6656 call->inst.inst_p0 = cmethod;
6657 for (i = 0; i < n; ++i)
6658 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
6660 mono_arch_emit_call (cfg, call);
6661 MONO_ADD_INS (bblock, (MonoInst*)call);
6664 for (i = 0; i < num_args; ++i)
6665 /* Prevent arguments from being optimized away */
6666 arg_array [i]->flags |= MONO_INST_VOLATILE;
6668 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6669 ins = (MonoInst*)call;
6670 ins->inst_p0 = cmethod;
6671 MONO_ADD_INS (bblock, ins);
6675 start_new_bblock = 1;
6680 case CEE_CALLVIRT: {
6681 MonoInst *addr = NULL;
6682 MonoMethodSignature *fsig = NULL;
6684 int virtual = *ip == CEE_CALLVIRT;
6685 int calli = *ip == CEE_CALLI;
6686 gboolean pass_imt_from_rgctx = FALSE;
6687 MonoInst *imt_arg = NULL;
6688 gboolean pass_vtable = FALSE;
6689 gboolean pass_mrgctx = FALSE;
6690 MonoInst *vtable_arg = NULL;
6691 gboolean check_this = FALSE;
6692 gboolean supported_tail_call = FALSE;
6693 gboolean need_seq_point = FALSE;
6696 token = read32 (ip + 1);
6703 if (method->wrapper_type != MONO_WRAPPER_NONE)
6704 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6706 fsig = mono_metadata_parse_signature (image, token);
6708 n = fsig->param_count + fsig->hasthis;
6710 if (method->dynamic && fsig->pinvoke) {
6714 * This is a call through a function pointer using a pinvoke
6715 * signature. Have to create a wrapper and call that instead.
6716 * FIXME: This is very slow, need to create a wrapper at JIT time
6717 * instead based on the signature.
6719 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
6720 EMIT_NEW_PCONST (cfg, args [1], fsig);
6722 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6725 MonoMethod *cil_method;
6727 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6728 if (constrained_call && cfg->verbose_level > 2)
6729 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_call));
6730 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6731 cil_method = cmethod;
6732 if (constrained_call && !((constrained_call->byval_arg.type == MONO_TYPE_VAR ||
6733 constrained_call->byval_arg.type == MONO_TYPE_MVAR) &&
6734 cfg->generic_sharing_context)) {
6735 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_call, generic_context);
6737 } else if (constrained_call) {
6738 if (cfg->verbose_level > 2)
6739 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_call));
6741 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6743 * This is needed since get_method_constrained can't find
6744 * the method in klass representing a type var.
6745 * The type var is guaranteed to be a reference type in this
6748 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6749 cil_method = cmethod;
6750 g_assert (!cmethod->klass->valuetype);
6752 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6755 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6756 cil_method = cmethod;
6759 if (!cmethod || mono_loader_get_last_error ())
6761 if (!dont_verify && !cfg->skip_visibility) {
6762 MonoMethod *target_method = cil_method;
6763 if (method->is_inflated) {
6764 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6766 if (!mono_method_can_access_method (method_definition, target_method) &&
6767 !mono_method_can_access_method (method, cil_method))
6768 METHOD_ACCESS_FAILURE;
6771 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6772 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6774 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6775 /* MS.NET seems to silently convert this to a callvirt */
6780 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
6781 * converts to a callvirt.
6783 * tests/bug-515884.il is an example of this behavior
6785 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
6786 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
6787 if (!virtual && cmethod->klass->marshalbyref && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
6791 if (!cmethod->klass->inited)
6792 if (!mono_class_init (cmethod->klass))
6793 TYPE_LOAD_ERROR (cmethod->klass);
6795 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6796 mini_class_is_system_array (cmethod->klass)) {
6797 array_rank = cmethod->klass->rank;
6798 fsig = mono_method_signature (cmethod);
6800 fsig = mono_method_signature (cmethod);
6805 if (fsig->pinvoke) {
6806 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6807 check_for_pending_exc, FALSE);
6808 fsig = mono_method_signature (wrapper);
6809 } else if (constrained_call) {
6810 fsig = mono_method_signature (cmethod);
6812 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6816 mono_save_token_info (cfg, image, token, cil_method);
6818 if (!MONO_TYPE_IS_VOID (fsig->ret) && !sym_seq_points) {
6820 * Need to emit an implicit seq point after every non-void call so single stepping through nested calls like
6821 * foo (bar (), baz ())
6822 * works correctly. MS does this also:
6823 * http://stackoverflow.com/questions/6937198/making-your-net-language-step-correctly-in-the-debugger
6824 * The problem with this approach is that the debugger will stop after all calls returning a value,
6825 * even for simple cases, like:
6828 /* Special case a few common successor opcodes */
6829 if (!(ip + 5 < end && ip [5] == CEE_POP))
6830 need_seq_point = TRUE;
6833 n = fsig->param_count + fsig->hasthis;
6835 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6836 if (check_linkdemand (cfg, method, cmethod))
6838 CHECK_CFG_EXCEPTION;
6841 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6842 g_assert_not_reached ();
6845 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6848 if (!cfg->generic_sharing_context && cmethod)
6849 g_assert (!mono_method_check_context_used (cmethod));
6853 //g_assert (!virtual || fsig->hasthis);
6857 if (constrained_call) {
6859 * We have the `constrained.' prefix opcode.
6861 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6863 * The type parameter is instantiated as a valuetype,
6864 * but that type doesn't override the method we're
6865 * calling, so we need to box `this'.
6867 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6868 ins->klass = constrained_call;
6869 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
6870 CHECK_CFG_EXCEPTION;
6871 } else if (!constrained_call->valuetype) {
6872 int dreg = alloc_ireg_ref (cfg);
6875 * The type parameter is instantiated as a reference
6876 * type. We have a managed pointer on the stack, so
6877 * we need to dereference it here.
6879 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6880 ins->type = STACK_OBJ;
6882 } else if (cmethod->klass->valuetype)
6884 constrained_call = NULL;
6887 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6891 * If the callee is a shared method, then its static cctor
6892 * might not get called after the call was patched.
6894 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6895 emit_generic_class_init (cfg, cmethod->klass);
6896 CHECK_TYPELOAD (cmethod->klass);
6899 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6900 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6901 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6902 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6903 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6906 * Pass vtable iff target method might
6907 * be shared, which means that sharing
6908 * is enabled for its class and its
6909 * context is sharable (and it's not a
6912 if (sharing_enabled && context_sharable &&
6913 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6917 if (cmethod && mini_method_get_context (cmethod) &&
6918 mini_method_get_context (cmethod)->method_inst) {
6919 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6920 MonoGenericContext *context = mini_method_get_context (cmethod);
6921 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6923 g_assert (!pass_vtable);
6925 if (sharing_enabled && context_sharable)
6929 if (cfg->generic_sharing_context && cmethod) {
6930 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6932 context_used = mono_method_check_context_used (cmethod);
6934 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6935 /* Generic method interface
6936 calls are resolved via a
6937 helper function and don't
6939 if (!cmethod_context || !cmethod_context->method_inst)
6940 pass_imt_from_rgctx = TRUE;
6944 * If a shared method calls another
6945 * shared method then the caller must
6946 * have a generic sharing context
6947 * because the magic trampoline
6948 * requires it. FIXME: We shouldn't
6949 * have to force the vtable/mrgctx
6950 * variable here. Instead there
6951 * should be a flag in the cfg to
6952 * request a generic sharing context.
6955 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6956 mono_get_vtable_var (cfg);
6961 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6963 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6965 CHECK_TYPELOAD (cmethod->klass);
6966 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6971 g_assert (!vtable_arg);
6973 if (!cfg->compile_aot) {
6975 * emit_get_rgctx_method () calls mono_class_vtable () so check
6976 * for type load errors before.
6978 mono_class_setup_vtable (cmethod->klass);
6979 CHECK_TYPELOAD (cmethod->klass);
6982 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6984 /* !marshalbyref is needed to properly handle generic methods + remoting */
6985 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6986 MONO_METHOD_IS_FINAL (cmethod)) &&
6987 !cmethod->klass->marshalbyref) {
6994 if (pass_imt_from_rgctx) {
6995 g_assert (!pass_vtable);
6998 imt_arg = emit_get_rgctx_method (cfg, context_used,
6999 cmethod, MONO_RGCTX_INFO_METHOD);
7003 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
7005 /* Calling virtual generic methods */
7006 if (cmethod && virtual &&
7007 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
7008 !(MONO_METHOD_IS_FINAL (cmethod) &&
7009 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
7010 mono_method_signature (cmethod)->generic_param_count) {
7011 MonoInst *this_temp, *this_arg_temp, *store;
7012 MonoInst *iargs [4];
7014 g_assert (mono_method_signature (cmethod)->is_inflated);
7016 /* Prevent inlining of methods that contain indirect calls */
7019 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
7020 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt) {
7021 g_assert (!imt_arg);
7023 g_assert (cmethod->is_inflated);
7024 imt_arg = emit_get_rgctx_method (cfg, context_used,
7025 cmethod, MONO_RGCTX_INFO_METHOD);
7026 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg, NULL);
7030 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
7031 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
7032 MONO_ADD_INS (bblock, store);
7034 /* FIXME: This should be a managed pointer */
7035 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7037 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
7038 iargs [1] = emit_get_rgctx_method (cfg, context_used,
7039 cmethod, MONO_RGCTX_INFO_METHOD);
7040 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
7041 addr = mono_emit_jit_icall (cfg,
7042 mono_helper_compile_generic_method, iargs);
7044 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
7046 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL);
7049 if (!MONO_TYPE_IS_VOID (fsig->ret))
7050 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7052 CHECK_CFG_EXCEPTION;
7057 emit_seq_point (cfg, method, ip, FALSE);
7062 * Implement a workaround for the inherent races involved in locking:
7068 * If a thread abort happens between the call to Monitor.Enter () and the start of the
7069 * try block, the Exit () won't be executed, see:
7070 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
7071 * To work around this, we extend such try blocks to include the last x bytes
7072 * of the Monitor.Enter () call.
7074 if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
7075 MonoBasicBlock *tbb;
7077 GET_BBLOCK (cfg, tbb, ip + 5);
7079 * Only extend try blocks with a finally, to avoid catching exceptions thrown
7080 * from Monitor.Enter like ArgumentNullException.
7082 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
7083 /* Mark this bblock as needing to be extended */
7084 tbb->extend_try_block = TRUE;
7088 /* Conversion to a JIT intrinsic */
7089 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
7091 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7092 type_to_eval_stack_type ((cfg), fsig->ret, ins);
7097 CHECK_CFG_EXCEPTION;
7102 emit_seq_point (cfg, method, ip, FALSE);
7107 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
7108 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
7109 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
7110 !g_list_find (dont_inline, cmethod)) {
7112 gboolean always = FALSE;
7114 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
7115 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
7116 /* Prevent inlining of methods that call wrappers */
7118 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
7122 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, always))) {
7124 cfg->real_offset += 5;
7127 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7128 /* *sp is already set by inline_method */
7132 inline_costs += costs;
7135 emit_seq_point (cfg, method, ip, FALSE);
7140 inline_costs += 10 * num_calls++;
7142 /* Tail recursion elimination */
7143 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
7144 gboolean has_vtargs = FALSE;
7147 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
7150 /* keep it simple */
7151 for (i = fsig->param_count - 1; i >= 0; i--) {
7152 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
7157 for (i = 0; i < n; ++i)
7158 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
7159 MONO_INST_NEW (cfg, ins, OP_BR);
7160 MONO_ADD_INS (bblock, ins);
7161 tblock = start_bblock->out_bb [0];
7162 link_bblock (cfg, bblock, tblock);
7163 ins->inst_target_bb = tblock;
7164 start_new_bblock = 1;
7166 /* skip the CEE_RET, too */
7167 if (ip_in_bb (cfg, bblock, ip + 5))
7177 /* Generic sharing */
7178 /* FIXME: only do this for generic methods if
7179 they are not shared! */
7180 if (context_used && !imt_arg && !array_rank &&
7181 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7182 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
7183 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
7184 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
7187 g_assert (cfg->generic_sharing_context && cmethod);
7191 * We are compiling a call to a
7192 * generic method from shared code,
7193 * which means that we have to look up
7194 * the method in the rgctx and do an
7197 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7200 /* Indirect calls */
7202 g_assert (!imt_arg);
7204 if (*ip == CEE_CALL)
7205 g_assert (context_used);
7206 else if (*ip == CEE_CALLI)
7207 g_assert (!vtable_arg);
7209 /* FIXME: what the hell is this??? */
7210 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
7211 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
7213 /* Prevent inlining of methods with indirect calls */
7217 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, vtable_arg);
7219 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
7221 * Instead of emitting an indirect call, emit a direct call
7222 * with the contents of the aotconst as the patch info.
7224 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
7226 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
7227 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
7230 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL);
7233 if (!MONO_TYPE_IS_VOID (fsig->ret))
7234 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7236 CHECK_CFG_EXCEPTION;
7241 emit_seq_point (cfg, method, ip, FALSE);
7249 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
7250 MonoInst *val = sp [fsig->param_count];
7252 if (val->type == STACK_OBJ) {
7253 MonoInst *iargs [2];
7258 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
7261 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
7262 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
7263 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
7264 emit_write_barrier (cfg, addr, val, 0);
7265 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
7266 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
7268 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
7271 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
7272 if (!cmethod->klass->element_class->valuetype && !readonly)
7273 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
7274 CHECK_TYPELOAD (cmethod->klass);
7277 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
7280 g_assert_not_reached ();
7283 CHECK_CFG_EXCEPTION;
7287 emit_seq_point (cfg, method, ip, FALSE);
7291 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
7293 if (!MONO_TYPE_IS_VOID (fsig->ret))
7294 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7296 CHECK_CFG_EXCEPTION;
7301 emit_seq_point (cfg, method, ip, FALSE);
7305 /* Tail prefix / tail call optimization */
7307 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
7308 /* FIXME: runtime generic context pointer for jumps? */
7309 /* FIXME: handle this for generic sharing eventually */
7310 supported_tail_call = cmethod &&
7311 ((((ins_flag & MONO_INST_TAILCALL) && (*ip == CEE_CALL))
7312 ))//|| ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && ip [5] == CEE_RET))
7313 && !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig);
7315 if (supported_tail_call) {
7318 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
7321 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
7323 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
7324 /* Handle tail calls similarly to calls */
7325 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE, FALSE);
7327 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
7328 call->tail_call = TRUE;
7329 call->method = cmethod;
7330 call->signature = mono_method_signature (cmethod);
7333 * We implement tail calls by storing the actual arguments into the
7334 * argument variables, then emitting a CEE_JMP.
7336 for (i = 0; i < n; ++i) {
7337 /* Prevent argument from being register allocated */
7338 arg_array [i]->flags |= MONO_INST_VOLATILE;
7339 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
7343 ins = (MonoInst*)call;
7344 ins->inst_p0 = cmethod;
7345 ins->inst_p1 = arg_array [0];
7346 MONO_ADD_INS (bblock, ins);
7347 link_bblock (cfg, bblock, end_bblock);
7348 start_new_bblock = 1;
7350 CHECK_CFG_EXCEPTION;
7355 // FIXME: Eliminate unreachable epilogs
7358 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
7359 * only reachable from this call.
7361 GET_BBLOCK (cfg, tblock, ip);
7362 if (tblock == bblock || tblock->in_count == 0)
7368 * Synchronized wrappers.
7369 * Its hard to determine where to replace a method with its synchronized
7370 * wrapper without causing an infinite recursion. The current solution is
7371 * to add the synchronized wrapper in the trampolines, and to
7372 * change the called method to a dummy wrapper, and resolve that wrapper
7373 * to the real method in mono_jit_compile_method ().
7375 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED && mono_marshal_method_from_wrapper (cfg->method) == cmethod) {
7376 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
7381 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
7382 imt_arg, vtable_arg);
7384 if (!MONO_TYPE_IS_VOID (fsig->ret))
7385 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7387 CHECK_CFG_EXCEPTION;
7392 emit_seq_point (cfg, method, ip, FALSE);
7396 if (cfg->method != method) {
7397 /* return from inlined method */
7399 * If in_count == 0, that means the ret is unreachable due to
7400 * being preceded by a throw. In that case, inline_method () will
7401 * handle setting the return value
7402 * (test case: test_0_inline_throw ()).
7404 if (return_var && cfg->cbb->in_count) {
7405 MonoType *ret_type = mono_method_signature (method)->ret;
7411 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
7414 //g_assert (returnvar != -1);
7415 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
7416 cfg->ret_var_set = TRUE;
7420 MonoType *ret_type = mono_method_signature (method)->ret;
7422 if (seq_points && !sym_seq_points) {
7424 * Place a seq point here too even though the IL stack is not
7425 * empty, so a step over on
7428 * will work correctly.
7430 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
7431 MONO_ADD_INS (cfg->cbb, ins);
7434 g_assert (!return_var);
7438 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
7441 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
7444 if (!cfg->vret_addr) {
7447 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
7449 EMIT_NEW_RETLOADA (cfg, ret_addr);
7451 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
7452 ins->klass = mono_class_from_mono_type (ret_type);
7455 #ifdef MONO_ARCH_SOFT_FLOAT
7456 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
7457 MonoInst *iargs [1];
7461 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
7462 mono_arch_emit_setret (cfg, method, conv);
7464 mono_arch_emit_setret (cfg, method, *sp);
7467 mono_arch_emit_setret (cfg, method, *sp);
7472 if (sp != stack_start)
7474 MONO_INST_NEW (cfg, ins, OP_BR);
7476 ins->inst_target_bb = end_bblock;
7477 MONO_ADD_INS (bblock, ins);
7478 link_bblock (cfg, bblock, end_bblock);
7479 start_new_bblock = 1;
7483 MONO_INST_NEW (cfg, ins, OP_BR);
7485 target = ip + 1 + (signed char)(*ip);
7487 GET_BBLOCK (cfg, tblock, target);
7488 link_bblock (cfg, bblock, tblock);
7489 ins->inst_target_bb = tblock;
7490 if (sp != stack_start) {
7491 handle_stack_args (cfg, stack_start, sp - stack_start);
7493 CHECK_UNVERIFIABLE (cfg);
7495 MONO_ADD_INS (bblock, ins);
7496 start_new_bblock = 1;
7497 inline_costs += BRANCH_COST;
7511 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
7513 target = ip + 1 + *(signed char*)ip;
7519 inline_costs += BRANCH_COST;
7523 MONO_INST_NEW (cfg, ins, OP_BR);
7526 target = ip + 4 + (gint32)read32(ip);
7528 GET_BBLOCK (cfg, tblock, target);
7529 link_bblock (cfg, bblock, tblock);
7530 ins->inst_target_bb = tblock;
7531 if (sp != stack_start) {
7532 handle_stack_args (cfg, stack_start, sp - stack_start);
7534 CHECK_UNVERIFIABLE (cfg);
7537 MONO_ADD_INS (bblock, ins);
7539 start_new_bblock = 1;
7540 inline_costs += BRANCH_COST;
7547 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
7548 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
7549 guint32 opsize = is_short ? 1 : 4;
7551 CHECK_OPSIZE (opsize);
7553 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
7556 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
7561 GET_BBLOCK (cfg, tblock, target);
7562 link_bblock (cfg, bblock, tblock);
7563 GET_BBLOCK (cfg, tblock, ip);
7564 link_bblock (cfg, bblock, tblock);
7566 if (sp != stack_start) {
7567 handle_stack_args (cfg, stack_start, sp - stack_start);
7568 CHECK_UNVERIFIABLE (cfg);
7571 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
7572 cmp->sreg1 = sp [0]->dreg;
7573 type_from_op (cmp, sp [0], NULL);
7576 #if SIZEOF_REGISTER == 4
7577 if (cmp->opcode == OP_LCOMPARE_IMM) {
7578 /* Convert it to OP_LCOMPARE */
7579 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7580 ins->type = STACK_I8;
7581 ins->dreg = alloc_dreg (cfg, STACK_I8);
7583 MONO_ADD_INS (bblock, ins);
7584 cmp->opcode = OP_LCOMPARE;
7585 cmp->sreg2 = ins->dreg;
7588 MONO_ADD_INS (bblock, cmp);
7590 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
7591 type_from_op (ins, sp [0], NULL);
7592 MONO_ADD_INS (bblock, ins);
7593 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
7594 GET_BBLOCK (cfg, tblock, target);
7595 ins->inst_true_bb = tblock;
7596 GET_BBLOCK (cfg, tblock, ip);
7597 ins->inst_false_bb = tblock;
7598 start_new_bblock = 2;
7601 inline_costs += BRANCH_COST;
7616 MONO_INST_NEW (cfg, ins, *ip);
7618 target = ip + 4 + (gint32)read32(ip);
7624 inline_costs += BRANCH_COST;
7628 MonoBasicBlock **targets;
7629 MonoBasicBlock *default_bblock;
7630 MonoJumpInfoBBTable *table;
7631 int offset_reg = alloc_preg (cfg);
7632 int target_reg = alloc_preg (cfg);
7633 int table_reg = alloc_preg (cfg);
7634 int sum_reg = alloc_preg (cfg);
7635 gboolean use_op_switch;
7639 n = read32 (ip + 1);
7642 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
7646 CHECK_OPSIZE (n * sizeof (guint32));
7647 target = ip + n * sizeof (guint32);
7649 GET_BBLOCK (cfg, default_bblock, target);
7650 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7652 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
7653 for (i = 0; i < n; ++i) {
7654 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
7655 targets [i] = tblock;
7656 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
7660 if (sp != stack_start) {
7662 * Link the current bb with the targets as well, so handle_stack_args
7663 * will set their in_stack correctly.
7665 link_bblock (cfg, bblock, default_bblock);
7666 for (i = 0; i < n; ++i)
7667 link_bblock (cfg, bblock, targets [i]);
7669 handle_stack_args (cfg, stack_start, sp - stack_start);
7671 CHECK_UNVERIFIABLE (cfg);
7674 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
7675 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
7678 for (i = 0; i < n; ++i)
7679 link_bblock (cfg, bblock, targets [i]);
7681 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
7682 table->table = targets;
7683 table->table_size = n;
7685 use_op_switch = FALSE;
7687 /* ARM implements SWITCH statements differently */
7688 /* FIXME: Make it use the generic implementation */
7689 if (!cfg->compile_aot)
7690 use_op_switch = TRUE;
7693 if (COMPILE_LLVM (cfg))
7694 use_op_switch = TRUE;
7696 cfg->cbb->has_jump_table = 1;
7698 if (use_op_switch) {
7699 MONO_INST_NEW (cfg, ins, OP_SWITCH);
7700 ins->sreg1 = src1->dreg;
7701 ins->inst_p0 = table;
7702 ins->inst_many_bb = targets;
7703 ins->klass = GUINT_TO_POINTER (n);
7704 MONO_ADD_INS (cfg->cbb, ins);
7706 if (sizeof (gpointer) == 8)
7707 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
7709 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
7711 #if SIZEOF_REGISTER == 8
7712 /* The upper word might not be zero, and we add it to a 64 bit address later */
7713 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
7716 if (cfg->compile_aot) {
7717 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
7719 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
7720 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
7721 ins->inst_p0 = table;
7722 ins->dreg = table_reg;
7723 MONO_ADD_INS (cfg->cbb, ins);
7726 /* FIXME: Use load_memindex */
7727 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
7728 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
7729 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
7731 start_new_bblock = 1;
7732 inline_costs += (BRANCH_COST * 2);
7752 dreg = alloc_freg (cfg);
7755 dreg = alloc_lreg (cfg);
7758 dreg = alloc_ireg_ref (cfg);
7761 dreg = alloc_preg (cfg);
7764 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
7765 ins->type = ldind_type [*ip - CEE_LDIND_I1];
7766 ins->flags |= ins_flag;
7768 MONO_ADD_INS (bblock, ins);
7770 if (ins->flags & MONO_INST_VOLATILE) {
7771 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
7772 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
7773 emit_memory_barrier (cfg, FullBarrier);
7788 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
7789 ins->flags |= ins_flag;
7792 if (ins->flags & MONO_INST_VOLATILE) {
7793 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
7794 /* FIXME it's questionable if release semantics require a full barrier or just StoreStore|LoadStore */
7795 emit_memory_barrier (cfg, FullBarrier);
7798 MONO_ADD_INS (bblock, ins);
7800 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
7801 emit_write_barrier (cfg, sp [0], sp [1], -1);
7810 MONO_INST_NEW (cfg, ins, (*ip));
7812 ins->sreg1 = sp [0]->dreg;
7813 ins->sreg2 = sp [1]->dreg;
7814 type_from_op (ins, sp [0], sp [1]);
7816 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7818 /* Use the immediate opcodes if possible */
7819 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
7820 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7821 if (imm_opcode != -1) {
7822 ins->opcode = imm_opcode;
7823 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7826 sp [1]->opcode = OP_NOP;
7830 MONO_ADD_INS ((cfg)->cbb, (ins));
7832 *sp++ = mono_decompose_opcode (cfg, ins);
7849 MONO_INST_NEW (cfg, ins, (*ip));
7851 ins->sreg1 = sp [0]->dreg;
7852 ins->sreg2 = sp [1]->dreg;
7853 type_from_op (ins, sp [0], sp [1]);
7855 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7856 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7858 /* FIXME: Pass opcode to is_inst_imm */
7860 /* Use the immediate opcodes if possible */
7861 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7864 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7865 if (imm_opcode != -1) {
7866 ins->opcode = imm_opcode;
7867 if (sp [1]->opcode == OP_I8CONST) {
7868 #if SIZEOF_REGISTER == 8
7869 ins->inst_imm = sp [1]->inst_l;
7871 ins->inst_ls_word = sp [1]->inst_ls_word;
7872 ins->inst_ms_word = sp [1]->inst_ms_word;
7876 ins->inst_imm = (gssize)(sp [1]->inst_c0);
7879 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7880 if (sp [1]->next == NULL)
7881 sp [1]->opcode = OP_NOP;
7884 MONO_ADD_INS ((cfg)->cbb, (ins));
7886 *sp++ = mono_decompose_opcode (cfg, ins);
7899 case CEE_CONV_OVF_I8:
7900 case CEE_CONV_OVF_U8:
7904 /* Special case this earlier so we have long constants in the IR */
7905 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7906 int data = sp [-1]->inst_c0;
7907 sp [-1]->opcode = OP_I8CONST;
7908 sp [-1]->type = STACK_I8;
7909 #if SIZEOF_REGISTER == 8
7910 if ((*ip) == CEE_CONV_U8)
7911 sp [-1]->inst_c0 = (guint32)data;
7913 sp [-1]->inst_c0 = data;
7915 sp [-1]->inst_ls_word = data;
7916 if ((*ip) == CEE_CONV_U8)
7917 sp [-1]->inst_ms_word = 0;
7919 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7921 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7928 case CEE_CONV_OVF_I4:
7929 case CEE_CONV_OVF_I1:
7930 case CEE_CONV_OVF_I2:
7931 case CEE_CONV_OVF_I:
7932 case CEE_CONV_OVF_U:
7935 if (sp [-1]->type == STACK_R8) {
7936 ADD_UNOP (CEE_CONV_OVF_I8);
7943 case CEE_CONV_OVF_U1:
7944 case CEE_CONV_OVF_U2:
7945 case CEE_CONV_OVF_U4:
7948 if (sp [-1]->type == STACK_R8) {
7949 ADD_UNOP (CEE_CONV_OVF_U8);
7956 case CEE_CONV_OVF_I1_UN:
7957 case CEE_CONV_OVF_I2_UN:
7958 case CEE_CONV_OVF_I4_UN:
7959 case CEE_CONV_OVF_I8_UN:
7960 case CEE_CONV_OVF_U1_UN:
7961 case CEE_CONV_OVF_U2_UN:
7962 case CEE_CONV_OVF_U4_UN:
7963 case CEE_CONV_OVF_U8_UN:
7964 case CEE_CONV_OVF_I_UN:
7965 case CEE_CONV_OVF_U_UN:
7972 CHECK_CFG_EXCEPTION;
7976 case CEE_ADD_OVF_UN:
7978 case CEE_MUL_OVF_UN:
7980 case CEE_SUB_OVF_UN:
7988 token = read32 (ip + 1);
7989 klass = mini_get_class (method, token, generic_context);
7990 CHECK_TYPELOAD (klass);
7992 if (generic_class_is_reference_type (cfg, klass)) {
7993 MonoInst *store, *load;
7994 int dreg = alloc_ireg_ref (cfg);
7996 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7997 load->flags |= ins_flag;
7998 MONO_ADD_INS (cfg->cbb, load);
8000 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
8001 store->flags |= ins_flag;
8002 MONO_ADD_INS (cfg->cbb, store);
8004 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
8005 emit_write_barrier (cfg, sp [0], sp [1], -1);
8007 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
8019 token = read32 (ip + 1);
8020 klass = mini_get_class (method, token, generic_context);
8021 CHECK_TYPELOAD (klass);
8023 /* Optimize the common ldobj+stloc combination */
8033 loc_index = ip [5] - CEE_STLOC_0;
8040 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
8041 CHECK_LOCAL (loc_index);
8043 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
8044 ins->dreg = cfg->locals [loc_index]->dreg;
8050 /* Optimize the ldobj+stobj combination */
8051 /* The reference case ends up being a load+store anyway */
8052 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
8057 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
8064 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
8073 CHECK_STACK_OVF (1);
8075 n = read32 (ip + 1);
8077 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
8078 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
8079 ins->type = STACK_OBJ;
8082 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
8083 MonoInst *iargs [1];
8085 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
8086 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
8088 if (cfg->opt & MONO_OPT_SHARED) {
8089 MonoInst *iargs [3];
8091 if (cfg->compile_aot) {
8092 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
8094 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8095 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
8096 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
8097 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
8098 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
8100 if (bblock->out_of_line) {
8101 MonoInst *iargs [2];
8103 if (image == mono_defaults.corlib) {
8105 * Avoid relocations in AOT and save some space by using a
8106 * version of helper_ldstr specialized to mscorlib.
8108 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
8109 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
8111 /* Avoid creating the string object */
8112 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8113 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
8114 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
8118 if (cfg->compile_aot) {
8119 NEW_LDSTRCONST (cfg, ins, image, n);
8121 MONO_ADD_INS (bblock, ins);
8124 NEW_PCONST (cfg, ins, NULL);
8125 ins->type = STACK_OBJ;
8126 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
8128 OUT_OF_MEMORY_FAILURE;
8131 MONO_ADD_INS (bblock, ins);
8140 MonoInst *iargs [2];
8141 MonoMethodSignature *fsig;
8144 MonoInst *vtable_arg = NULL;
8147 token = read32 (ip + 1);
8148 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8149 if (!cmethod || mono_loader_get_last_error ())
8151 fsig = mono_method_get_signature (cmethod, image, token);
8155 mono_save_token_info (cfg, image, token, cmethod);
8157 if (!mono_class_init (cmethod->klass))
8158 TYPE_LOAD_ERROR (cmethod->klass);
8160 if (cfg->generic_sharing_context)
8161 context_used = mono_method_check_context_used (cmethod);
8163 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
8164 if (check_linkdemand (cfg, method, cmethod))
8166 CHECK_CFG_EXCEPTION;
8167 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
8168 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
8171 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8172 emit_generic_class_init (cfg, cmethod->klass);
8173 CHECK_TYPELOAD (cmethod->klass);
8176 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
8177 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
8178 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
8179 mono_class_vtable (cfg->domain, cmethod->klass);
8180 CHECK_TYPELOAD (cmethod->klass);
8182 vtable_arg = emit_get_rgctx_method (cfg, context_used,
8183 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8186 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
8187 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8189 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8191 CHECK_TYPELOAD (cmethod->klass);
8192 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8197 n = fsig->param_count;
8201 * Generate smaller code for the common newobj <exception> instruction in
8202 * argument checking code.
8204 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
8205 is_exception_class (cmethod->klass) && n <= 2 &&
8206 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
8207 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
8208 MonoInst *iargs [3];
8210 g_assert (!vtable_arg);
8214 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
8217 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
8221 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
8226 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
8229 g_assert_not_reached ();
8237 /* move the args to allow room for 'this' in the first position */
8243 /* check_call_signature () requires sp[0] to be set */
8244 this_ins.type = STACK_OBJ;
8246 if (check_call_signature (cfg, fsig, sp))
8251 if (mini_class_is_system_array (cmethod->klass)) {
8252 g_assert (!vtable_arg);
8254 *sp = emit_get_rgctx_method (cfg, context_used,
8255 cmethod, MONO_RGCTX_INFO_METHOD);
8257 /* Avoid varargs in the common case */
8258 if (fsig->param_count == 1)
8259 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
8260 else if (fsig->param_count == 2)
8261 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
8262 else if (fsig->param_count == 3)
8263 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
8265 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
8266 } else if (cmethod->string_ctor) {
8267 g_assert (!context_used);
8268 g_assert (!vtable_arg);
8269 /* we simply pass a null pointer */
8270 EMIT_NEW_PCONST (cfg, *sp, NULL);
8271 /* now call the string ctor */
8272 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL, NULL);
8274 MonoInst* callvirt_this_arg = NULL;
8276 if (cmethod->klass->valuetype) {
8277 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
8278 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
8279 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
8284 * The code generated by mini_emit_virtual_call () expects
8285 * iargs [0] to be a boxed instance, but luckily the vcall
8286 * will be transformed into a normal call there.
8288 } else if (context_used) {
8289 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
8292 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8294 CHECK_TYPELOAD (cmethod->klass);
8297 * TypeInitializationExceptions thrown from the mono_runtime_class_init
8298 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
8299 * As a workaround, we call class cctors before allocating objects.
8301 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8302 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8303 if (cfg->verbose_level > 2)
8304 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
8305 class_inits = g_slist_prepend (class_inits, vtable);
8308 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
8311 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
8314 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
8316 /* Now call the actual ctor */
8317 /* Avoid virtual calls to ctors if possible */
8318 if (cmethod->klass->marshalbyref)
8319 callvirt_this_arg = sp [0];
8322 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
8323 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8324 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8329 CHECK_CFG_EXCEPTION;
8330 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
8331 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
8332 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
8333 !g_list_find (dont_inline, cmethod)) {
8336 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
8337 cfg->real_offset += 5;
8340 inline_costs += costs - 5;
8343 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL, NULL);
8345 } else if (context_used &&
8346 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
8347 !mono_class_generic_sharing_enabled (cmethod->klass))) {
8348 MonoInst *cmethod_addr;
8350 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
8351 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8353 mono_emit_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
8356 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp,
8357 callvirt_this_arg, NULL, vtable_arg);
8361 if (alloc == NULL) {
8363 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
8364 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
8378 token = read32 (ip + 1);
8379 klass = mini_get_class (method, token, generic_context);
8380 CHECK_TYPELOAD (klass);
8381 if (sp [0]->type != STACK_OBJ)
8384 if (cfg->generic_sharing_context)
8385 context_used = mono_class_check_context_used (klass);
8387 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
8388 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
8395 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8398 if (cfg->compile_aot)
8399 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
8401 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8403 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
8404 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
8407 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8408 MonoMethod *mono_castclass;
8409 MonoInst *iargs [1];
8412 mono_castclass = mono_marshal_get_castclass (klass);
8415 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
8416 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8417 CHECK_CFG_EXCEPTION;
8418 g_assert (costs > 0);
8421 cfg->real_offset += 5;
8426 inline_costs += costs;
8429 ins = handle_castclass (cfg, klass, *sp, context_used);
8430 CHECK_CFG_EXCEPTION;
8440 token = read32 (ip + 1);
8441 klass = mini_get_class (method, token, generic_context);
8442 CHECK_TYPELOAD (klass);
8443 if (sp [0]->type != STACK_OBJ)
8446 if (cfg->generic_sharing_context)
8447 context_used = mono_class_check_context_used (klass);
8449 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
8450 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
8457 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8460 if (cfg->compile_aot)
8461 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
8463 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8465 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
8468 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8469 MonoMethod *mono_isinst;
8470 MonoInst *iargs [1];
8473 mono_isinst = mono_marshal_get_isinst (klass);
8476 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
8477 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8478 CHECK_CFG_EXCEPTION;
8479 g_assert (costs > 0);
8482 cfg->real_offset += 5;
8487 inline_costs += costs;
8490 ins = handle_isinst (cfg, klass, *sp, context_used);
8491 CHECK_CFG_EXCEPTION;
8498 case CEE_UNBOX_ANY: {
8502 token = read32 (ip + 1);
8503 klass = mini_get_class (method, token, generic_context);
8504 CHECK_TYPELOAD (klass);
8506 mono_save_token_info (cfg, image, token, klass);
8508 if (cfg->generic_sharing_context)
8509 context_used = mono_class_check_context_used (klass);
8511 if (generic_class_is_reference_type (cfg, klass)) {
8512 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
8513 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
8514 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
8521 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8524 /*FIXME AOT support*/
8525 if (cfg->compile_aot)
8526 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
8528 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8530 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
8531 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
8534 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8535 MonoMethod *mono_castclass;
8536 MonoInst *iargs [1];
8539 mono_castclass = mono_marshal_get_castclass (klass);
8542 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
8543 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8544 CHECK_CFG_EXCEPTION;
8545 g_assert (costs > 0);
8548 cfg->real_offset += 5;
8552 inline_costs += costs;
8554 ins = handle_castclass (cfg, klass, *sp, context_used);
8555 CHECK_CFG_EXCEPTION;
8563 if (mono_class_is_nullable (klass)) {
8564 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
8571 ins = handle_unbox (cfg, klass, sp, context_used);
8577 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
8590 token = read32 (ip + 1);
8591 klass = mini_get_class (method, token, generic_context);
8592 CHECK_TYPELOAD (klass);
8594 mono_save_token_info (cfg, image, token, klass);
8596 if (cfg->generic_sharing_context)
8597 context_used = mono_class_check_context_used (klass);
8599 if (generic_class_is_reference_type (cfg, klass)) {
8605 if (klass == mono_defaults.void_class)
8607 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
8609 /* frequent check in generic code: box (struct), brtrue */
8611 // FIXME: LLVM can't handle the inconsistent bb linking
8612 if (!mono_class_is_nullable (klass) &&
8613 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
8614 (ip [5] == CEE_BRTRUE ||
8615 ip [5] == CEE_BRTRUE_S ||
8616 ip [5] == CEE_BRFALSE ||
8617 ip [5] == CEE_BRFALSE_S)) {
8618 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
8620 MonoBasicBlock *true_bb, *false_bb;
8624 if (cfg->verbose_level > 3) {
8625 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8626 printf ("<box+brtrue opt>\n");
8634 target = ip + 1 + (signed char)(*ip);
8641 target = ip + 4 + (gint)(read32 (ip));
8645 g_assert_not_reached ();
8649 * We need to link both bblocks, since it is needed for handling stack
8650 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
8651 * Branching to only one of them would lead to inconsistencies, so
8652 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
8654 GET_BBLOCK (cfg, true_bb, target);
8655 GET_BBLOCK (cfg, false_bb, ip);
8657 mono_link_bblock (cfg, cfg->cbb, true_bb);
8658 mono_link_bblock (cfg, cfg->cbb, false_bb);
8660 if (sp != stack_start) {
8661 handle_stack_args (cfg, stack_start, sp - stack_start);
8663 CHECK_UNVERIFIABLE (cfg);
8666 if (COMPILE_LLVM (cfg)) {
8667 dreg = alloc_ireg (cfg);
8668 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
8669 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
8671 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
8673 /* The JIT can't eliminate the iconst+compare */
8674 MONO_INST_NEW (cfg, ins, OP_BR);
8675 ins->inst_target_bb = is_true ? true_bb : false_bb;
8676 MONO_ADD_INS (cfg->cbb, ins);
8679 start_new_bblock = 1;
8683 *sp++ = handle_box (cfg, val, klass, context_used);
8685 CHECK_CFG_EXCEPTION;
8694 token = read32 (ip + 1);
8695 klass = mini_get_class (method, token, generic_context);
8696 CHECK_TYPELOAD (klass);
8698 mono_save_token_info (cfg, image, token, klass);
8700 if (cfg->generic_sharing_context)
8701 context_used = mono_class_check_context_used (klass);
8703 if (mono_class_is_nullable (klass)) {
8706 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
8707 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
8711 ins = handle_unbox (cfg, klass, sp, context_used);
8724 MonoClassField *field;
8727 gboolean is_instance;
8729 gpointer addr = NULL;
8730 gboolean is_special_static;
8732 MonoInst *store_val = NULL;
8735 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
8737 if (op == CEE_STFLD) {
8745 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
8747 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
8750 if (op == CEE_STSFLD) {
8758 token = read32 (ip + 1);
8759 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8760 field = mono_method_get_wrapper_data (method, token);
8761 klass = field->parent;
8764 field = mono_field_from_token (image, token, &klass, generic_context);
8768 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8769 FIELD_ACCESS_FAILURE;
8770 mono_class_init (klass);
8772 if (is_instance && *ip != CEE_LDFLDA && is_magic_tls_access (field))
8775 /* if the class is Critical then transparent code cannot access it's fields */
8776 if (!is_instance && mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8777 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8779 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
8780 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
8781 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8782 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8786 * LDFLD etc. is usable on static fields as well, so convert those cases to
8789 if (is_instance && field->type->attrs & FIELD_ATTRIBUTE_STATIC) {
8801 g_assert_not_reached ();
8803 is_instance = FALSE;
8808 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
8809 if (op == CEE_STFLD) {
8810 if (target_type_is_incompatible (cfg, field->type, sp [1]))
8812 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8813 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
8814 MonoInst *iargs [5];
8817 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8818 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8819 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
8823 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8824 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
8825 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8826 CHECK_CFG_EXCEPTION;
8827 g_assert (costs > 0);
8829 cfg->real_offset += 5;
8832 inline_costs += costs;
8834 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
8839 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8841 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
8842 if (sp [0]->opcode != OP_LDADDR)
8843 store->flags |= MONO_INST_FAULT;
8845 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
8846 /* insert call to write barrier */
8850 dreg = alloc_ireg_mp (cfg);
8851 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8852 emit_write_barrier (cfg, ptr, sp [1], -1);
8855 store->flags |= ins_flag;
8862 if (is_instance && ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class)) {
8863 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
8864 MonoInst *iargs [4];
8867 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8868 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8869 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
8870 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8871 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
8872 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8873 CHECK_CFG_EXCEPTION;
8875 g_assert (costs > 0);
8877 cfg->real_offset += 5;
8881 inline_costs += costs;
8883 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
8886 } else if (is_instance) {
8887 if (sp [0]->type == STACK_VTYPE) {
8890 /* Have to compute the address of the variable */
8892 var = get_vreg_to_inst (cfg, sp [0]->dreg);
8894 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
8896 g_assert (var->klass == klass);
8898 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
8902 if (op == CEE_LDFLDA) {
8903 if (is_magic_tls_access (field)) {
8905 *sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
8907 if (sp [0]->type == STACK_OBJ) {
8908 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
8909 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
8912 dreg = alloc_ireg_mp (cfg);
8914 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8915 ins->klass = mono_class_from_mono_type (field->type);
8916 ins->type = STACK_MP;
8922 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8924 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
8925 load->flags |= ins_flag;
8926 if (sp [0]->opcode != OP_LDADDR)
8927 load->flags |= MONO_INST_FAULT;
8941 * We can only support shared generic static
8942 * field access on architectures where the
8943 * trampoline code has been extended to handle
8944 * the generic class init.
8946 #ifndef MONO_ARCH_VTABLE_REG
8947 GENERIC_SHARING_FAILURE (op);
8950 if (cfg->generic_sharing_context)
8951 context_used = mono_class_check_context_used (klass);
8953 ftype = mono_field_get_type (field);
8955 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
8958 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8959 * to be called here.
8961 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8962 mono_class_vtable (cfg->domain, klass);
8963 CHECK_TYPELOAD (klass);
8965 mono_domain_lock (cfg->domain);
8966 if (cfg->domain->special_static_fields)
8967 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8968 mono_domain_unlock (cfg->domain);
8970 is_special_static = mono_class_field_is_special_static (field);
8972 /* Generate IR to compute the field address */
8973 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
8975 * Fast access to TLS data
8976 * Inline version of get_thread_static_data () in
8980 int idx, static_data_reg, array_reg, dreg;
8981 MonoInst *thread_ins;
8983 // offset &= 0x7fffffff;
8984 // idx = (offset >> 24) - 1;
8985 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
8987 thread_ins = mono_get_thread_intrinsic (cfg);
8988 MONO_ADD_INS (cfg->cbb, thread_ins);
8989 static_data_reg = alloc_ireg (cfg);
8990 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
8992 if (cfg->compile_aot) {
8993 int offset_reg, offset2_reg, idx_reg;
8995 /* For TLS variables, this will return the TLS offset */
8996 EMIT_NEW_SFLDACONST (cfg, ins, field);
8997 offset_reg = ins->dreg;
8998 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
8999 idx_reg = alloc_ireg (cfg);
9000 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
9001 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
9002 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
9003 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
9004 array_reg = alloc_ireg (cfg);
9005 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
9006 offset2_reg = alloc_ireg (cfg);
9007 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
9008 dreg = alloc_ireg (cfg);
9009 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
9011 offset = (gsize)addr & 0x7fffffff;
9012 idx = (offset >> 24) - 1;
9014 array_reg = alloc_ireg (cfg);
9015 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
9016 dreg = alloc_ireg (cfg);
9017 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
9019 } else if ((cfg->opt & MONO_OPT_SHARED) ||
9020 (cfg->compile_aot && is_special_static) ||
9021 (context_used && is_special_static)) {
9022 MonoInst *iargs [2];
9024 g_assert (field->parent);
9025 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9027 iargs [1] = emit_get_rgctx_field (cfg, context_used,
9028 field, MONO_RGCTX_INFO_CLASS_FIELD);
9030 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
9032 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
9033 } else if (context_used) {
9034 MonoInst *static_data;
9037 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
9038 method->klass->name_space, method->klass->name, method->name,
9039 depth, field->offset);
9042 if (mono_class_needs_cctor_run (klass, method))
9043 emit_generic_class_init (cfg, klass);
9046 * The pointer we're computing here is
9048 * super_info.static_data + field->offset
9050 static_data = emit_get_rgctx_klass (cfg, context_used,
9051 klass, MONO_RGCTX_INFO_STATIC_DATA);
9053 if (field->offset == 0) {
9056 int addr_reg = mono_alloc_preg (cfg);
9057 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
9059 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
9060 MonoInst *iargs [2];
9062 g_assert (field->parent);
9063 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9064 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
9065 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
9067 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
9069 CHECK_TYPELOAD (klass);
9071 if (mini_field_access_needs_cctor_run (cfg, method, vtable)) {
9072 if (!(g_slist_find (class_inits, vtable))) {
9073 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
9074 if (cfg->verbose_level > 2)
9075 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
9076 class_inits = g_slist_prepend (class_inits, vtable);
9079 if (cfg->run_cctors) {
9081 /* This makes so that inline cannot trigger */
9082 /* .cctors: too many apps depend on them */
9083 /* running with a specific order... */
9084 if (! vtable->initialized)
9086 ex = mono_runtime_class_init_full (vtable, FALSE);
9088 set_exception_object (cfg, ex);
9089 goto exception_exit;
9093 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
9095 if (cfg->compile_aot)
9096 EMIT_NEW_SFLDACONST (cfg, ins, field);
9098 EMIT_NEW_PCONST (cfg, ins, addr);
9100 MonoInst *iargs [1];
9101 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
9102 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
9106 /* Generate IR to do the actual load/store operation */
9108 if (op == CEE_LDSFLDA) {
9109 ins->klass = mono_class_from_mono_type (ftype);
9110 ins->type = STACK_PTR;
9112 } else if (op == CEE_STSFLD) {
9115 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
9116 store->flags |= ins_flag;
9118 gboolean is_const = FALSE;
9119 MonoVTable *vtable = NULL;
9120 gpointer addr = NULL;
9122 if (!context_used) {
9123 vtable = mono_class_vtable (cfg->domain, klass);
9124 CHECK_TYPELOAD (klass);
9126 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
9127 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
9128 int ro_type = ftype->type;
9130 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
9131 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
9132 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
9134 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
9137 case MONO_TYPE_BOOLEAN:
9139 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
9143 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
9146 case MONO_TYPE_CHAR:
9148 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
9152 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
9157 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
9161 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
9167 case MONO_TYPE_FNPTR:
9168 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
9169 type_to_eval_stack_type ((cfg), field->type, *sp);
9172 case MONO_TYPE_STRING:
9173 case MONO_TYPE_OBJECT:
9174 case MONO_TYPE_CLASS:
9175 case MONO_TYPE_SZARRAY:
9176 case MONO_TYPE_ARRAY:
9177 if (!mono_gc_is_moving ()) {
9178 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
9179 type_to_eval_stack_type ((cfg), field->type, *sp);
9187 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
9192 case MONO_TYPE_VALUETYPE:
9202 CHECK_STACK_OVF (1);
9204 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
9205 load->flags |= ins_flag;
9218 token = read32 (ip + 1);
9219 klass = mini_get_class (method, token, generic_context);
9220 CHECK_TYPELOAD (klass);
9221 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
9222 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
9223 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
9224 generic_class_is_reference_type (cfg, klass)) {
9225 /* insert call to write barrier */
9226 emit_write_barrier (cfg, sp [0], sp [1], -1);
9238 const char *data_ptr;
9240 guint32 field_token;
9246 token = read32 (ip + 1);
9248 klass = mini_get_class (method, token, generic_context);
9249 CHECK_TYPELOAD (klass);
9251 if (cfg->generic_sharing_context)
9252 context_used = mono_class_check_context_used (klass);
9254 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
9255 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
9256 ins->sreg1 = sp [0]->dreg;
9257 ins->type = STACK_I4;
9258 ins->dreg = alloc_ireg (cfg);
9259 MONO_ADD_INS (cfg->cbb, ins);
9260 *sp = mono_decompose_opcode (cfg, ins);
9265 MonoClass *array_class = mono_array_class_get (klass, 1);
9266 /* FIXME: we cannot get a managed
9267 allocator because we can't get the
9268 open generic class's vtable. We
9269 have the same problem in
9270 handle_alloc(). This
9271 needs to be solved so that we can
9272 have managed allocs of shared
9275 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
9276 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
9278 MonoMethod *managed_alloc = NULL;
9280 /* FIXME: Decompose later to help abcrem */
9283 args [0] = emit_get_rgctx_klass (cfg, context_used,
9284 array_class, MONO_RGCTX_INFO_VTABLE);
9289 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
9291 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
9293 if (cfg->opt & MONO_OPT_SHARED) {
9294 /* Decompose now to avoid problems with references to the domainvar */
9295 MonoInst *iargs [3];
9297 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9298 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9301 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
9303 /* Decompose later since it is needed by abcrem */
9304 MonoClass *array_type = mono_array_class_get (klass, 1);
9305 mono_class_vtable (cfg->domain, array_type);
9306 CHECK_TYPELOAD (array_type);
9308 MONO_INST_NEW (cfg, ins, OP_NEWARR);
9309 ins->dreg = alloc_ireg_ref (cfg);
9310 ins->sreg1 = sp [0]->dreg;
9311 ins->inst_newa_class = klass;
9312 ins->type = STACK_OBJ;
9313 ins->klass = array_type;
9314 MONO_ADD_INS (cfg->cbb, ins);
9315 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
9316 cfg->cbb->has_array_access = TRUE;
9318 /* Needed so mono_emit_load_get_addr () gets called */
9319 mono_get_got_var (cfg);
9329 * we inline/optimize the initialization sequence if possible.
9330 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
9331 * for small sizes open code the memcpy
9332 * ensure the rva field is big enough
9334 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
9335 MonoMethod *memcpy_method = get_memcpy_method ();
9336 MonoInst *iargs [3];
9337 int add_reg = alloc_ireg_mp (cfg);
9339 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
9340 if (cfg->compile_aot) {
9341 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
9343 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
9345 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
9346 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9355 if (sp [0]->type != STACK_OBJ)
9358 MONO_INST_NEW (cfg, ins, OP_LDLEN);
9359 ins->dreg = alloc_preg (cfg);
9360 ins->sreg1 = sp [0]->dreg;
9361 ins->type = STACK_I4;
9362 /* This flag will be inherited by the decomposition */
9363 ins->flags |= MONO_INST_FAULT;
9364 MONO_ADD_INS (cfg->cbb, ins);
9365 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
9366 cfg->cbb->has_array_access = TRUE;
9374 if (sp [0]->type != STACK_OBJ)
9377 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9379 klass = mini_get_class (method, read32 (ip + 1), generic_context);
9380 CHECK_TYPELOAD (klass);
9381 /* we need to make sure that this array is exactly the type it needs
9382 * to be for correctness. the wrappers are lax with their usage
9383 * so we need to ignore them here
9385 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
9386 MonoClass *array_class = mono_array_class_get (klass, 1);
9387 mini_emit_check_array_type (cfg, sp [0], array_class);
9388 CHECK_TYPELOAD (array_class);
9392 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9407 case CEE_LDELEM_REF: {
9413 if (*ip == CEE_LDELEM) {
9415 token = read32 (ip + 1);
9416 klass = mini_get_class (method, token, generic_context);
9417 CHECK_TYPELOAD (klass);
9418 mono_class_init (klass);
9421 klass = array_access_to_klass (*ip);
9423 if (sp [0]->type != STACK_OBJ)
9426 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9428 if (sp [1]->opcode == OP_ICONST) {
9429 int array_reg = sp [0]->dreg;
9430 int index_reg = sp [1]->dreg;
9431 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
9433 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
9434 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
9436 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9437 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
9440 if (*ip == CEE_LDELEM)
9453 case CEE_STELEM_REF:
9460 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9462 if (*ip == CEE_STELEM) {
9464 token = read32 (ip + 1);
9465 klass = mini_get_class (method, token, generic_context);
9466 CHECK_TYPELOAD (klass);
9467 mono_class_init (klass);
9470 klass = array_access_to_klass (*ip);
9472 if (sp [0]->type != STACK_OBJ)
9475 /* storing a NULL doesn't need any of the complex checks in stelemref */
9476 if (generic_class_is_reference_type (cfg, klass) &&
9477 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
9478 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
9479 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
9480 MonoInst *iargs [3];
9483 mono_class_setup_vtable (obj_array);
9484 g_assert (helper->slot);
9486 if (sp [0]->type != STACK_OBJ)
9488 if (sp [2]->type != STACK_OBJ)
9495 mono_emit_method_call (cfg, helper, iargs, sp [0]);
9497 if (sp [1]->opcode == OP_ICONST) {
9498 int array_reg = sp [0]->dreg;
9499 int index_reg = sp [1]->dreg;
9500 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
9502 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
9503 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
9505 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9506 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
9510 if (*ip == CEE_STELEM)
9517 case CEE_CKFINITE: {
9521 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
9522 ins->sreg1 = sp [0]->dreg;
9523 ins->dreg = alloc_freg (cfg);
9524 ins->type = STACK_R8;
9525 MONO_ADD_INS (bblock, ins);
9527 *sp++ = mono_decompose_opcode (cfg, ins);
9532 case CEE_REFANYVAL: {
9533 MonoInst *src_var, *src;
9535 int klass_reg = alloc_preg (cfg);
9536 int dreg = alloc_preg (cfg);
9539 MONO_INST_NEW (cfg, ins, *ip);
9542 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
9543 CHECK_TYPELOAD (klass);
9544 mono_class_init (klass);
9546 if (cfg->generic_sharing_context)
9547 context_used = mono_class_check_context_used (klass);
9550 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9552 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9553 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9554 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
9557 MonoInst *klass_ins;
9559 klass_ins = emit_get_rgctx_klass (cfg, context_used,
9560 klass, MONO_RGCTX_INFO_KLASS);
9563 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
9564 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
9566 mini_emit_class_check (cfg, klass_reg, klass);
9568 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
9569 ins->type = STACK_MP;
9574 case CEE_MKREFANY: {
9575 MonoInst *loc, *addr;
9578 MONO_INST_NEW (cfg, ins, *ip);
9581 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
9582 CHECK_TYPELOAD (klass);
9583 mono_class_init (klass);
9585 if (cfg->generic_sharing_context)
9586 context_used = mono_class_check_context_used (klass);
9588 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
9589 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
9592 MonoInst *const_ins;
9593 int type_reg = alloc_preg (cfg);
9595 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
9596 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
9597 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
9598 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
9599 } else if (cfg->compile_aot) {
9600 int const_reg = alloc_preg (cfg);
9601 int type_reg = alloc_preg (cfg);
9603 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
9604 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
9605 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
9606 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
9608 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
9609 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
9611 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
9613 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
9614 ins->type = STACK_VTYPE;
9615 ins->klass = mono_defaults.typed_reference_class;
9622 MonoClass *handle_class;
9624 CHECK_STACK_OVF (1);
9627 n = read32 (ip + 1);
9629 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
9630 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9631 handle = mono_method_get_wrapper_data (method, n);
9632 handle_class = mono_method_get_wrapper_data (method, n + 1);
9633 if (handle_class == mono_defaults.typehandle_class)
9634 handle = &((MonoClass*)handle)->byval_arg;
9637 handle = mono_ldtoken (image, n, &handle_class, generic_context);
9641 mono_class_init (handle_class);
9642 if (cfg->generic_sharing_context) {
9643 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
9644 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
9645 /* This case handles ldtoken
9646 of an open type, like for
9649 } else if (handle_class == mono_defaults.typehandle_class) {
9650 /* If we get a MONO_TYPE_CLASS
9651 then we need to provide the
9653 instantiation of it. */
9654 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
9657 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
9658 } else if (handle_class == mono_defaults.fieldhandle_class)
9659 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
9660 else if (handle_class == mono_defaults.methodhandle_class)
9661 context_used = mono_method_check_context_used (handle);
9663 g_assert_not_reached ();
9666 if ((cfg->opt & MONO_OPT_SHARED) &&
9667 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
9668 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
9669 MonoInst *addr, *vtvar, *iargs [3];
9670 int method_context_used;
9672 if (cfg->generic_sharing_context)
9673 method_context_used = mono_method_check_context_used (method);
9675 method_context_used = 0;
9677 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9679 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9680 EMIT_NEW_ICONST (cfg, iargs [1], n);
9681 if (method_context_used) {
9682 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
9683 method, MONO_RGCTX_INFO_METHOD);
9684 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
9686 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
9687 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
9689 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9691 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9693 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9695 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
9696 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
9697 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
9698 (cmethod->klass == mono_defaults.monotype_class->parent) &&
9699 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
9700 MonoClass *tclass = mono_class_from_mono_type (handle);
9702 mono_class_init (tclass);
9704 ins = emit_get_rgctx_klass (cfg, context_used,
9705 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
9706 } else if (cfg->compile_aot) {
9707 if (method->wrapper_type) {
9708 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
9709 /* Special case for static synchronized wrappers */
9710 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
9712 /* FIXME: n is not a normal token */
9713 cfg->disable_aot = TRUE;
9714 EMIT_NEW_PCONST (cfg, ins, NULL);
9717 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
9720 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
9722 ins->type = STACK_OBJ;
9723 ins->klass = cmethod->klass;
9726 MonoInst *addr, *vtvar;
9728 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9731 if (handle_class == mono_defaults.typehandle_class) {
9732 ins = emit_get_rgctx_klass (cfg, context_used,
9733 mono_class_from_mono_type (handle),
9734 MONO_RGCTX_INFO_TYPE);
9735 } else if (handle_class == mono_defaults.methodhandle_class) {
9736 ins = emit_get_rgctx_method (cfg, context_used,
9737 handle, MONO_RGCTX_INFO_METHOD);
9738 } else if (handle_class == mono_defaults.fieldhandle_class) {
9739 ins = emit_get_rgctx_field (cfg, context_used,
9740 handle, MONO_RGCTX_INFO_CLASS_FIELD);
9742 g_assert_not_reached ();
9744 } else if (cfg->compile_aot) {
9745 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
9747 EMIT_NEW_PCONST (cfg, ins, handle);
9749 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9750 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9751 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9761 MONO_INST_NEW (cfg, ins, OP_THROW);
9763 ins->sreg1 = sp [0]->dreg;
9765 bblock->out_of_line = TRUE;
9766 MONO_ADD_INS (bblock, ins);
9767 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
9768 MONO_ADD_INS (bblock, ins);
9771 link_bblock (cfg, bblock, end_bblock);
9772 start_new_bblock = 1;
9774 case CEE_ENDFINALLY:
9775 /* mono_save_seq_point_info () depends on this */
9776 if (sp != stack_start)
9777 emit_seq_point (cfg, method, ip, FALSE);
9778 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
9779 MONO_ADD_INS (bblock, ins);
9781 start_new_bblock = 1;
9784 * Control will leave the method so empty the stack, otherwise
9785 * the next basic block will start with a nonempty stack.
9787 while (sp != stack_start) {
9795 if (*ip == CEE_LEAVE) {
9797 target = ip + 5 + (gint32)read32(ip + 1);
9800 target = ip + 2 + (signed char)(ip [1]);
9803 /* empty the stack */
9804 while (sp != stack_start) {
9809 * If this leave statement is in a catch block, check for a
9810 * pending exception, and rethrow it if necessary.
9811 * We avoid doing this in runtime invoke wrappers, since those are called
9812 * by native code which excepts the wrapper to catch all exceptions.
9814 for (i = 0; i < header->num_clauses; ++i) {
9815 MonoExceptionClause *clause = &header->clauses [i];
9818 * Use <= in the final comparison to handle clauses with multiple
9819 * leave statements, like in bug #78024.
9820 * The ordering of the exception clauses guarantees that we find the
9823 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
9825 MonoBasicBlock *dont_throw;
9830 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
9833 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
9835 NEW_BBLOCK (cfg, dont_throw);
9838 * Currently, we always rethrow the abort exception, despite the
9839 * fact that this is not correct. See thread6.cs for an example.
9840 * But propagating the abort exception is more important than
9841 * getting the sematics right.
9843 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
9844 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
9845 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
9847 MONO_START_BB (cfg, dont_throw);
9852 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
9854 MonoExceptionClause *clause;
9856 for (tmp = handlers; tmp; tmp = tmp->next) {
9858 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
9860 link_bblock (cfg, bblock, tblock);
9861 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
9862 ins->inst_target_bb = tblock;
9863 ins->inst_eh_block = clause;
9864 MONO_ADD_INS (bblock, ins);
9865 bblock->has_call_handler = 1;
9866 if (COMPILE_LLVM (cfg)) {
9867 MonoBasicBlock *target_bb;
9870 * Link the finally bblock with the target, since it will
9871 * conceptually branch there.
9872 * FIXME: Have to link the bblock containing the endfinally.
9874 GET_BBLOCK (cfg, target_bb, target);
9875 link_bblock (cfg, tblock, target_bb);
9878 g_list_free (handlers);
9881 MONO_INST_NEW (cfg, ins, OP_BR);
9882 MONO_ADD_INS (bblock, ins);
9883 GET_BBLOCK (cfg, tblock, target);
9884 link_bblock (cfg, bblock, tblock);
9885 ins->inst_target_bb = tblock;
9886 start_new_bblock = 1;
9888 if (*ip == CEE_LEAVE)
9897 * Mono specific opcodes
9899 case MONO_CUSTOM_PREFIX: {
9901 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
9905 case CEE_MONO_ICALL: {
9907 MonoJitICallInfo *info;
9909 token = read32 (ip + 2);
9910 func = mono_method_get_wrapper_data (method, token);
9911 info = mono_find_jit_icall_by_addr (func);
9914 CHECK_STACK (info->sig->param_count);
9915 sp -= info->sig->param_count;
9917 ins = mono_emit_jit_icall (cfg, info->func, sp);
9918 if (!MONO_TYPE_IS_VOID (info->sig->ret))
9922 inline_costs += 10 * num_calls++;
9926 case CEE_MONO_LDPTR: {
9929 CHECK_STACK_OVF (1);
9931 token = read32 (ip + 2);
9933 ptr = mono_method_get_wrapper_data (method, token);
9934 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
9935 MonoJitICallInfo *callinfo;
9936 const char *icall_name;
9938 icall_name = method->name + strlen ("__icall_wrapper_");
9939 g_assert (icall_name);
9940 callinfo = mono_find_jit_icall_by_name (icall_name);
9941 g_assert (callinfo);
9943 if (ptr == callinfo->func) {
9944 /* Will be transformed into an AOTCONST later */
9945 EMIT_NEW_PCONST (cfg, ins, ptr);
9951 /* FIXME: Generalize this */
9952 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
9953 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
9958 EMIT_NEW_PCONST (cfg, ins, ptr);
9961 inline_costs += 10 * num_calls++;
9962 /* Can't embed random pointers into AOT code */
9963 cfg->disable_aot = 1;
9966 case CEE_MONO_ICALL_ADDR: {
9967 MonoMethod *cmethod;
9970 CHECK_STACK_OVF (1);
9972 token = read32 (ip + 2);
9974 cmethod = mono_method_get_wrapper_data (method, token);
9976 if (cfg->compile_aot) {
9977 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
9979 ptr = mono_lookup_internal_call (cmethod);
9981 EMIT_NEW_PCONST (cfg, ins, ptr);
9987 case CEE_MONO_VTADDR: {
9988 MonoInst *src_var, *src;
9994 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9995 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
10000 case CEE_MONO_NEWOBJ: {
10001 MonoInst *iargs [2];
10003 CHECK_STACK_OVF (1);
10005 token = read32 (ip + 2);
10006 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
10007 mono_class_init (klass);
10008 NEW_DOMAINCONST (cfg, iargs [0]);
10009 MONO_ADD_INS (cfg->cbb, iargs [0]);
10010 NEW_CLASSCONST (cfg, iargs [1], klass);
10011 MONO_ADD_INS (cfg->cbb, iargs [1]);
10012 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
10014 inline_costs += 10 * num_calls++;
10017 case CEE_MONO_OBJADDR:
10020 MONO_INST_NEW (cfg, ins, OP_MOVE);
10021 ins->dreg = alloc_ireg_mp (cfg);
10022 ins->sreg1 = sp [0]->dreg;
10023 ins->type = STACK_MP;
10024 MONO_ADD_INS (cfg->cbb, ins);
10028 case CEE_MONO_LDNATIVEOBJ:
10030 * Similar to LDOBJ, but instead load the unmanaged
10031 * representation of the vtype to the stack.
10036 token = read32 (ip + 2);
10037 klass = mono_method_get_wrapper_data (method, token);
10038 g_assert (klass->valuetype);
10039 mono_class_init (klass);
10042 MonoInst *src, *dest, *temp;
10045 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
10046 temp->backend.is_pinvoke = 1;
10047 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
10048 mini_emit_stobj (cfg, dest, src, klass, TRUE);
10050 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
10051 dest->type = STACK_VTYPE;
10052 dest->klass = klass;
10058 case CEE_MONO_RETOBJ: {
10060 * Same as RET, but return the native representation of a vtype
10063 g_assert (cfg->ret);
10064 g_assert (mono_method_signature (method)->pinvoke);
10069 token = read32 (ip + 2);
10070 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
10072 if (!cfg->vret_addr) {
10073 g_assert (cfg->ret_var_is_local);
10075 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
10077 EMIT_NEW_RETLOADA (cfg, ins);
10079 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
10081 if (sp != stack_start)
10084 MONO_INST_NEW (cfg, ins, OP_BR);
10085 ins->inst_target_bb = end_bblock;
10086 MONO_ADD_INS (bblock, ins);
10087 link_bblock (cfg, bblock, end_bblock);
10088 start_new_bblock = 1;
10092 case CEE_MONO_CISINST:
10093 case CEE_MONO_CCASTCLASS: {
10098 token = read32 (ip + 2);
10099 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
10100 if (ip [1] == CEE_MONO_CISINST)
10101 ins = handle_cisinst (cfg, klass, sp [0]);
10103 ins = handle_ccastclass (cfg, klass, sp [0]);
10109 case CEE_MONO_SAVE_LMF:
10110 case CEE_MONO_RESTORE_LMF:
10111 #ifdef MONO_ARCH_HAVE_LMF_OPS
10112 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
10113 MONO_ADD_INS (bblock, ins);
10114 cfg->need_lmf_area = TRUE;
10118 case CEE_MONO_CLASSCONST:
10119 CHECK_STACK_OVF (1);
10121 token = read32 (ip + 2);
10122 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
10125 inline_costs += 10 * num_calls++;
10127 case CEE_MONO_NOT_TAKEN:
10128 bblock->out_of_line = TRUE;
10132 CHECK_STACK_OVF (1);
10134 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
10135 ins->dreg = alloc_preg (cfg);
10136 ins->inst_offset = (gint32)read32 (ip + 2);
10137 ins->type = STACK_PTR;
10138 MONO_ADD_INS (bblock, ins);
10142 case CEE_MONO_DYN_CALL: {
10143 MonoCallInst *call;
10145 /* It would be easier to call a trampoline, but that would put an
10146 * extra frame on the stack, confusing exception handling. So
10147 * implement it inline using an opcode for now.
10150 if (!cfg->dyn_call_var) {
10151 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
10152 /* prevent it from being register allocated */
10153 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
10156 /* Has to use a call inst since it local regalloc expects it */
10157 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
10158 ins = (MonoInst*)call;
10160 ins->sreg1 = sp [0]->dreg;
10161 ins->sreg2 = sp [1]->dreg;
10162 MONO_ADD_INS (bblock, ins);
10164 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
10165 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
10169 inline_costs += 10 * num_calls++;
10173 case CEE_MONO_MEMORY_BARRIER: {
10175 emit_memory_barrier (cfg, (int)read32 (ip + 1));
10179 case CEE_MONO_JIT_ATTACH: {
10180 MonoInst *args [16];
10181 MonoInst *ad_ins, *lmf_ins;
10182 MonoBasicBlock *next_bb = NULL;
10184 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
10186 EMIT_NEW_PCONST (cfg, ins, NULL);
10187 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
10193 ad_ins = mono_get_domain_intrinsic (cfg);
10194 lmf_ins = mono_get_lmf_intrinsic (cfg);
10197 #ifdef MONO_ARCH_HAVE_TLS_GET
10198 if (MONO_ARCH_HAVE_TLS_GET && ad_ins && lmf_ins) {
10199 NEW_BBLOCK (cfg, next_bb);
10201 MONO_ADD_INS (cfg->cbb, ad_ins);
10202 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ad_ins->dreg, 0);
10203 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
10205 MONO_ADD_INS (cfg->cbb, lmf_ins);
10206 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, lmf_ins->dreg, 0);
10207 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
10211 if (cfg->compile_aot) {
10212 /* AOT code is only used in the root domain */
10213 EMIT_NEW_PCONST (cfg, args [0], NULL);
10215 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
10217 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
10218 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
10221 MONO_START_BB (cfg, next_bb);
10227 case CEE_MONO_JIT_DETACH: {
10228 MonoInst *args [16];
10230 /* Restore the original domain */
10231 dreg = alloc_ireg (cfg);
10232 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
10233 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
10238 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
10244 case CEE_PREFIX1: {
10247 case CEE_ARGLIST: {
10248 /* somewhat similar to LDTOKEN */
10249 MonoInst *addr, *vtvar;
10250 CHECK_STACK_OVF (1);
10251 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
10253 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
10254 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
10256 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
10257 ins->type = STACK_VTYPE;
10258 ins->klass = mono_defaults.argumenthandle_class;
10271 * The following transforms:
10272 * CEE_CEQ into OP_CEQ
10273 * CEE_CGT into OP_CGT
10274 * CEE_CGT_UN into OP_CGT_UN
10275 * CEE_CLT into OP_CLT
10276 * CEE_CLT_UN into OP_CLT_UN
10278 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
10280 MONO_INST_NEW (cfg, ins, cmp->opcode);
10282 cmp->sreg1 = sp [0]->dreg;
10283 cmp->sreg2 = sp [1]->dreg;
10284 type_from_op (cmp, sp [0], sp [1]);
10286 if ((sp [0]->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
10287 cmp->opcode = OP_LCOMPARE;
10288 else if (sp [0]->type == STACK_R8)
10289 cmp->opcode = OP_FCOMPARE;
10291 cmp->opcode = OP_ICOMPARE;
10292 MONO_ADD_INS (bblock, cmp);
10293 ins->type = STACK_I4;
10294 ins->dreg = alloc_dreg (cfg, ins->type);
10295 type_from_op (ins, sp [0], sp [1]);
10297 if (cmp->opcode == OP_FCOMPARE) {
10299 * The backends expect the fceq opcodes to do the
10302 cmp->opcode = OP_NOP;
10303 ins->sreg1 = cmp->sreg1;
10304 ins->sreg2 = cmp->sreg2;
10306 MONO_ADD_INS (bblock, ins);
10312 MonoInst *argconst;
10313 MonoMethod *cil_method;
10315 CHECK_STACK_OVF (1);
10317 n = read32 (ip + 2);
10318 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
10319 if (!cmethod || mono_loader_get_last_error ())
10321 mono_class_init (cmethod->klass);
10323 mono_save_token_info (cfg, image, n, cmethod);
10325 if (cfg->generic_sharing_context)
10326 context_used = mono_method_check_context_used (cmethod);
10328 cil_method = cmethod;
10329 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
10330 METHOD_ACCESS_FAILURE;
10332 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
10333 if (check_linkdemand (cfg, method, cmethod))
10335 CHECK_CFG_EXCEPTION;
10336 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
10337 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
10341 * Optimize the common case of ldftn+delegate creation
10343 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
10344 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
10345 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
10346 MonoInst *target_ins;
10347 MonoMethod *invoke;
10348 int invoke_context_used = 0;
10350 invoke = mono_get_delegate_invoke (ctor_method->klass);
10351 if (!invoke || !mono_method_signature (invoke))
10354 if (cfg->generic_sharing_context)
10355 invoke_context_used = mono_method_check_context_used (invoke);
10357 target_ins = sp [-1];
10359 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
10360 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
10362 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
10363 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
10364 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
10365 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
10366 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
10370 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
10371 /* FIXME: SGEN support */
10372 if (invoke_context_used == 0) {
10374 if (cfg->verbose_level > 3)
10375 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10377 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
10378 CHECK_CFG_EXCEPTION;
10387 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
10388 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
10392 inline_costs += 10 * num_calls++;
10395 case CEE_LDVIRTFTN: {
10396 MonoInst *args [2];
10400 n = read32 (ip + 2);
10401 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
10402 if (!cmethod || mono_loader_get_last_error ())
10404 mono_class_init (cmethod->klass);
10406 if (cfg->generic_sharing_context)
10407 context_used = mono_method_check_context_used (cmethod);
10409 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
10410 if (check_linkdemand (cfg, method, cmethod))
10412 CHECK_CFG_EXCEPTION;
10413 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
10414 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
10420 args [1] = emit_get_rgctx_method (cfg, context_used,
10421 cmethod, MONO_RGCTX_INFO_METHOD);
10424 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
10426 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
10429 inline_costs += 10 * num_calls++;
10433 CHECK_STACK_OVF (1);
10435 n = read16 (ip + 2);
10437 EMIT_NEW_ARGLOAD (cfg, ins, n);
10442 CHECK_STACK_OVF (1);
10444 n = read16 (ip + 2);
10446 NEW_ARGLOADA (cfg, ins, n);
10447 MONO_ADD_INS (cfg->cbb, ins);
10455 n = read16 (ip + 2);
10457 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
10459 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
10463 CHECK_STACK_OVF (1);
10465 n = read16 (ip + 2);
10467 EMIT_NEW_LOCLOAD (cfg, ins, n);
10472 unsigned char *tmp_ip;
10473 CHECK_STACK_OVF (1);
10475 n = read16 (ip + 2);
10478 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
10484 EMIT_NEW_LOCLOADA (cfg, ins, n);
10493 n = read16 (ip + 2);
10495 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
10497 emit_stloc_ir (cfg, sp, header, n);
10504 if (sp != stack_start)
10506 if (cfg->method != method)
10508 * Inlining this into a loop in a parent could lead to
10509 * stack overflows which is different behavior than the
10510 * non-inlined case, thus disable inlining in this case.
10512 goto inline_failure;
10514 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
10515 ins->dreg = alloc_preg (cfg);
10516 ins->sreg1 = sp [0]->dreg;
10517 ins->type = STACK_PTR;
10518 MONO_ADD_INS (cfg->cbb, ins);
10520 cfg->flags |= MONO_CFG_HAS_ALLOCA;
10522 ins->flags |= MONO_INST_INIT;
10527 case CEE_ENDFILTER: {
10528 MonoExceptionClause *clause, *nearest;
10529 int cc, nearest_num;
10533 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
10535 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
10536 ins->sreg1 = (*sp)->dreg;
10537 MONO_ADD_INS (bblock, ins);
10538 start_new_bblock = 1;
10543 for (cc = 0; cc < header->num_clauses; ++cc) {
10544 clause = &header->clauses [cc];
10545 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
10546 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
10547 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
10552 g_assert (nearest);
10553 if ((ip - header->code) != nearest->handler_offset)
10558 case CEE_UNALIGNED_:
10559 ins_flag |= MONO_INST_UNALIGNED;
10560 /* FIXME: record alignment? we can assume 1 for now */
10564 case CEE_VOLATILE_:
10565 ins_flag |= MONO_INST_VOLATILE;
10569 ins_flag |= MONO_INST_TAILCALL;
10570 cfg->flags |= MONO_CFG_HAS_TAIL;
10571 /* Can't inline tail calls at this time */
10572 inline_costs += 100000;
10579 token = read32 (ip + 2);
10580 klass = mini_get_class (method, token, generic_context);
10581 CHECK_TYPELOAD (klass);
10582 if (generic_class_is_reference_type (cfg, klass))
10583 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
10585 mini_emit_initobj (cfg, *sp, NULL, klass);
10589 case CEE_CONSTRAINED_:
10591 token = read32 (ip + 2);
10592 if (method->wrapper_type != MONO_WRAPPER_NONE)
10593 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
10595 constrained_call = mono_class_get_full (image, token, generic_context);
10596 CHECK_TYPELOAD (constrained_call);
10600 case CEE_INITBLK: {
10601 MonoInst *iargs [3];
10605 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
10606 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
10607 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
10608 /* emit_memset only works when val == 0 */
10609 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
10611 iargs [0] = sp [0];
10612 iargs [1] = sp [1];
10613 iargs [2] = sp [2];
10614 if (ip [1] == CEE_CPBLK) {
10615 MonoMethod *memcpy_method = get_memcpy_method ();
10616 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
10618 MonoMethod *memset_method = get_memset_method ();
10619 mono_emit_method_call (cfg, memset_method, iargs, NULL);
10629 ins_flag |= MONO_INST_NOTYPECHECK;
10631 ins_flag |= MONO_INST_NORANGECHECK;
10632 /* we ignore the no-nullcheck for now since we
10633 * really do it explicitly only when doing callvirt->call
10637 case CEE_RETHROW: {
10639 int handler_offset = -1;
10641 for (i = 0; i < header->num_clauses; ++i) {
10642 MonoExceptionClause *clause = &header->clauses [i];
10643 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
10644 handler_offset = clause->handler_offset;
10649 bblock->flags |= BB_EXCEPTION_UNSAFE;
10651 g_assert (handler_offset != -1);
10653 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
10654 MONO_INST_NEW (cfg, ins, OP_RETHROW);
10655 ins->sreg1 = load->dreg;
10656 MONO_ADD_INS (bblock, ins);
10658 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
10659 MONO_ADD_INS (bblock, ins);
10662 link_bblock (cfg, bblock, end_bblock);
10663 start_new_bblock = 1;
10671 CHECK_STACK_OVF (1);
10673 token = read32 (ip + 2);
10674 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic && !generic_context) {
10675 MonoType *type = mono_type_create_from_typespec (image, token);
10676 token = mono_type_size (type, &ialign);
10678 MonoClass *klass = mono_class_get_full (image, token, generic_context);
10679 CHECK_TYPELOAD (klass);
10680 mono_class_init (klass);
10681 token = mono_class_value_size (klass, &align);
10683 EMIT_NEW_ICONST (cfg, ins, token);
10688 case CEE_REFANYTYPE: {
10689 MonoInst *src_var, *src;
10695 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10697 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
10698 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
10699 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
10704 case CEE_READONLY_:
10717 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
10727 g_warning ("opcode 0x%02x not handled", *ip);
10731 if (start_new_bblock != 1)
10734 bblock->cil_length = ip - bblock->cil_code;
10735 if (bblock->next_bb) {
10736 /* This could already be set because of inlining, #693905 */
10737 MonoBasicBlock *bb = bblock;
10739 while (bb->next_bb)
10741 bb->next_bb = end_bblock;
10743 bblock->next_bb = end_bblock;
10746 if (cfg->method == method && cfg->domainvar) {
10748 MonoInst *get_domain;
10750 cfg->cbb = init_localsbb;
10752 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
10753 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
10756 get_domain->dreg = alloc_preg (cfg);
10757 MONO_ADD_INS (cfg->cbb, get_domain);
10759 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
10760 MONO_ADD_INS (cfg->cbb, store);
10763 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
10764 if (cfg->compile_aot)
10765 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
10766 mono_get_got_var (cfg);
10769 if (cfg->method == method && cfg->got_var)
10770 mono_emit_load_got_addr (cfg);
10775 cfg->cbb = init_localsbb;
10777 for (i = 0; i < header->num_locals; ++i) {
10778 MonoType *ptype = header->locals [i];
10779 int t = ptype->type;
10780 dreg = cfg->locals [i]->dreg;
10782 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
10783 t = mono_class_enum_basetype (ptype->data.klass)->type;
10784 if (ptype->byref) {
10785 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10786 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
10787 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
10788 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
10789 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
10790 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
10791 MONO_INST_NEW (cfg, ins, OP_R8CONST);
10792 ins->type = STACK_R8;
10793 ins->inst_p0 = (void*)&r8_0;
10794 ins->dreg = alloc_dreg (cfg, STACK_R8);
10795 MONO_ADD_INS (init_localsbb, ins);
10796 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
10797 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
10798 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
10799 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
10801 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10806 if (cfg->init_ref_vars && cfg->method == method) {
10807 /* Emit initialization for ref vars */
10808 // FIXME: Avoid duplication initialization for IL locals.
10809 for (i = 0; i < cfg->num_varinfo; ++i) {
10810 MonoInst *ins = cfg->varinfo [i];
10812 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
10813 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
10818 MonoBasicBlock *bb;
10821 * Make seq points at backward branch targets interruptable.
10823 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
10824 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
10825 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
10828 /* Add a sequence point for method entry/exit events */
10830 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
10831 MONO_ADD_INS (init_localsbb, ins);
10832 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
10833 MONO_ADD_INS (cfg->bb_exit, ins);
10838 if (cfg->method == method) {
10839 MonoBasicBlock *bb;
10840 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10841 bb->region = mono_find_block_region (cfg, bb->real_offset);
10843 mono_create_spvar_for_region (cfg, bb->region);
10844 if (cfg->verbose_level > 2)
10845 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
10849 g_slist_free (class_inits);
10850 dont_inline = g_list_remove (dont_inline, method);
10852 if (inline_costs < 0) {
10855 /* Method is too large */
10856 mname = mono_method_full_name (method, TRUE);
10857 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
10858 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
10860 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10861 mono_basic_block_free (original_bb);
10865 if ((cfg->verbose_level > 2) && (cfg->method == method))
10866 mono_print_code (cfg, "AFTER METHOD-TO-IR");
10868 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10869 mono_basic_block_free (original_bb);
10870 return inline_costs;
10873 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
10880 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
10884 set_exception_type_from_invalid_il (cfg, method, ip);
10888 g_slist_free (class_inits);
10889 mono_basic_block_free (original_bb);
10890 dont_inline = g_list_remove (dont_inline, method);
10891 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a *_MEMBASE_REG store opcode to its *_MEMBASE_IMM variant, used when
 * the value being stored is known to be a constant so the register operand
 * can be replaced by an immediate.  Asserts if the opcode has no immediate
 * form.
 */
store_membase_reg_to_store_membase_imm (int opcode)
	case OP_STORE_MEMBASE_REG:
		return OP_STORE_MEMBASE_IMM;
	case OP_STOREI1_MEMBASE_REG:
		return OP_STOREI1_MEMBASE_IMM;
	case OP_STOREI2_MEMBASE_REG:
		return OP_STOREI2_MEMBASE_IMM;
	case OP_STOREI4_MEMBASE_REG:
		return OP_STOREI4_MEMBASE_IMM;
	case OP_STOREI8_MEMBASE_REG:
		return OP_STOREI8_MEMBASE_IMM;
	/* Caller must only pass one of the store opcodes above */
	g_assert_not_reached ();
10916 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 *
 *   Map an IR opcode to its immediate-operand variant (e.g. integer add ->
 * OP_IADD_IMM), so constant operands do not need to be loaded into a
 * register first.  Opcodes with no immediate form fall through to the
 * (elided) default.
 * NOTE(review): the case labels are elided in this excerpt; each return below
 * corresponds to the non-immediate opcode implied by its name.
 */
mono_op_to_op_imm (int opcode)
		/* 32 bit integer ALU ops */
		return OP_IADD_IMM;
		return OP_ISUB_IMM;
		return OP_IDIV_IMM;
		return OP_IDIV_UN_IMM;
		return OP_IREM_IMM;
		return OP_IREM_UN_IMM;
		return OP_IMUL_IMM;
		return OP_IAND_IMM;
		return OP_IXOR_IMM;
		return OP_ISHL_IMM;
		return OP_ISHR_IMM;
		return OP_ISHR_UN_IMM;
		/* 64 bit integer ALU ops */
		return OP_LADD_IMM;
		return OP_LSUB_IMM;
		return OP_LAND_IMM;
		return OP_LXOR_IMM;
		return OP_LSHL_IMM;
		return OP_LSHR_IMM;
		return OP_LSHR_UN_IMM;
		/* Compares */
		return OP_COMPARE_IMM;
		return OP_ICOMPARE_IMM;
		return OP_LCOMPARE_IMM;
	/* Stores of a constant value */
	case OP_STORE_MEMBASE_REG:
		return OP_STORE_MEMBASE_IMM;
	case OP_STOREI1_MEMBASE_REG:
		return OP_STOREI1_MEMBASE_IMM;
	case OP_STOREI2_MEMBASE_REG:
		return OP_STOREI2_MEMBASE_IMM;
	case OP_STOREI4_MEMBASE_REG:
		return OP_STOREI4_MEMBASE_IMM;
#if defined(TARGET_X86) || defined (TARGET_AMD64)
		return OP_X86_PUSH_IMM;
	case OP_X86_COMPARE_MEMBASE_REG:
		return OP_X86_COMPARE_MEMBASE_IMM;
#if defined(TARGET_AMD64)
	case OP_AMD64_ICOMPARE_MEMBASE_REG:
		return OP_AMD64_ICOMPARE_MEMBASE_IMM;
	/* An indirect call through a constant address becomes a direct call */
	case OP_VOIDCALL_REG:
		return OP_VOIDCALL;
		return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CEE_LDIND_* CIL opcode to the equivalent OP_LOAD*_MEMBASE IR
 * opcode.  Asserts on anything that is not an ldind opcode.
 * NOTE(review): most case labels are elided in this excerpt; each return
 * corresponds to the ldind size/signedness implied by its name.
 */
ldind_to_load_membase (int opcode)
		return OP_LOADI1_MEMBASE;
		return OP_LOADU1_MEMBASE;
		return OP_LOADI2_MEMBASE;
		return OP_LOADU2_MEMBASE;
		return OP_LOADI4_MEMBASE;
		return OP_LOADU4_MEMBASE;
		return OP_LOAD_MEMBASE;
	case CEE_LDIND_REF:
		/* Object references load as a full pointer-sized word */
		return OP_LOAD_MEMBASE;
		return OP_LOADI8_MEMBASE;
		return OP_LOADR4_MEMBASE;
		return OP_LOADR8_MEMBASE;
	/* Not a CEE_LDIND_* opcode */
	g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CEE_STIND_* CIL opcode to the equivalent OP_STORE*_MEMBASE_REG IR
 * opcode.  Asserts on anything that is not a stind opcode.
 * NOTE(review): most case labels are elided in this excerpt.
 */
stind_to_store_membase (int opcode)
		return OP_STOREI1_MEMBASE_REG;
		return OP_STOREI2_MEMBASE_REG;
		return OP_STOREI4_MEMBASE_REG;
	case CEE_STIND_REF:
		/* Object references store as a full pointer-sized word */
		return OP_STORE_MEMBASE_REG;
		return OP_STOREI8_MEMBASE_REG;
		return OP_STORER4_MEMBASE_REG;
		return OP_STORER8_MEMBASE_REG;
	/* Not a CEE_STIND_* opcode */
	g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a base+offset load (*_MEMBASE) to the absolute-address *_MEM form.
 * Only x86/amd64 have such addressing modes; 8 byte loads are only mapped
 * when registers are 64 bits wide.
 */
mono_load_membase_to_load_mem (int opcode)
	// FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	case OP_LOAD_MEMBASE:
		return OP_LOAD_MEM;
	case OP_LOADU1_MEMBASE:
		return OP_LOADU1_MEM;
	case OP_LOADU2_MEMBASE:
		return OP_LOADU2_MEM;
	case OP_LOADI4_MEMBASE:
		return OP_LOADI4_MEM;
	case OP_LOADU4_MEMBASE:
		return OP_LOADU4_MEM;
#if SIZEOF_REGISTER == 8
	case OP_LOADI8_MEMBASE:
		return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Return an arch-specific opcode which performs OPCODE with its destination
 * operand directly in memory (the base+offset described by a following store
 * of kind STORE_OPCODE), allowing a load+op+store sequence to be fused into a
 * single read-modify-write instruction.  Callers treat -1 (the elided
 * default) as "no such opcode" — see the != -1 checks at the call sites.
 * NOTE(review): the case labels are elided in this excerpt.
 */
op_to_op_dest_membase (int store_opcode, int opcode)
#if defined(TARGET_X86)
	/* x86 RMW forms only exist for 32 bit stores */
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
		return OP_X86_ADD_MEMBASE_REG;
		return OP_X86_SUB_MEMBASE_REG;
		return OP_X86_AND_MEMBASE_REG;
		return OP_X86_OR_MEMBASE_REG;
		return OP_X86_XOR_MEMBASE_REG;
		return OP_X86_ADD_MEMBASE_IMM;
		return OP_X86_SUB_MEMBASE_IMM;
		return OP_X86_AND_MEMBASE_IMM;
		return OP_X86_OR_MEMBASE_IMM;
		return OP_X86_XOR_MEMBASE_IMM;
#if defined(TARGET_AMD64)
	/* amd64 additionally fuses 64 bit stores */
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
		/* 32 bit ops reuse the x86 opcodes */
		return OP_X86_ADD_MEMBASE_REG;
		return OP_X86_SUB_MEMBASE_REG;
		return OP_X86_AND_MEMBASE_REG;
		return OP_X86_OR_MEMBASE_REG;
		return OP_X86_XOR_MEMBASE_REG;
		return OP_X86_ADD_MEMBASE_IMM;
		return OP_X86_SUB_MEMBASE_IMM;
		return OP_X86_AND_MEMBASE_IMM;
		return OP_X86_OR_MEMBASE_IMM;
		return OP_X86_XOR_MEMBASE_IMM;
		/* 64 bit ops use the AMD64 opcodes */
		return OP_AMD64_ADD_MEMBASE_REG;
		return OP_AMD64_SUB_MEMBASE_REG;
		return OP_AMD64_AND_MEMBASE_REG;
		return OP_AMD64_OR_MEMBASE_REG;
		return OP_AMD64_XOR_MEMBASE_REG;
		return OP_AMD64_ADD_MEMBASE_IMM;
		return OP_AMD64_SUB_MEMBASE_IMM;
		return OP_AMD64_AND_MEMBASE_IMM;
		return OP_AMD64_OR_MEMBASE_IMM;
		return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Return an arch-specific opcode which stores the result of OPCODE directly
 * to memory (fusing e.g. a setcc with the following 1 byte store), or -1 if
 * no fused form exists — see the != -1 checks at the call sites.
 * NOTE(review): the case labels (condition-result opcodes) are elided in this
 * excerpt.
 */
op_to_op_store_membase (int store_opcode, int opcode)
#if defined(TARGET_X86) || defined(TARGET_AMD64)
		/* setcc writes a single byte, so only fuse 1 byte stores */
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETEQ_MEMBASE;
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Return an arch-specific opcode which reads its first source operand
 * directly from memory (the base+offset described by a preceding load of
 * kind LOAD_OPCODE), or -1 if the load cannot be fused — see the != -1
 * checks at the call sites.
 * NOTE(review): several case labels and default branches are elided in this
 * excerpt.
 */
op_to_op_src1_membase (int load_opcode, int opcode)
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/
	/* Only pointer-sized (32 bit) loads can be fused on x86 */
	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return OP_X86_PUSH_MEMBASE;
	case OP_COMPARE_IMM:
	case OP_ICOMPARE_IMM:
		return OP_X86_COMPARE_MEMBASE_IMM;
		return OP_X86_COMPARE_MEMBASE_REG;
#ifdef TARGET_AMD64
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/
#ifdef __mono_ilp32__
		/* Under ILP32, OP_LOAD_MEMBASE is 32 bit wide; only i8 loads push a full word */
		if (load_opcode == OP_LOADI8_MEMBASE)
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_X86_PUSH_MEMBASE;
	/* FIXME: This only works for 32 bit immediates
	case OP_COMPARE_IMM:
	case OP_LCOMPARE_IMM:
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_AMD64_COMPARE_MEMBASE_IMM;
	*/
	case OP_ICOMPARE_IMM:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_IMM;
#ifdef __mono_ilp32__
		/* ILP32: pointer loads are 32 bit compares, i8 loads 64 bit */
		if (load_opcode == OP_LOAD_MEMBASE)
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
		if (load_opcode == OP_LOADI8_MEMBASE)
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_AMD64_COMPARE_MEMBASE_REG;
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Return an arch-specific opcode which reads its second source operand
 * directly from memory (the base+offset described by a preceding load of
 * kind LOAD_OPCODE), or -1 if the load cannot be fused — see the != -1
 * checks at the call sites.
 * NOTE(review): the case labels are elided in this excerpt.
 */
op_to_op_src2_membase (int load_opcode, int opcode)
	/* Only pointer-sized (32 bit) loads can be fused on x86 */
	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return OP_X86_COMPARE_REG_MEMBASE;
		return OP_X86_ADD_REG_MEMBASE;
		return OP_X86_SUB_REG_MEMBASE;
		return OP_X86_AND_REG_MEMBASE;
		return OP_X86_OR_REG_MEMBASE;
		return OP_X86_XOR_REG_MEMBASE;
#ifdef TARGET_AMD64
#ifdef __mono_ilp32__
	/* Under ILP32, OP_LOAD_MEMBASE is 32 bits wide and uses the 32 bit forms */
	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
			/* 32 bit ops reuse the x86 opcodes */
			return OP_AMD64_ICOMPARE_REG_MEMBASE;
			return OP_X86_ADD_REG_MEMBASE;
			return OP_X86_SUB_REG_MEMBASE;
			return OP_X86_AND_REG_MEMBASE;
			return OP_X86_OR_REG_MEMBASE;
			return OP_X86_XOR_REG_MEMBASE;
#ifdef __mono_ilp32__
	} else if (load_opcode == OP_LOADI8_MEMBASE) {
	} else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
			/* 64 bit ops use the AMD64 opcodes */
			return OP_AMD64_COMPARE_REG_MEMBASE;
			return OP_AMD64_ADD_REG_MEMBASE;
			return OP_AMD64_SUB_REG_MEMBASE;
			return OP_AMD64_AND_REG_MEMBASE;
			return OP_AMD64_OR_REG_MEMBASE;
			return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm, but presumably refuses the conversion for opcodes
 * which are emulated in software on this architecture (long shifts on 32 bit
 * registers, mul/div) — the emulation helpers need both operands in
 * registers.  TODO(review): the case labels guarded by each #if group are
 * elided in this excerpt; confirm against the full file.
 */
mono_op_to_op_imm_noemul (int opcode)
#if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
#if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
#if defined(MONO_ARCH_EMULATE_MUL_DIV)
	/* Everything else can take an immediate operand directly */
	return mono_op_to_op_imm (opcode);
11373 #ifndef DISABLE_JIT
11376 * mono_handle_global_vregs:
11378 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
11382 mono_handle_global_vregs (MonoCompile *cfg)
11384 gint32 *vreg_to_bb;
11385 MonoBasicBlock *bb;
11388 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
11390 #ifdef MONO_ARCH_SIMD_INTRINSICS
11391 if (cfg->uses_simd_intrinsics)
11392 mono_simd_simplify_indirection (cfg);
11395 /* Find local vregs used in more than one bb */
11396 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
11397 MonoInst *ins = bb->code;
11398 int block_num = bb->block_num;
11400 if (cfg->verbose_level > 2)
11401 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
11404 for (; ins; ins = ins->next) {
11405 const char *spec = INS_INFO (ins->opcode);
11406 int regtype = 0, regindex;
11409 if (G_UNLIKELY (cfg->verbose_level > 2))
11410 mono_print_ins (ins);
11412 g_assert (ins->opcode >= MONO_CEE_LAST);
11414 for (regindex = 0; regindex < 4; regindex ++) {
11417 if (regindex == 0) {
11418 regtype = spec [MONO_INST_DEST];
11419 if (regtype == ' ')
11422 } else if (regindex == 1) {
11423 regtype = spec [MONO_INST_SRC1];
11424 if (regtype == ' ')
11427 } else if (regindex == 2) {
11428 regtype = spec [MONO_INST_SRC2];
11429 if (regtype == ' ')
11432 } else if (regindex == 3) {
11433 regtype = spec [MONO_INST_SRC3];
11434 if (regtype == ' ')
11439 #if SIZEOF_REGISTER == 4
11440 /* In the LLVM case, the long opcodes are not decomposed */
11441 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
11443 * Since some instructions reference the original long vreg,
11444 * and some reference the two component vregs, it is quite hard
11445 * to determine when it needs to be global. So be conservative.
11447 if (!get_vreg_to_inst (cfg, vreg)) {
11448 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
11450 if (cfg->verbose_level > 2)
11451 printf ("LONG VREG R%d made global.\n", vreg);
11455 * Make the component vregs volatile since the optimizations can
11456 * get confused otherwise.
11458 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
11459 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
11463 g_assert (vreg != -1);
11465 prev_bb = vreg_to_bb [vreg];
11466 if (prev_bb == 0) {
11467 /* 0 is a valid block num */
11468 vreg_to_bb [vreg] = block_num + 1;
11469 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
11470 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
11473 if (!get_vreg_to_inst (cfg, vreg)) {
11474 if (G_UNLIKELY (cfg->verbose_level > 2))
11475 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
11479 if (vreg_is_ref (cfg, vreg))
11480 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
11482 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
11485 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
11488 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
11491 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
11494 g_assert_not_reached ();
11498 /* Flag as having been used in more than one bb */
11499 vreg_to_bb [vreg] = -1;
11505 /* If a variable is used in only one bblock, convert it into a local vreg */
11506 for (i = 0; i < cfg->num_varinfo; i++) {
11507 MonoInst *var = cfg->varinfo [i];
11508 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
11510 switch (var->type) {
11516 #if SIZEOF_REGISTER == 8
11519 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
11520 /* Enabling this screws up the fp stack on x86 */
11523 /* Arguments are implicitly global */
11524 /* Putting R4 vars into registers doesn't work currently */
11525 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
11527 * Make that the variable's liveness interval doesn't contain a call, since
11528 * that would cause the lvreg to be spilled, making the whole optimization
11531 /* This is too slow for JIT compilation */
11533 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
11535 int def_index, call_index, ins_index;
11536 gboolean spilled = FALSE;
11541 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
11542 const char *spec = INS_INFO (ins->opcode);
11544 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
11545 def_index = ins_index;
11547 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
11548 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
11549 if (call_index > def_index) {
11555 if (MONO_IS_CALL (ins))
11556 call_index = ins_index;
11566 if (G_UNLIKELY (cfg->verbose_level > 2))
11567 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
11568 var->flags |= MONO_INST_IS_DEAD;
11569 cfg->vreg_to_inst [var->dreg] = NULL;
11576 * Compress the varinfo and vars tables so the liveness computation is faster and
11577 * takes up less space.
11580 for (i = 0; i < cfg->num_varinfo; ++i) {
11581 MonoInst *var = cfg->varinfo [i];
11582 if (pos < i && cfg->locals_start == i)
11583 cfg->locals_start = pos;
11584 if (!(var->flags & MONO_INST_IS_DEAD)) {
11586 cfg->varinfo [pos] = cfg->varinfo [i];
11587 cfg->varinfo [pos]->inst_c0 = pos;
11588 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
11589 cfg->vars [pos].idx = pos;
11590 #if SIZEOF_REGISTER == 4
11591 if (cfg->varinfo [pos]->type == STACK_I8) {
11592 /* Modify the two component vars too */
11595 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
11596 var1->inst_c0 = pos;
11597 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
11598 var1->inst_c0 = pos;
11605 cfg->num_varinfo = pos;
11606 if (cfg->locals_start > cfg->num_varinfo)
11607 cfg->locals_start = cfg->num_varinfo;
11611 * mono_spill_global_vars:
11613 * Generate spill code for variables which are not allocated to registers,
11614 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
11615 * code is generated which could be optimized by the local optimization passes.
11618 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
11620 MonoBasicBlock *bb;
11622 int orig_next_vreg;
11623 guint32 *vreg_to_lvreg;
11625 guint32 i, lvregs_len;
11626 gboolean dest_has_lvreg = FALSE;
11627 guint32 stacktypes [128];
11628 MonoInst **live_range_start, **live_range_end;
11629 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
11631 *need_local_opts = FALSE;
11633 memset (spec2, 0, sizeof (spec2));
11635 /* FIXME: Move this function to mini.c */
11636 stacktypes ['i'] = STACK_PTR;
11637 stacktypes ['l'] = STACK_I8;
11638 stacktypes ['f'] = STACK_R8;
11639 #ifdef MONO_ARCH_SIMD_INTRINSICS
11640 stacktypes ['x'] = STACK_VTYPE;
11643 #if SIZEOF_REGISTER == 4
11644 /* Create MonoInsts for longs */
11645 for (i = 0; i < cfg->num_varinfo; i++) {
11646 MonoInst *ins = cfg->varinfo [i];
11648 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
11649 switch (ins->type) {
11654 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
11657 g_assert (ins->opcode == OP_REGOFFSET);
11659 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
11661 tree->opcode = OP_REGOFFSET;
11662 tree->inst_basereg = ins->inst_basereg;
11663 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
11665 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
11667 tree->opcode = OP_REGOFFSET;
11668 tree->inst_basereg = ins->inst_basereg;
11669 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
11679 if (cfg->compute_gc_maps) {
11680 /* registers need liveness info even for !non refs */
11681 for (i = 0; i < cfg->num_varinfo; i++) {
11682 MonoInst *ins = cfg->varinfo [i];
11684 if (ins->opcode == OP_REGVAR)
11685 ins->flags |= MONO_INST_GC_TRACK;
11689 /* FIXME: widening and truncation */
11692 * As an optimization, when a variable allocated to the stack is first loaded into
11693 * an lvreg, we will remember the lvreg and use it the next time instead of loading
11694 * the variable again.
11696 orig_next_vreg = cfg->next_vreg;
11697 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
11698 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
11702 * These arrays contain the first and last instructions accessing a given
11704 * Since we emit bblocks in the same order we process them here, and we
11705 * don't split live ranges, these will precisely describe the live range of
11706 * the variable, i.e. the instruction range where a valid value can be found
11707 * in the variables location.
11708 * The live range is computed using the liveness info computed by the liveness pass.
11709 * We can't use vmv->range, since that is an abstract live range, and we need
11710 * one which is instruction precise.
11711 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
11713 /* FIXME: Only do this if debugging info is requested */
11714 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
11715 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
11716 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
11717 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
11719 /* Add spill loads/stores */
11720 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
11723 if (cfg->verbose_level > 2)
11724 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
11726 /* Clear vreg_to_lvreg array */
11727 for (i = 0; i < lvregs_len; i++)
11728 vreg_to_lvreg [lvregs [i]] = 0;
11732 MONO_BB_FOR_EACH_INS (bb, ins) {
11733 const char *spec = INS_INFO (ins->opcode);
11734 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
11735 gboolean store, no_lvreg;
11736 int sregs [MONO_MAX_SRC_REGS];
11738 if (G_UNLIKELY (cfg->verbose_level > 2))
11739 mono_print_ins (ins);
11741 if (ins->opcode == OP_NOP)
11745 * We handle LDADDR here as well, since it can only be decomposed
11746 * when variable addresses are known.
11748 if (ins->opcode == OP_LDADDR) {
11749 MonoInst *var = ins->inst_p0;
11751 if (var->opcode == OP_VTARG_ADDR) {
11752 /* Happens on SPARC/S390 where vtypes are passed by reference */
11753 MonoInst *vtaddr = var->inst_left;
11754 if (vtaddr->opcode == OP_REGVAR) {
11755 ins->opcode = OP_MOVE;
11756 ins->sreg1 = vtaddr->dreg;
11758 else if (var->inst_left->opcode == OP_REGOFFSET) {
11759 ins->opcode = OP_LOAD_MEMBASE;
11760 ins->inst_basereg = vtaddr->inst_basereg;
11761 ins->inst_offset = vtaddr->inst_offset;
11765 g_assert (var->opcode == OP_REGOFFSET);
11767 ins->opcode = OP_ADD_IMM;
11768 ins->sreg1 = var->inst_basereg;
11769 ins->inst_imm = var->inst_offset;
11772 *need_local_opts = TRUE;
11773 spec = INS_INFO (ins->opcode);
11776 if (ins->opcode < MONO_CEE_LAST) {
11777 mono_print_ins (ins);
11778 g_assert_not_reached ();
11782 * Store opcodes have destbasereg in the dreg, but in reality, it is an
11786 if (MONO_IS_STORE_MEMBASE (ins)) {
11787 tmp_reg = ins->dreg;
11788 ins->dreg = ins->sreg2;
11789 ins->sreg2 = tmp_reg;
11792 spec2 [MONO_INST_DEST] = ' ';
11793 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
11794 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
11795 spec2 [MONO_INST_SRC3] = ' ';
11797 } else if (MONO_IS_STORE_MEMINDEX (ins))
11798 g_assert_not_reached ();
11803 if (G_UNLIKELY (cfg->verbose_level > 2)) {
11804 printf ("\t %.3s %d", spec, ins->dreg);
11805 num_sregs = mono_inst_get_src_registers (ins, sregs);
11806 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
11807 printf (" %d", sregs [srcindex]);
11814 regtype = spec [MONO_INST_DEST];
11815 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
11818 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
11819 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
11820 MonoInst *store_ins;
11822 MonoInst *def_ins = ins;
11823 int dreg = ins->dreg; /* The original vreg */
11825 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
11827 if (var->opcode == OP_REGVAR) {
11828 ins->dreg = var->dreg;
11829 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
11831 * Instead of emitting a load+store, use a _membase opcode.
11833 g_assert (var->opcode == OP_REGOFFSET);
11834 if (ins->opcode == OP_MOVE) {
11838 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
11839 ins->inst_basereg = var->inst_basereg;
11840 ins->inst_offset = var->inst_offset;
11843 spec = INS_INFO (ins->opcode);
11847 g_assert (var->opcode == OP_REGOFFSET);
11849 prev_dreg = ins->dreg;
11851 /* Invalidate any previous lvreg for this vreg */
11852 vreg_to_lvreg [ins->dreg] = 0;
11856 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
11858 store_opcode = OP_STOREI8_MEMBASE_REG;
11861 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
11863 if (regtype == 'l') {
11864 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
11865 mono_bblock_insert_after_ins (bb, ins, store_ins);
11866 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
11867 mono_bblock_insert_after_ins (bb, ins, store_ins);
11868 def_ins = store_ins;
11871 g_assert (store_opcode != OP_STOREV_MEMBASE);
11873 /* Try to fuse the store into the instruction itself */
11874 /* FIXME: Add more instructions */
11875 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
11876 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
11877 ins->inst_imm = ins->inst_c0;
11878 ins->inst_destbasereg = var->inst_basereg;
11879 ins->inst_offset = var->inst_offset;
11880 spec = INS_INFO (ins->opcode);
11881 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
11882 ins->opcode = store_opcode;
11883 ins->inst_destbasereg = var->inst_basereg;
11884 ins->inst_offset = var->inst_offset;
11888 tmp_reg = ins->dreg;
11889 ins->dreg = ins->sreg2;
11890 ins->sreg2 = tmp_reg;
11893 spec2 [MONO_INST_DEST] = ' ';
11894 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
11895 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
11896 spec2 [MONO_INST_SRC3] = ' ';
11898 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
11899 // FIXME: The backends expect the base reg to be in inst_basereg
11900 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
11902 ins->inst_basereg = var->inst_basereg;
11903 ins->inst_offset = var->inst_offset;
11904 spec = INS_INFO (ins->opcode);
11906 /* printf ("INS: "); mono_print_ins (ins); */
11907 /* Create a store instruction */
11908 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
11910 /* Insert it after the instruction */
11911 mono_bblock_insert_after_ins (bb, ins, store_ins);
11913 def_ins = store_ins;
11916 * We can't assign ins->dreg to var->dreg here, since the
11917 * sregs could use it. So set a flag, and do it after
11920 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
11921 dest_has_lvreg = TRUE;
11926 if (def_ins && !live_range_start [dreg]) {
11927 live_range_start [dreg] = def_ins;
11928 live_range_start_bb [dreg] = bb;
11931 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
11934 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
11935 tmp->inst_c1 = dreg;
11936 mono_bblock_insert_after_ins (bb, def_ins, tmp);
11943 num_sregs = mono_inst_get_src_registers (ins, sregs);
11944 for (srcindex = 0; srcindex < 3; ++srcindex) {
11945 regtype = spec [MONO_INST_SRC1 + srcindex];
11946 sreg = sregs [srcindex];
11948 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
11949 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
11950 MonoInst *var = get_vreg_to_inst (cfg, sreg);
11951 MonoInst *use_ins = ins;
11952 MonoInst *load_ins;
11953 guint32 load_opcode;
11955 if (var->opcode == OP_REGVAR) {
11956 sregs [srcindex] = var->dreg;
11957 //mono_inst_set_src_registers (ins, sregs);
11958 live_range_end [sreg] = use_ins;
11959 live_range_end_bb [sreg] = bb;
11961 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
11964 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
11965 /* var->dreg is a hreg */
11966 tmp->inst_c1 = sreg;
11967 mono_bblock_insert_after_ins (bb, ins, tmp);
11973 g_assert (var->opcode == OP_REGOFFSET);
11975 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
11977 g_assert (load_opcode != OP_LOADV_MEMBASE);
11979 if (vreg_to_lvreg [sreg]) {
11980 g_assert (vreg_to_lvreg [sreg] != -1);
11982 /* The variable is already loaded to an lvreg */
11983 if (G_UNLIKELY (cfg->verbose_level > 2))
11984 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
11985 sregs [srcindex] = vreg_to_lvreg [sreg];
11986 //mono_inst_set_src_registers (ins, sregs);
11990 /* Try to fuse the load into the instruction */
11991 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
11992 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
11993 sregs [0] = var->inst_basereg;
11994 //mono_inst_set_src_registers (ins, sregs);
11995 ins->inst_offset = var->inst_offset;
11996 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
11997 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
11998 sregs [1] = var->inst_basereg;
11999 //mono_inst_set_src_registers (ins, sregs);
12000 ins->inst_offset = var->inst_offset;
12002 if (MONO_IS_REAL_MOVE (ins)) {
12003 ins->opcode = OP_NOP;
12006 //printf ("%d ", srcindex); mono_print_ins (ins);
12008 sreg = alloc_dreg (cfg, stacktypes [regtype]);
12010 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
12011 if (var->dreg == prev_dreg) {
12013 * sreg refers to the value loaded by the load
12014 * emitted below, but we need to use ins->dreg
12015 * since it refers to the store emitted earlier.
12019 g_assert (sreg != -1);
12020 vreg_to_lvreg [var->dreg] = sreg;
12021 g_assert (lvregs_len < 1024);
12022 lvregs [lvregs_len ++] = var->dreg;
12026 sregs [srcindex] = sreg;
12027 //mono_inst_set_src_registers (ins, sregs);
12029 if (regtype == 'l') {
12030 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
12031 mono_bblock_insert_before_ins (bb, ins, load_ins);
12032 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
12033 mono_bblock_insert_before_ins (bb, ins, load_ins);
12034 use_ins = load_ins;
12037 #if SIZEOF_REGISTER == 4
12038 g_assert (load_opcode != OP_LOADI8_MEMBASE);
12040 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
12041 mono_bblock_insert_before_ins (bb, ins, load_ins);
12042 use_ins = load_ins;
12046 if (var->dreg < orig_next_vreg) {
12047 live_range_end [var->dreg] = use_ins;
12048 live_range_end_bb [var->dreg] = bb;
12051 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
12054 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
12055 tmp->inst_c1 = var->dreg;
12056 mono_bblock_insert_after_ins (bb, ins, tmp);
12060 mono_inst_set_src_registers (ins, sregs);
12062 if (dest_has_lvreg) {
12063 g_assert (ins->dreg != -1);
12064 vreg_to_lvreg [prev_dreg] = ins->dreg;
12065 g_assert (lvregs_len < 1024);
12066 lvregs [lvregs_len ++] = prev_dreg;
12067 dest_has_lvreg = FALSE;
12071 tmp_reg = ins->dreg;
12072 ins->dreg = ins->sreg2;
12073 ins->sreg2 = tmp_reg;
12076 if (MONO_IS_CALL (ins)) {
12077 /* Clear vreg_to_lvreg array */
12078 for (i = 0; i < lvregs_len; i++)
12079 vreg_to_lvreg [lvregs [i]] = 0;
12081 } else if (ins->opcode == OP_NOP) {
12083 MONO_INST_NULLIFY_SREGS (ins);
12086 if (cfg->verbose_level > 2)
12087 mono_print_ins_index (1, ins);
12090 /* Extend the live range based on the liveness info */
12091 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
12092 for (i = 0; i < cfg->num_varinfo; i ++) {
12093 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
12095 if (vreg_is_volatile (cfg, vi->vreg))
12096 /* The liveness info is incomplete */
12099 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
12100 /* Live from at least the first ins of this bb */
12101 live_range_start [vi->vreg] = bb->code;
12102 live_range_start_bb [vi->vreg] = bb;
12105 if (mono_bitset_test_fast (bb->live_out_set, i)) {
12106 /* Live at least until the last ins of this bb */
12107 live_range_end [vi->vreg] = bb->last_ins;
12108 live_range_end_bb [vi->vreg] = bb;
12114 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
12116 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
12117 * by storing the current native offset into MonoMethodVar->live_range_start/end.
12119 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
12120 for (i = 0; i < cfg->num_varinfo; ++i) {
12121 int vreg = MONO_VARINFO (cfg, i)->vreg;
12124 if (live_range_start [vreg]) {
12125 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
12127 ins->inst_c1 = vreg;
12128 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
12130 if (live_range_end [vreg]) {
12131 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
12133 ins->inst_c1 = vreg;
12134 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
12135 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
12137 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
12143 g_free (live_range_start);
12144 g_free (live_range_end);
12145 g_free (live_range_start_bb);
12146 g_free (live_range_end_bb);
12151 * - use 'iadd' instead of 'int_add'
12152 * - handling ovf opcodes: decompose in method_to_ir.
12153 * - unify iregs/fregs
12154 * -> partly done, the missing parts are:
12155 * - a more complete unification would involve unifying the hregs as well, so
12156 * code wouldn't need if (fp) all over the place. but that would mean the hregs
12157 * would no longer map to the machine hregs, so the code generators would need to
12158 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
12159 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
12160 * fp/non-fp branches speeds it up by about 15%.
12161 * - use sext/zext opcodes instead of shifts
12163 * - get rid of TEMPLOADs if possible and use vregs instead
12164 * - clean up usage of OP_P/OP_ opcodes
12165 * - cleanup usage of DUMMY_USE
12166 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
12168 * - set the stack type and allocate a dreg in the EMIT_NEW macros
12169 * - get rid of all the <foo>2 stuff when the new JIT is ready.
12170 * - make sure handle_stack_args () is called before the branch is emitted
12171 * - when the new IR is done, get rid of all unused stuff
12172 * - COMPARE/BEQ as separate instructions or unify them ?
12173 * - keeping them separate allows specialized compare instructions like
12174 * compare_imm, compare_membase
12175 * - most back ends unify fp compare+branch, fp compare+ceq
12176 * - integrate mono_save_args into inline_method
12177 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
12178 * - handle long shift opts on 32 bit platforms somehow: they require
12179 * 3 sregs (2 for arg1 and 1 for arg2)
12180 * - make byref a 'normal' type.
12181 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
12182 * variable if needed.
12183 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
12184 * like inline_method.
12185 * - remove inlining restrictions
12186 * - fix LNEG and enable cfold of INEG
12187 * - generalize x86 optimizations like ldelema as a peephole optimization
12188 * - add store_mem_imm for amd64
12189 * - optimize the loading of the interruption flag in the managed->native wrappers
12190 * - avoid special handling of OP_NOP in passes
12191 * - move code inserting instructions into one function/macro.
12192 * - try a coalescing phase after liveness analysis
12193 * - add float -> vreg conversion + local optimizations on !x86
12194 * - figure out how to handle decomposed branches during optimizations, ie.
12195 * compare+branch, op_jump_table+op_br etc.
12196 * - promote RuntimeXHandles to vregs
12197 * - vtype cleanups:
12198 * - add a NEW_VARLOADA_VREG macro
12199 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
12200 * accessing vtype fields.
12201 * - get rid of I8CONST on 64 bit platforms
12202 * - dealing with the increase in code size due to branches created during opcode
12204 * - use extended basic blocks
12205 * - all parts of the JIT
12206 * - handle_global_vregs () && local regalloc
12207 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
12208 * - sources of increase in code size:
12211 * - isinst and castclass
12212 * - lvregs not allocated to global registers even if used multiple times
12213 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
12215 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
12216 * - add all micro optimizations from the old JIT
12217 * - put tree optimizations into the deadce pass
12218 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
12219 * specific function.
12220 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
12221 * fcompare + branchCC.
12222 * - create a helper function for allocating a stack slot, taking into account
12223 * MONO_CFG_HAS_SPILLUP.
12225 * - merge the ia64 switch changes.
12226 * - optimize mono_regstate2_alloc_int/float.
12227 * - fix the pessimistic handling of variables accessed in exception handler blocks.
12228 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
12229 * parts of the tree could be separated by other instructions, killing the tree
12230 * arguments, or stores killing loads etc. Also, should we fold loads into other
12231 * instructions if the result of the load is used multiple times ?
12232 * - make the REM_IMM optimization in mini-x86.c arch-independent.
12233 * - LAST MERGE: 108395.
12234 * - when returning vtypes in registers, generate IR and append it to the end of the
12235 * last bb instead of doing it in the epilog.
12236 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
12244 - When to decompose opcodes:
12245 - earlier: this makes some optimizations hard to implement, since the low level IR
12246 no longer contains the necessary information. But it is easier to do.
12247 - later: harder to implement, enables more optimizations.
12248 - Branches inside bblocks:
12249 - created when decomposing complex opcodes.
12250 - branches to another bblock: harmless, but not tracked by the branch
12251 optimizations, so need to branch to a label at the start of the bblock.
12252 - branches to inside the same bblock: very problematic, trips up the local
12253 reg allocator. Can be fixed by splitting the current bblock, but that is a
12254 complex operation, since some local vregs can become global vregs etc.
12255 - Local/global vregs:
12256 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
12257 local register allocator.
12258 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
12259 structure, created by mono_create_var (). Assigned to hregs or the stack by
12260 the global register allocator.
12261 - When to do optimizations like alu->alu_imm:
12262 - earlier -> saves work later on since the IR will be smaller/simpler
12263 - later -> can work on more instructions
12264 - Handling of valuetypes:
12265 - When a vtype is pushed on the stack, a new temporary is created, an
12266 instruction computing its address (LDADDR) is emitted and pushed on
12267 the stack. Need to optimize cases when the vtype is used immediately as in
12268 argument passing, stloc etc.
12269 - Instead of the to_end stuff in the old JIT, simply call the function handling
12270 the values on the stack before emitting the last instruction of the bb.
12273 #endif /* DISABLE_JIT */