2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
24 #ifdef HAVE_SYS_TIME_H
32 #include <mono/utils/memcheck.h>
34 #include <mono/metadata/assembly.h>
35 #include <mono/metadata/attrdefs.h>
36 #include <mono/metadata/loader.h>
37 #include <mono/metadata/tabledefs.h>
38 #include <mono/metadata/class.h>
39 #include <mono/metadata/object.h>
40 #include <mono/metadata/exception.h>
41 #include <mono/metadata/opcodes.h>
42 #include <mono/metadata/mono-endian.h>
43 #include <mono/metadata/tokentype.h>
44 #include <mono/metadata/tabledefs.h>
45 #include <mono/metadata/marshal.h>
46 #include <mono/metadata/debug-helpers.h>
47 #include <mono/metadata/mono-debug.h>
48 #include <mono/metadata/gc-internal.h>
49 #include <mono/metadata/security-manager.h>
50 #include <mono/metadata/threads-types.h>
51 #include <mono/metadata/security-core-clr.h>
52 #include <mono/metadata/monitor.h>
53 #include <mono/metadata/profiler-private.h>
54 #include <mono/metadata/profiler.h>
55 #include <mono/metadata/debug-mono-symfile.h>
56 #include <mono/utils/mono-compiler.h>
57 #include <mono/utils/mono-memory-model.h>
58 #include <mono/metadata/mono-basic-block.h>
65 #include "jit-icalls.h"
67 #include "debugger-agent.h"
/* Cost heuristic charged per branch when deciding whether to inline. */
69 #define BRANCH_COST 10
/* Maximum IL size (in bytes) of a callee considered for inlining. */
70 #define INLINE_LENGTH_LIMIT 20
/* Abort an in-progress inline attempt: optionally log MSG (verbose >= 2)
 * and jump to the inline_failure label. Only fires while actually inlining
 * (cfg->method != method) and when the callee is not a wrapper.
 * NOTE(review): the closing "} while (0)" of this macro is not visible in
 * this listing — the macro body continues past what is shown here. */
71 #define INLINE_FAILURE(msg) do { \
72 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE)) { \
73 if (cfg->verbose_level >= 2) \
74 printf ("inline failed: %s\n", msg); \
75 goto inline_failure; \
/* Bail out of IR generation if an exception type was already recorded on
 * the cfg (presumably jumping to an exit label — the tail of the macro is
 * elided in this listing). */
78 #define CHECK_CFG_EXCEPTION do {\
79 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
/* Record a MethodAccessException on the cfg with a message naming both the
 * caller and the inaccessible callee, then jump to exception_exit.
 * The two full-name strings are heap-allocated and freed here; the
 * formatted message is owned by cfg->exception_message afterwards. */
82 #define METHOD_ACCESS_FAILURE do { \
83 char *method_fname = mono_method_full_name (method, TRUE); \
84 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
85 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS); \
86 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
87 g_free (method_fname); \
88 g_free (cil_method_fname); \
89 goto exception_exit; \
/* Record a FieldAccessException on the cfg naming the inaccessible field
 * and the offending method, then jump to exception_exit. Mirrors
 * METHOD_ACCESS_FAILURE above; temporary name strings are freed here. */
91 #define FIELD_ACCESS_FAILURE do { \
92 char *method_fname = mono_method_full_name (method, TRUE); \
93 char *field_fname = mono_field_full_name (field); \
94 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS); \
95 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
96 g_free (method_fname); \
97 g_free (field_fname); \
98 goto exception_exit; \
/* When compiling a generic-shared method, mark the cfg as having failed
 * generic sharing for OPCODE and jump to exception_exit; the caller is then
 * expected to recompile a specific instantiation. No-op when not sharing
 * (cfg->generic_sharing_context == NULL). */
100 #define GENERIC_SHARING_FAILURE(opcode) do { \
101 if (cfg->generic_sharing_context) { \
102 if (cfg->verbose_level > 2) \
103 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
104 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
105 goto exception_exit; \
/* Same idea as GENERIC_SHARING_FAILURE but for gsharedvt (valuetype-sharing)
 * compilation: record a descriptive message (including file/line for
 * debugging) on the cfg and jump to exception_exit. No-op unless
 * cfg->gsharedvt is set. */
108 #define GSHAREDVT_FAILURE(opcode) do { \
109 if (cfg->gsharedvt) { \
110 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __FILE__, __LINE__); \
111 if (cfg->verbose_level >= 2) \
112 printf ("%s\n", cfg->exception_message); \
113 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
114 goto exception_exit; \
/* Record an OutOfMemoryException on the cfg and abort to exception_exit. */
117 #define OUT_OF_MEMORY_FAILURE do { \
118 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
119 goto exception_exit; \
121 /* Determine whether 'ins' represents a load of the 'this' argument:
 * the enclosing signature must have 'this', the instruction must be a plain
 * OP_MOVE, and its source must be the vreg holding argument 0. */
122 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
124 static int ldind_to_load_membase (int opcode);
125 static int stind_to_store_membase (int opcode);
127 int mono_op_to_op_imm (int opcode);
128 int mono_op_to_op_imm_noemul (int opcode);
130 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
132 /* helper methods signatures */
/* Cached icall signatures for the various JIT trampolines; all start NULL
 * and are filled in by mono_create_helper_signatures () below. */
133 static MonoMethodSignature *helper_sig_class_init_trampoline = NULL;
134 static MonoMethodSignature *helper_sig_domain_get = NULL;
135 static MonoMethodSignature *helper_sig_generic_class_init_trampoline = NULL;
136 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm = NULL;
137 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline = NULL;
138 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline = NULL;
139 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm = NULL;
142 * Instruction metadata
/* The MINI_OP/MINI_OP3 macros are (re)defined and then mini-ops.h is
 * included so each opcode row expands into table entries. The first
 * expansion (below) emits dest/src register-class characters per opcode. */
150 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
151 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
157 #if SIZEOF_REGISTER == 8
162 /* keep in sync with the enum in mini.h */
165 #include "mini-ops.h"
/* Second expansion: count the number of source registers per opcode. */
170 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
171 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
173 * This should contain the index of the last sreg + 1. This is not the same
174 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
176 const gint8 ins_sreg_counts[] = {
177 #include "mini-ops.h"
/* Initialize a MonoMethodVar's liveness info; 0xffff marks "no first use
 * recorded yet". The rest of the macro body is elided in this listing. */
182 #define MONO_INIT_VARINFO(vi,id) do { \
183 (vi)->range.first_use.pos.bid = 0xffff; \
/* Copy all three source registers into INS from the REGS array
 * (regs must hold at least 3 entries). */
189 mono_inst_set_src_registers (MonoInst *ins, int *regs)
191 ins->sreg1 = regs [0];
192 ins->sreg2 = regs [1];
193 ins->sreg3 = regs [2];
/* Public wrapper: allocate a fresh integer vreg for CFG. */
197 mono_alloc_ireg (MonoCompile *cfg)
199 return alloc_ireg (cfg);
/* Public wrapper: allocate a fresh floating-point vreg for CFG. */
203 mono_alloc_freg (MonoCompile *cfg)
205 return alloc_freg (cfg);
/* Public wrapper: allocate a fresh pointer-sized vreg for CFG. */
209 mono_alloc_preg (MonoCompile *cfg)
211 return alloc_preg (cfg);
/* Public wrapper: allocate a destination vreg appropriate for the given
 * eval-stack type (STACK_I4/I8/R8/...). */
215 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
217 return alloc_dreg (cfg, stack_type);
221 * mono_alloc_ireg_ref:
223 * Allocate an IREG, and mark it as holding a GC ref.
/* Thin wrapper over alloc_ireg_ref (); the ref marking matters for
 * precise GC stack scanning — TODO confirm against mini's GC map code. */
226 mono_alloc_ireg_ref (MonoCompile *cfg)
228 return alloc_ireg_ref (cfg);
232 * mono_alloc_ireg_mp:
234 * Allocate an IREG, and mark it as holding a managed pointer.
/* Thin wrapper over alloc_ireg_mp (). */
237 mono_alloc_ireg_mp (MonoCompile *cfg)
239 return alloc_ireg_mp (cfg);
243 * mono_alloc_ireg_copy:
245 * Allocate an IREG with the same GC type as VREG.
248 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
/* Propagate VREG's GC classification (object ref / managed pointer /
 * plain integer) to the newly allocated register. */
250 if (vreg_is_ref (cfg, vreg))
251 return alloc_ireg_ref (cfg);
252 else if (vreg_is_mp (cfg, vreg))
253 return alloc_ireg_mp (cfg);
255 return alloc_ireg (cfg);
/* Select the move opcode used to copy a value of TYPE between vregs.
 * NOTE(review): many case labels and return statements are elided in this
 * listing; the visible structure is a switch on type->type that unwraps
 * enums and generic instances by reassigning `type` and re-dispatching. */
259 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
265 switch (type->type) {
268 case MONO_TYPE_BOOLEAN:
280 case MONO_TYPE_FNPTR:
282 case MONO_TYPE_CLASS:
283 case MONO_TYPE_STRING:
284 case MONO_TYPE_OBJECT:
285 case MONO_TYPE_SZARRAY:
286 case MONO_TYPE_ARRAY:
290 #if SIZEOF_REGISTER == 8
299 case MONO_TYPE_VALUETYPE:
/* Enums are handled as their underlying integral type. */
300 if (type->data.klass->enumtype) {
301 type = mono_class_enum_basetype (type->data.klass);
/* SIMD value types get special treatment when SIMD intrinsics are on. */
304 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
307 case MONO_TYPE_TYPEDBYREF:
309 case MONO_TYPE_GENERICINST:
/* Retry with the generic container's underlying type. */
310 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables only appear under generic sharing. */
314 g_assert (cfg->generic_sharing_context);
315 if (mini_type_var_is_vt (cfg, type))
320 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/* Debug dump of a basic block: prints MSG, the block number, its incoming
 * and outgoing edges (block number + depth-first number), then every
 * instruction in the block. */
326 mono_print_bb (MonoBasicBlock *bb, const char *msg)
331 printf ("\n%s %d: [IN: ", msg, bb->block_num);
332 for (i = 0; i < bb->in_count; ++i)
333 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
335 for (i = 0; i < bb->out_count; ++i)
336 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
338 for (tree = bb->code; tree; tree = tree->next)
339 mono_print_ins_index (-1, tree);
/* One-time initialization of the cached trampoline icall signatures
 * declared above. Signature strings are "ret [args...]" as parsed by
 * mono_create_icall_signature (). */
343 mono_create_helper_signatures (void)
345 helper_sig_domain_get = mono_create_icall_signature ("ptr");
346 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
347 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
348 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
349 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
350 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
351 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
355 * Can't put this at the beginning, since other files reference stuff from this
361 * When using gsharedvt, some instatiations might be verifiable, and some might be not. i.e.
362 * foo<T> (int i) { ldarg.0; box T; }
/* Handle IL that fails verification during IR generation. Under gsharedvt
 * this falls back to compiling a concrete instantiation instead of failing
 * outright; the non-gsharedvt tail of the macro is elided in this listing. */
364 #define UNVERIFIED do { \
365 if (cfg->gsharedvt) { \
366 if (cfg->verbose_level > 2) \
367 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
368 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
369 goto exception_exit; \
371 if (mini_get_debug_options ()->break_on_unverified) \
/* Jump to the load_error label, or trap in the debugger when the
 * break_on_unverified debug option is set. */
377 #define LOAD_ERROR do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto load_error; } while (0)
/* Like LOAD_ERROR but also records the failing class in cfg->exception_ptr. */
379 #define TYPE_LOAD_ERROR(klass) do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else { cfg->exception_ptr = klass; goto load_error; } } while (0)
/* Look up (or lazily create) the basic block starting at IL address IP.
 * Uses the cil_offset_to_bb cache; on a miss it verifies IP is inside the
 * method body, creates a new block, records its IL address and adds it to
 * the cfg. */
381 #define GET_BBLOCK(cfg,tblock,ip) do { \
382 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
384 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
385 NEW_BBLOCK (cfg, (tblock)); \
386 (tblock)->cil_code = (ip); \
387 ADD_BBLOCK (cfg, (tblock)); \
391 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* x86/amd64 only: emit an OP_X86_LEA computing sr1 + (sr2 << shift) + imm
 * into a freshly allocated managed-pointer vreg, appended to the current
 * basic block. */
392 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
393 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
394 (dest)->dreg = alloc_ireg_mp ((cfg)); \
395 (dest)->sreg1 = (sr1); \
396 (dest)->sreg2 = (sr2); \
397 (dest)->inst_imm = (imm); \
398 (dest)->backend.shift_amount = (shift); \
399 MONO_ADD_INS ((cfg)->cbb, (dest)); \
403 #if SIZEOF_REGISTER == 8
/* On 64-bit targets, binary ops mixing a native pointer with an I4 operand
 * need the I4 side sign-extended to 64 bits first; this emits the OP_SEXT_I4
 * and rewires the op's sreg2. On 32-bit targets the macro is empty. */
404 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
405 /* FIXME: Need to add many more cases */ \
406 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
408 int dr = alloc_preg (cfg); \
409 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
410 (ins)->sreg2 = widen->dreg; \
414 #define ADD_WIDEN_OP(ins, arg1, arg2)
/* Pop two values off the simulated eval stack (sp), build a binary IR op,
 * infer its result type via type_from_op (), widen mixed-width operands if
 * needed, allocate a dreg, append the instruction to the current block and
 * push the (possibly decomposed) result back on the stack. */
417 #define ADD_BINOP(op) do { \
418 MONO_INST_NEW (cfg, ins, (op)); \
420 ins->sreg1 = sp [0]->dreg; \
421 ins->sreg2 = sp [1]->dreg; \
422 type_from_op (ins, sp [0], sp [1]); \
424 /* Have to insert a widening op */ \
425 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
426 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
427 MONO_ADD_INS ((cfg)->cbb, (ins)); \
428 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
/* Unary counterpart of ADD_BINOP: pop one value, type it, emit, and push
 * the decomposed result. */
431 #define ADD_UNOP(op) do { \
432 MONO_INST_NEW (cfg, ins, (op)); \
434 ins->sreg1 = sp [0]->dreg; \
435 type_from_op (ins, sp [0], NULL); \
437 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
438 MONO_ADD_INS ((cfg)->cbb, (ins)); \
439 *sp++ = mono_decompose_opcode (cfg, ins); \
/* Emit a two-operand conditional branch: builds an OP_COMPARE of the two
 * stack tops plus a branch instruction with true/false target blocks, links
 * the CFG edges, spills any remaining eval-stack entries at the block
 * boundary, and appends compare+branch to the current block.
 * start_new_bblock = 1 means the fallthrough block was supplied
 * (next_block); 2 means it was looked up from the next IL offset. */
442 #define ADD_BINCOND(next_block) do { \
445 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
446 cmp->sreg1 = sp [0]->dreg; \
447 cmp->sreg2 = sp [1]->dreg; \
448 type_from_op (cmp, sp [0], sp [1]); \
450 type_from_op (ins, sp [0], sp [1]); \
451 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
452 GET_BBLOCK (cfg, tblock, target); \
453 link_bblock (cfg, bblock, tblock); \
454 ins->inst_true_bb = tblock; \
455 if ((next_block)) { \
456 link_bblock (cfg, bblock, (next_block)); \
457 ins->inst_false_bb = (next_block); \
458 start_new_bblock = 1; \
460 GET_BBLOCK (cfg, tblock, ip); \
461 link_bblock (cfg, bblock, tblock); \
462 ins->inst_false_bb = tblock; \
463 start_new_bblock = 2; \
465 if (sp != stack_start) { \
466 handle_stack_args (cfg, stack_start, sp - stack_start); \
467 CHECK_UNVERIFIABLE (cfg); \
469 MONO_ADD_INS (bblock, cmp); \
470 MONO_ADD_INS (bblock, ins); \
474 * link_bblock: Links two basic blocks
476 * Adds a control-flow edge from 'from' to 'to': 'to' is appended to
477 * from->out_bb and 'from' to to->in_bb, each list being reallocated from
478 * the cfg mempool one slot larger. Duplicate edges are detected and skipped.
481 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
483 MonoBasicBlock **newa;
/* Optional debug tracing of the edge, distinguishing real IL blocks from
 * the synthetic entry/exit blocks (which have no cil_code). */
487 if (from->cil_code) {
489 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
491 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
494 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
496 printf ("edge from entry to exit\n");
/* Skip if the out-edge already exists. */
501 for (i = 0; i < from->out_count; ++i) {
502 if (to == from->out_bb [i]) {
/* Grow from->out_bb by one (mempool allocation, never freed). */
508 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
509 for (i = 0; i < from->out_count; ++i) {
510 newa [i] = from->out_bb [i];
/* Same dance for the symmetric in-edge on 'to'. */
518 for (i = 0; i < to->in_count; ++i) {
519 if (from == to->in_bb [i]) {
525 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
526 for (i = 0; i < to->in_count; ++i) {
527 newa [i] = to->in_bb [i];
/* Public wrapper around the static link_bblock () above. */
536 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
538 link_bblock (cfg, from, to);
542 * mono_find_block_region:
544 * We mark each basic block with a region ID. We use that to avoid BB
545 * optimizations when blocks are in different regions.
548 * A region token that encodes where this region is, and information
549 * about the clause owner for this block.
551 * The region encodes the try/catch/filter clause that owns this block
552 * as well as the type. -1 is a special value that represents a block
553 * that is in none of try/catch/filter.
556 mono_find_block_region (MonoCompile *cfg, int offset)
558 MonoMethodHeader *header = cfg->header;
559 MonoExceptionClause *clause;
/* Walk the method's exception clauses in order; the returned token is
 * ((clause_index + 1) << 8) | region_kind | clause_flags. Filter blocks
 * (offset between filter start and handler start) are checked first. */
562 for (i = 0; i < header->num_clauses; ++i) {
563 clause = &header->clauses [i];
564 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
565 (offset < (clause->handler_offset)))
566 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
568 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
569 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
570 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
571 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
572 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
574 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Not in a handler: check whether the offset lies in the try range. */
577 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
578 return ((i + 1) << 8) | clause->flags;
/* Collect the exception clauses of kind TYPE whose protected range contains
 * IP but not TARGET — i.e. the handlers (typically finally blocks) that a
 * branch from IP to TARGET would leave. Returns a GList of clause pointers
 * (res); the list is appended in clause order. */
585 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
587 MonoMethodHeader *header = cfg->header;
588 MonoExceptionClause *clause;
592 for (i = 0; i < header->num_clauses; ++i) {
593 clause = &header->clauses [i];
594 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
595 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
596 if (clause->flags == type)
597 res = g_list_append (res, clause);
/* Return the per-region stack-pointer variable, creating and caching it in
 * cfg->spvars (keyed by region token) on first request. The variable is
 * flagged MONO_INST_INDIRECT so the register allocator leaves it in memory. */
604 mono_create_spvar_for_region (MonoCompile *cfg, int region)
608 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
612 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
613 /* prevent it from being register allocated */
614 var->flags |= MONO_INST_INDIRECT;
616 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up (without creating) the exception variable cached for the handler
 * at IL OFFSET; returns NULL when none exists yet. */
620 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
622 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Get-or-create counterpart of mono_find_exvar_for_offset (): the variable
 * holds the exception object for the handler at OFFSET, is typed as object,
 * and is kept out of registers via MONO_INST_INDIRECT. */
626 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
630 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
634 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
635 /* prevent it from being register allocated */
636 var->flags |= MONO_INST_INDIRECT;
638 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
644 * Returns the type used in the eval stack when @type is loaded.
645 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/* Sets inst->type (STACK_*) and inst->klass for a value of TYPE being
 * pushed on the evaluation stack. Enums and generic instances are unwrapped
 * by reassigning `type` and re-dispatching (the loop/goto structure is
 * partly elided in this listing). */
648 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
652 inst->klass = klass = mono_class_from_mono_type (type);
/* Byref arguments are managed pointers on the stack. */
654 inst->type = STACK_MP;
659 switch (type->type) {
661 inst->type = STACK_INV;
665 case MONO_TYPE_BOOLEAN:
671 inst->type = STACK_I4;
676 case MONO_TYPE_FNPTR:
677 inst->type = STACK_PTR;
679 case MONO_TYPE_CLASS:
680 case MONO_TYPE_STRING:
681 case MONO_TYPE_OBJECT:
682 case MONO_TYPE_SZARRAY:
683 case MONO_TYPE_ARRAY:
684 inst->type = STACK_OBJ;
688 inst->type = STACK_I8;
692 inst->type = STACK_R8;
694 case MONO_TYPE_VALUETYPE:
695 if (type->data.klass->enumtype) {
696 type = mono_class_enum_basetype (type->data.klass);
700 inst->type = STACK_VTYPE;
703 case MONO_TYPE_TYPEDBYREF:
704 inst->klass = mono_defaults.typed_reference_class;
705 inst->type = STACK_VTYPE;
707 case MONO_TYPE_GENERICINST:
708 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables: only valid under generic sharing; gsharedvt treats
 * them as value types, otherwise they are reference types. */
712 g_assert (cfg->generic_sharing_context);
713 if (mini_is_gsharedvt_type (cfg, type)) {
714 g_assert (cfg->gsharedvt);
715 inst->type = STACK_VTYPE;
717 inst->type = STACK_OBJ;
721 g_error ("unknown type 0x%02x in eval stack type", type->type);
726 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result stack-type of an arithmetic binop, indexed by
 * [src1->type][src2->type]; STACK_INV marks illegal combinations.
 * Row/column order follows the STACK_* enum. */
729 bin_num_table [STACK_MAX] [STACK_MAX] = {
730 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
731 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
732 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
733 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
734 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
735 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
736 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
737 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* neg_table: result type of a unary negation, indexed by operand type. */
742 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
745 /* reduce the size of this table */
/* Result type of integer-only binops (and/or/xor/div/rem...), indexed by
 * [src1->type][src2->type]; floats and object refs are invalid here. */
747 bin_int_table [STACK_MAX] [STACK_MAX] = {
748 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
749 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
750 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
751 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
752 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
753 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
754 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
755 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparison legality matrix: nonzero means the operand-type pair is
 * comparable (type_from_op () treats the result as STACK_I4, otherwise
 * STACK_INV). The small codes (1..4) distinguish degrees of validity —
 * NOTE(review): exact meaning of 2/3/4 is established by callers not shown
 * in this listing. */
759 bin_comp_table [STACK_MAX] [STACK_MAX] = {
760 /* Inv i L p F & O vt */
762 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
763 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
764 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
765 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
766 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
767 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
768 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
771 /* reduce the size of this table */
/* Result type of shift ops, indexed by [value_type][shift_amount_type]:
 * the result keeps the shifted value's type; shift counts may be I4 or
 * native int. */
773 shift_table [STACK_MAX] [STACK_MAX] = {
774 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
775 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
776 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
777 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
778 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
779 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
780 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
781 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
785 * Tables to map from the non-specific opcode to the matching
786 * type-specific opcode.
/* Each table is indexed by a STACK_* type and stores a delta that
 * type_from_op () adds to the generic CIL opcode to obtain the matching
 * type-specific IR opcode (see `ins->opcode += binops_op_map [ins->type]`). */
788 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
790 binops_op_map [STACK_MAX] = {
791 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
794 /* handles from CEE_NEG to CEE_CONV_U8 */
796 unops_op_map [STACK_MAX] = {
797 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
800 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
802 ovfops_op_map [STACK_MAX] = {
803 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
806 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
808 ovf2ops_op_map [STACK_MAX] = {
809 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
812 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
814 ovf3ops_op_map [STACK_MAX] = {
815 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
818 /* handles from CEE_BEQ to CEE_BLT_UN */
820 beqops_op_map [STACK_MAX] = {
821 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
824 /* handles from CEE_CEQ to CEE_CLT_UN */
826 ceqops_op_map [STACK_MAX] = {
827 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
831 * Sets ins->type (the type on the eval stack) according to the
832 * type of the opcode and the arguments to it.
833 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
835 * FIXME: this function sets ins->type unconditionally in some cases, but
836 * it should set it to invalid for some types (a conv.x on an object)
/* NOTE(review): several case labels and break statements are elided in this
 * listing; each group below both validates the operand types (via the
 * tables above) and specializes the generic opcode to a typed IR opcode. */
839 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
841 switch (ins->opcode) {
/* Arithmetic binops: result type from bin_num_table, opcode specialized
 * by result type. */
848 /* FIXME: check unverifiable args for STACK_MP */
849 ins->type = bin_num_table [src1->type] [src2->type];
850 ins->opcode += binops_op_map [ins->type];
/* Integer-only binops (and/or/xor/...). */
857 ins->type = bin_int_table [src1->type] [src2->type];
858 ins->opcode += binops_op_map [ins->type];
/* Shifts. */
863 ins->type = shift_table [src1->type] [src2->type];
864 ins->opcode += binops_op_map [ins->type];
/* Compares: pick L/F/I variant from the first operand's width. */
869 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
870 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
871 ins->opcode = OP_LCOMPARE;
872 else if (src1->type == STACK_R8)
873 ins->opcode = OP_FCOMPARE;
875 ins->opcode = OP_ICOMPARE;
877 case OP_ICOMPARE_IMM:
878 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
879 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
880 ins->opcode = OP_LCOMPARE_IMM;
/* Conditional branches (beq family). */
892 ins->opcode += beqops_op_map [src1->type];
895 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
896 ins->opcode += ceqops_op_map [src1->type];
/* Unsigned/unordered compares only accept combinations flagged 1 in
 * bin_comp_table (hence the & 1). */
902 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
903 ins->opcode += ceqops_op_map [src1->type];
/* Unary ops. */
907 ins->type = neg_table [src1->type];
908 ins->opcode += unops_op_map [ins->type];
911 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
912 ins->type = src1->type;
914 ins->type = STACK_INV;
915 ins->opcode += unops_op_map [ins->type];
/* Narrowing conversions to I4. */
921 ins->type = STACK_I4;
922 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: unsigned int -> R8. */
925 ins->type = STACK_R8;
926 switch (src1->type) {
929 ins->opcode = OP_ICONV_TO_R_UN;
932 ins->opcode = OP_LCONV_TO_R_UN;
936 case CEE_CONV_OVF_I1:
937 case CEE_CONV_OVF_U1:
938 case CEE_CONV_OVF_I2:
939 case CEE_CONV_OVF_U2:
940 case CEE_CONV_OVF_I4:
941 case CEE_CONV_OVF_U4:
942 ins->type = STACK_I4;
943 ins->opcode += ovf3ops_op_map [src1->type];
945 case CEE_CONV_OVF_I_UN:
946 case CEE_CONV_OVF_U_UN:
947 ins->type = STACK_PTR;
948 ins->opcode += ovf2ops_op_map [src1->type];
950 case CEE_CONV_OVF_I1_UN:
951 case CEE_CONV_OVF_I2_UN:
952 case CEE_CONV_OVF_I4_UN:
953 case CEE_CONV_OVF_U1_UN:
954 case CEE_CONV_OVF_U2_UN:
955 case CEE_CONV_OVF_U4_UN:
956 ins->type = STACK_I4;
957 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: to native unsigned int; on 64-bit an I8 source is a plain move. */
960 ins->type = STACK_PTR;
961 switch (src1->type) {
963 ins->opcode = OP_ICONV_TO_U;
967 #if SIZEOF_VOID_P == 8
968 ins->opcode = OP_LCONV_TO_U;
970 ins->opcode = OP_MOVE;
974 ins->opcode = OP_LCONV_TO_U;
977 ins->opcode = OP_FCONV_TO_U;
/* Conversions to I8. */
983 ins->type = STACK_I8;
984 ins->opcode += unops_op_map [src1->type];
986 case CEE_CONV_OVF_I8:
987 case CEE_CONV_OVF_U8:
988 ins->type = STACK_I8;
989 ins->opcode += ovf3ops_op_map [src1->type];
991 case CEE_CONV_OVF_U8_UN:
992 case CEE_CONV_OVF_I8_UN:
993 ins->type = STACK_I8;
994 ins->opcode += ovf2ops_op_map [src1->type];
/* Conversions to R4/R8. */
998 ins->type = STACK_R8;
999 ins->opcode += unops_op_map [src1->type];
1002 ins->type = STACK_R8;
1006 ins->type = STACK_I4;
1007 ins->opcode += ovfops_op_map [src1->type];
1010 case CEE_CONV_OVF_I:
1011 case CEE_CONV_OVF_U:
1012 ins->type = STACK_PTR;
1013 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic: not defined on floats. */
1016 case CEE_ADD_OVF_UN:
1018 case CEE_MUL_OVF_UN:
1020 case CEE_SUB_OVF_UN:
1021 ins->type = bin_num_table [src1->type] [src2->type];
1022 ins->opcode += ovfops_op_map [src1->type];
1023 if (ins->type == STACK_R8)
1024 ins->type = STACK_INV;
/* Memory loads: result type fixed by the load width. */
1026 case OP_LOAD_MEMBASE:
1027 ins->type = STACK_PTR;
1029 case OP_LOADI1_MEMBASE:
1030 case OP_LOADU1_MEMBASE:
1031 case OP_LOADI2_MEMBASE:
1032 case OP_LOADU2_MEMBASE:
1033 case OP_LOADI4_MEMBASE:
1034 case OP_LOADU4_MEMBASE:
1035 ins->type = STACK_PTR;
1037 case OP_LOADI8_MEMBASE:
1038 ins->type = STACK_I8;
1040 case OP_LOADR4_MEMBASE:
1041 case OP_LOADR8_MEMBASE:
1042 ins->type = STACK_R8;
1045 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers on the stack get a generic object klass. */
1049 if (ins->type == STACK_MP)
1050 ins->klass = mono_defaults.object_class;
/* ldind_type: stack type produced by each CEE_LDIND_* variant, in opcode
 * order (I1..REF). */
1055 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
/* param_table: apparently unused validation matrix — see the commented-out
 * reference in check_values_to_signature () below. */
1061 param_table [STACK_MAX] [STACK_MAX] = {
/* Validate that the stack-typed argument values in ARGS are compatible with
 * SIG's parameter list (byref-ness, reference vs. float, ...). Returns a
 * pass/fail result — NOTE(review): the return statements are elided in this
 * listing, so the exact return convention must be confirmed at the caller. */
1066 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1070 switch (args->type) {
1080 for (i = 0; i < sig->param_count; ++i) {
1081 switch (args [i].type) {
/* Managed pointers are only acceptable for byref parameters. */
1085 if (!sig->params [i]->byref)
1089 if (sig->params [i]->byref)
1091 switch (sig->params [i]->type) {
1092 case MONO_TYPE_CLASS:
1093 case MONO_TYPE_STRING:
1094 case MONO_TYPE_OBJECT:
1095 case MONO_TYPE_SZARRAY:
1096 case MONO_TYPE_ARRAY:
/* Float stack values must map to R4/R8 parameters. */
1103 if (sig->params [i]->byref)
1105 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1114 /*if (!param_table [args [i].type] [sig->params [i]->type])
1122 * When we need a pointer to the current domain many times in a method, we
1123 * call mono_domain_get() once and we store the result in a local variable.
1124 * This function returns the variable that represents the MonoDomain*.
1126 inline static MonoInst *
1127 mono_get_domainvar (MonoCompile *cfg)
/* Lazily create the int-typed local holding the cached MonoDomain*. */
1129 if (!cfg->domainvar)
1130 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1131 return cfg->domainvar;
1135 * The got_var contains the address of the Global Offset Table when AOT
/* Lazily create cfg->got_var on architectures that need an explicit GOT
 * variable (MONO_ARCH_NEED_GOT_VAR); only meaningful under AOT compilation. */
1139 mono_get_got_var (MonoCompile *cfg)
1141 #ifdef MONO_ARCH_NEED_GOT_VAR
1142 if (!cfg->compile_aot)
1144 if (!cfg->got_var) {
1145 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1147 return cfg->got_var;
/* Lazily create cfg->rgctx_var, the local that carries the runtime generic
 * context/vtable pointer for generic-shared methods. Only valid when
 * generic sharing is active (asserted). Flagged MONO_INST_INDIRECT so it is
 * stack-allocated, not register-allocated. */
1154 mono_get_vtable_var (MonoCompile *cfg)
1156 g_assert (cfg->generic_sharing_context);
1158 if (!cfg->rgctx_var) {
1159 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1160 /* force the var to be stack allocated */
1161 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1164 return cfg->rgctx_var;
/* Inverse of type_to_eval_stack_type (): map an instruction's eval-stack
 * STACK_* type back to a MonoType*. For STACK_MP and STACK_VTYPE the result
 * comes from ins->klass, so klass must be valid for those. */
1168 type_from_stack_type (MonoInst *ins) {
1169 switch (ins->type) {
1170 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1171 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1172 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1173 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1175 return &ins->klass->this_arg;
1176 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1177 case STACK_VTYPE: return &ins->klass->byval_arg;
1179 g_error ("stack type %d to monotype not handled\n", ins->type);
/* Map a MonoType to its STACK_* eval-stack category after stripping
 * enum/underlying-type wrappers. NOTE(review): the return statements for
 * most case groups are elided in this listing. */
1184 static G_GNUC_UNUSED int
1185 type_to_stack_type (MonoType *t)
1187 t = mono_type_get_underlying_type (t);
1191 case MONO_TYPE_BOOLEAN:
1194 case MONO_TYPE_CHAR:
1201 case MONO_TYPE_FNPTR:
1203 case MONO_TYPE_CLASS:
1204 case MONO_TYPE_STRING:
1205 case MONO_TYPE_OBJECT:
1206 case MONO_TYPE_SZARRAY:
1207 case MONO_TYPE_ARRAY:
1215 case MONO_TYPE_VALUETYPE:
1216 case MONO_TYPE_TYPEDBYREF:
1218 case MONO_TYPE_GENERICINST:
/* Generic instantiations over value types behave as value types. */
1219 if (mono_type_generic_inst_is_valuetype (t))
1225 g_assert_not_reached ();
/* Map a CEE_LDELEM_*/CEE_STELEM_* opcode to the MonoClass of the element
 * type it accesses; aborts on opcodes that are not array accesses. */
1232 array_access_to_klass (int opcode)
1236 return mono_defaults.byte_class;
1238 return mono_defaults.uint16_class;
1241 return mono_defaults.int_class;
1244 return mono_defaults.sbyte_class;
1247 return mono_defaults.int16_class;
1250 return mono_defaults.int32_class;
1252 return mono_defaults.uint32_class;
1255 return mono_defaults.int64_class;
1258 return mono_defaults.single_class;
1261 return mono_defaults.double_class;
1262 case CEE_LDELEM_REF:
1263 case CEE_STELEM_REF:
1264 return mono_defaults.object_class;
1266 g_assert_not_reached ();
1272 * We try to share variables when possible
/* Return a local variable used to spill eval-stack SLOT holding a value of
 * INS's stack type. Variables are cached in cfg->intvars keyed by
 * (type, slot) so blocks meeting at a join point reuse the same local;
 * slots beyond max_stack (from inlining) always get a fresh variable. */
1275 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1280 /* inlining can result in deeper stacks */
1281 if (slot >= cfg->header->max_stack)
1282 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1284 pos = ins->type - 1 + slot * STACK_MAX;
1286 switch (ins->type) {
1293 if ((vnum = cfg->intvars [pos]))
1294 return cfg->varinfo [vnum];
1295 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1296 cfg->intvars [pos] = res->inst_c0;
/* Non-cacheable stack types fall through to an uncached variable. */
1299 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Under AOT compilation, remember the (image, token) pair that produced
 * KEY so the AOT compiler can re-resolve it later via cfg->token_info_hash.
 * Skipped when a generic context is set or for wrapper-made references
 * (token table == 0), where image+token alone is not enough to re-resolve. */
1305 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1308 * Don't use this if a generic_context is set, since that means AOT can't
1309 * look up the method using just the image+token.
1310 * table == 0 means this is a reference made from a wrapper.
1312 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1313 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1314 jump_info_token->image = image;
1315 jump_info_token->token = token;
1316 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1321 * This function is called to handle items that are left on the evaluation stack
1322 * at basic block boundaries. What happens is that we save the values to local variables
1323 * and we reload them later when first entering the target basic block (with the
1324 * handle_loaded_temps () function).
1325 * A single join point will use the same variables (stored in the array bb->out_stack or
1326 * bb->in_stack, if the basic block is before or after the join point).
1328 * This function needs to be called _before_ emitting the last instruction of
1329 * the bb (i.e. before emitting a branch).
1330 * If the stack merge fails at a join point, cfg->unverifiable is set.
1333 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1336 MonoBasicBlock *bb = cfg->cbb;
1337 MonoBasicBlock *outb;
1338 MonoInst *inst, **locals;
1343 if (cfg->verbose_level > 3)
1344 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bblock: decide which locals carry the stack values. */
1345 if (!bb->out_scount) {
1346 bb->out_scount = count;
1347 //printf ("bblock %d has out:", bb->block_num);
/* Prefer reusing a successor's already-assigned in_stack, if any. */
1349 for (i = 0; i < bb->out_count; ++i) {
1350 outb = bb->out_bb [i];
1351 /* exception handlers are linked, but they should not be considered for stack args */
1352 if (outb->flags & BB_EXCEPTION_HANDLER)
1354 //printf (" %d", outb->block_num);
1355 if (outb->in_stack) {
1357 bb->out_stack = outb->in_stack;
/* No successor had one: allocate a fresh out_stack from the mempool. */
1363 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1364 for (i = 0; i < count; ++i) {
1366 * try to reuse temps already allocated for this purpose, if they occupy the same
1367 * stack slot and if they are of the same type.
1368 * This won't cause conflicts since if 'local' is used to
1369 * store one of the values in the in_stack of a bblock, then
1370 * the same variable will be used for the same outgoing stack
1372 * This doesn't work when inlining methods, since the bblocks
1373 * in the inlined methods do not inherit their in_stack from
1374 * the bblock they are inlined to. See bug #58863 for an
1377 if (cfg->inlined_method)
1378 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1380 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out_stack to successors that have no in_stack yet; a
 * successor with a mismatched in_scount makes the method unverifiable. */
1385 for (i = 0; i < bb->out_count; ++i) {
1386 outb = bb->out_bb [i];
1387 /* exception handlers are linked, but they should not be considered for stack args */
1388 if (outb->flags & BB_EXCEPTION_HANDLER)
1390 if (outb->in_scount) {
1391 if (outb->in_scount != bb->out_scount) {
1392 cfg->unverifiable = TRUE;
1395 continue; /* check they are the same locals */
1397 outb->in_scount = count;
1398 outb->in_stack = bb->out_stack;
1401 locals = bb->out_stack;
/* Spill each stack value into its carrier local and replace the stack
 * entry with the local, so subsequent code reads the stored value. */
1403 for (i = 0; i < count; ++i) {
1404 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1405 inst->cil_code = sp [i]->cil_code;
1406 sp [i] = locals [i];
1407 if (cfg->verbose_level > 3)
1408 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1412 * It is possible that the out bblocks already have in_stack assigned, and
1413 * the in_stacks differ. In this case, we will store to all the different
1420 /* Find a bblock which has a different in_stack */
1422 while (bindex < bb->out_count) {
1423 outb = bb->out_bb [bindex];
1424 /* exception handlers are linked, but they should not be considered for stack args */
1425 if (outb->flags & BB_EXCEPTION_HANDLER) {
1429 if (outb->in_stack != locals) {
/* Emit the same stores targeting this successor's own variables. */
1430 for (i = 0; i < count; ++i) {
1431 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1432 inst->cil_code = sp [i]->cil_code;
1433 sp [i] = locals [i];
1434 if (cfg->verbose_level > 3)
1435 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1437 locals = outb->in_stack;
1446 /* Emit code which loads interface_offsets [klass->interface_id]
1447 * The array is stored in memory before vtable.
/*
 * mini_emit_load_intf_reg_vtable:
 *   Load the interface offset entry for KLASS into INTF_REG, given the
 *   vtable address in VTABLE_REG. The entry lives at a negative offset
 *   from the vtable; under AOT the (adjusted) interface id comes from a
 *   MONO_PATCH_INFO_ADJUSTED_IID patch instead of an immediate.
 */
1450 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1452 if (cfg->compile_aot) {
1453 int ioffset_reg = alloc_preg (cfg);
1454 int iid_reg = alloc_preg (cfg);
/* The AOT constant already encodes the negative array offset. */
1456 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1457 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1458 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT: compute the negative offset directly from the interface id. */
1461 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P))
/*
 * mini_emit_interface_bitmap_check:
 *   Emit code which sets INTF_BIT_REG to a nonzero value iff the bit for
 *   klass->interface_id is set in the interface bitmap located at
 *   BASE_REG + OFFSET. Three strategies:
 *   - compressed bitmap: call the mono_class_interface_match icall;
 *   - AOT: compute byte index and bit mask at runtime from a patched iid;
 *   - JIT: use immediates derived from klass->interface_id.
 */
1466 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1468 int ibitmap_reg = alloc_preg (cfg);
1469 #ifdef COMPRESSED_INTERFACE_BITMAP
1471 MonoInst *res, *ins;
1472 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1473 MONO_ADD_INS (cfg->cbb, ins);
/* The icall receives the bitmap pointer and the interface id. */
1475 if (cfg->compile_aot)
1476 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1478 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1479 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1480 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1482 int ibitmap_byte_reg = alloc_preg (cfg);
1484 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1486 if (cfg->compile_aot) {
/* AOT: iid is only known at load time, so byte = bitmap[iid >> 3] and
 * mask = 1 << (iid & 7) must be computed with emitted instructions. */
1487 int iid_reg = alloc_preg (cfg);
1488 int shifted_iid_reg = alloc_preg (cfg);
1489 int ibitmap_byte_address_reg = alloc_preg (cfg);
1490 int masked_iid_reg = alloc_preg (cfg);
1491 int iid_one_bit_reg = alloc_preg (cfg);
1492 int iid_bit_reg = alloc_preg (cfg);
1493 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1494 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1495 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1496 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1497 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1498 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1499 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1500 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: both the byte index and the bit mask fold into immediates. */
1502 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1503 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1509 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1510 * stored in "klass_reg" implements the interface "klass".
/* Thin wrapper: the bitmap lives at MonoClass::interface_bitmap. */
1513 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1515 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1519 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1520 * stored in "vtable_reg" implements the interface "klass".
/* Thin wrapper: the bitmap lives at MonoVTable::interface_bitmap. */
1523 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1525 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1529 * Emit code which checks whether the interface id of @klass is smaller
1530 * than the value given by max_iid_reg.
/* On failure (max_iid < iid): branch to FALSE_TARGET if given, otherwise
 * throw InvalidCastException. */
1533 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1534 MonoBasicBlock *false_target)
1536 if (cfg->compile_aot) {
/* AOT: the interface id is patched in at load time. */
1537 int iid_reg = alloc_preg (cfg);
1538 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1539 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1542 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1544 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1546 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1549 /* Same as above, but obtains max_iid from a vtable */
1551 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1552 MonoBasicBlock *false_target)
1554 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is a 16-bit field, hence the unsigned 2-byte load. */
1556 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1557 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1560 /* Same as above, but obtains max_iid from a klass */
1562 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1563 MonoBasicBlock *false_target)
1565 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is a 16-bit field, hence the unsigned 2-byte load. */
1567 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1568 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 *   Emit an "is-instance" subtype test: decide whether the class whose
 *   pointer is in KLASS_REG derives from KLASS, by comparing the entry
 *   supertypes [klass->idepth - 1] against KLASS. The comparand comes
 *   from KLASS_INS (when non-NULL), an AOT class constant, or an
 *   immediate. On a match, branch to TRUE_TARGET.
 */
1572 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1574 int idepth_reg = alloc_preg (cfg);
1575 int stypes_reg = alloc_preg (cfg);
1576 int stype = alloc_preg (cfg);
/* Make sure klass->supertypes/idepth are initialized before we read them. */
1578 mono_class_setup_supertypes (klass);
/* Supertype tables shorter than the default size always contain the
 * entry we index, so the depth check is only needed for deeper types. */
1580 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1581 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1582 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1583 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1585 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1586 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1588 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1589 } else if (cfg->compile_aot) {
1590 int const_reg = alloc_preg (cfg);
1591 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1592 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1594 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1596 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: subtype test without an explicit class MonoInst. */
1600 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1602 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 *   Emit an interface cast check against the vtable in VTABLE_REG: first
 *   guard the interface id against the vtable's max_interface_id, then
 *   test the interface bitmap bit. If the bit is set, branch to
 *   TRUE_TARGET when given; otherwise throw InvalidCastException when the
 *   bit is clear.
 */
1606 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1608 int intf_reg = alloc_preg (cfg);
1610 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1611 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1612 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1614 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1616 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1620 * Variant of the above that takes a register to the class, not the vtable.
1623 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1625 int intf_bit_reg = alloc_preg (cfg);
/* Same shape as mini_emit_iface_cast, but reads MonoClass fields. */
1627 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1628 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1629 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1631 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1633 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 *   Emit an exact class-equality check: compare the class pointer in
 *   KLASS_REG against KLASS (taken from KLASS_INST when provided, from an
 *   AOT class constant, or as an immediate) and throw
 *   InvalidCastException on mismatch.
 */
1637 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1640 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1641 } else if (cfg->compile_aot) {
1642 int const_reg = alloc_preg (cfg);
1643 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1644 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1646 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1648 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact class check without an explicit MonoInst. */
1652 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1654 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 *   Compare the class pointer in KLASS_REG against KLASS and branch to
 *   TARGET using BRANCH_OP (e.g. OP_PBEQ / OP_PBNE_UN) instead of
 *   throwing. Under AOT the comparand is a patched class constant.
 */
1658 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1660 if (cfg->compile_aot) {
1661 int const_reg = alloc_preg (cfg);
1662 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1663 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1665 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1667 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/* Forward declaration: mini_emit_castclass_inst below recurses through it
 * for arrays of arrays. */
1671 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 *   Emit a castclass check of the class in KLASS_REG against KLASS,
 *   throwing InvalidCastException on failure. The array branch checks
 *   rank and then the element (cast_class); the non-array branch walks
 *   the supertype table. OBJ_REG == -1 skips the SZARRAY bounds check
 *   (used for the recursive array-of-array case).
 */
1674 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1677 int rank_reg = alloc_preg (cfg);
1678 int eclass_reg = alloc_preg (cfg);
/* klass_inst is only supported on the non-array path below. */
1680 g_assert (!klass_inst);
/* Ranks must match exactly. */
1681 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1682 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1683 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1684 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1685 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Element-type checks; the object/enum special cases mirror the
 * runtime's array covariance rules for enums and their underlying types
 * — NOTE(review): elided lines make the exact pairing worth re-checking. */
1686 if (klass->cast_class == mono_defaults.object_class) {
1687 int parent_reg = alloc_preg (cfg);
1688 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1689 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1690 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1691 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1692 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1693 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1694 } else if (klass->cast_class == mono_defaults.enum_class) {
1695 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1696 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1697 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1699 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1700 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1703 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1704 /* Check that the object is a vector too */
/* Vectors (SZARRAY) have a NULL bounds pointer. */
1705 int bounds_reg = alloc_preg (cfg);
1706 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1707 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1708 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: supertype-table walk, throwing instead of branching
 * (compare with mini_emit_isninst_cast_inst above). */
1711 int idepth_reg = alloc_preg (cfg);
1712 int stypes_reg = alloc_preg (cfg);
1713 int stype = alloc_preg (cfg);
1715 mono_class_setup_supertypes (klass);
1717 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1718 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1719 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1720 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1722 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1723 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1724 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass without an explicit class MonoInst. */
1729 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1731 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 *   Emit inline stores that fill SIZE bytes at DESTREG+OFFSET with VAL
 *   (only VAL == 0 is supported, see the assert). Small aligned sizes use
 *   a single immediate store; otherwise a value register is filled and
 *   stores are emitted at decreasing granularity, honouring ALIGN and
 *   the NO_UNALIGNED_ACCESS configuration.
 */
1735 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1739 g_assert (val == 0);
/* Fast path: one immediate store when size fits the alignment. */
1744 if ((size <= 4) && (size <= align)) {
1747 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1750 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1753 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1755 #if SIZEOF_REGISTER == 8
1757 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General path: materialize VAL in a register once, then store it out. */
1763 val_reg = alloc_preg (cfg);
1765 if (SIZEOF_REGISTER == 8)
1766 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1768 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* This could be optimized further if necessary */
/* Byte stores until the destination is suitably aligned. */
1773 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1780 #if !NO_UNALIGNED_ACCESS
1781 if (SIZEOF_REGISTER == 8) {
1783 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1788 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* Remaining tail: 4-, 2-, then 1-byte stores. */
1796 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1801 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1806 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 *   Emit an inline copy of SIZE bytes from SRCREG+SOFFSET to
 *   DESTREG+DOFFSET using load/store pairs at decreasing granularity
 *   (8/4/2/1 bytes), honouring ALIGN and NO_UNALIGNED_ACCESS. The source
 *   and destination regions are assumed not to overlap.
 */
1813 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1820 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1821 g_assert (size < 10000);
/* This could be optimized further if necessary */
/* Byte copies until the pointers are suitably aligned. */
1826 cur_reg = alloc_preg (cfg);
1827 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1828 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1835 #if !NO_UNALIGNED_ACCESS
1836 if (SIZEOF_REGISTER == 8) {
/* 8-byte chunks on 64-bit targets. */
1838 cur_reg = alloc_preg (cfg);
1839 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1840 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Remaining tail: 4-, 2-, then 1-byte copies. */
1849 cur_reg = alloc_preg (cfg);
1850 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1851 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1857 cur_reg = alloc_preg (cfg);
1858 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1859 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1865 cur_reg = alloc_preg (cfg);
1866 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1867 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * ret_type_to_call_opcode:
 *   Map a method's return TYPE to the matching call opcode, selecting the
 *   indirect (_REG) form when CALLI is set and the virtual form when VIRT
 *   is set. Enums are unwrapped to their base type and generic instances
 *   are re-dispatched on the container class.
 *   NOTE(review): several case labels are elided in this view.
 */
1875 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
/* byref returns are pointer-sized — handled before the switch. */
1878 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
/* Resolve shared generic type vars to a concrete basic type first. */
1881 type = mini_get_basic_type_from_generic (gsctx, type);
1882 switch (type->type) {
1883 case MONO_TYPE_VOID:
1884 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1887 case MONO_TYPE_BOOLEAN:
1890 case MONO_TYPE_CHAR:
1893 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1897 case MONO_TYPE_FNPTR:
1898 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1899 case MONO_TYPE_CLASS:
1900 case MONO_TYPE_STRING:
1901 case MONO_TYPE_OBJECT:
1902 case MONO_TYPE_SZARRAY:
1903 case MONO_TYPE_ARRAY:
1904 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
/* 64-bit integer and floating point returns use dedicated opcodes. */
1907 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1910 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1911 case MONO_TYPE_VALUETYPE:
1912 if (type->data.klass->enumtype) {
1913 type = mono_class_enum_basetype (type->data.klass);
1916 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1917 case MONO_TYPE_TYPEDBYREF:
1918 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1919 case MONO_TYPE_GENERICINST:
1920 type = &type->data.generic_class->container_class->byval_arg;
1923 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1929 * target_type_is_incompatible:
1930 * @cfg: MonoCompile context
1932 * Check that the item @arg on the evaluation stack can be stored
1933 * in the target type (can be a local, or field, etc).
1934 * The cfg arg can be used to check if we need verification or just
1937 * Returns: non-0 value if arg can't be stored on a target.
1940 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1942 MonoType *simple_type;
1945 if (target->byref) {
1946 /* FIXME: check that the pointed to types match */
1947 if (arg->type == STACK_MP)
1948 return arg->klass != mono_class_from_mono_type (target);
1949 if (arg->type == STACK_PTR)
/* Strip enum wrappers etc. before dispatching on the basic type. */
1954 simple_type = mono_type_get_underlying_type (target);
1955 switch (simple_type->type) {
1956 case MONO_TYPE_VOID:
1960 case MONO_TYPE_BOOLEAN:
1963 case MONO_TYPE_CHAR:
1966 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1970 /* STACK_MP is needed when setting pinned locals */
1971 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1976 case MONO_TYPE_FNPTR:
1978 * Some opcodes like ldloca return 'transient pointers' which can be stored
1979 * in native int. (#688008).
1981 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1984 case MONO_TYPE_CLASS:
1985 case MONO_TYPE_STRING:
1986 case MONO_TYPE_OBJECT:
1987 case MONO_TYPE_SZARRAY:
1988 case MONO_TYPE_ARRAY:
1989 if (arg->type != STACK_OBJ)
1991 /* FIXME: check type compatibility */
1995 if (arg->type != STACK_I8)
2000 if (arg->type != STACK_R8)
/* Value types additionally require an exact class match. */
2003 case MONO_TYPE_VALUETYPE:
2004 if (arg->type != STACK_VTYPE)
2006 klass = mono_class_from_mono_type (simple_type);
2007 if (klass != arg->klass)
2010 case MONO_TYPE_TYPEDBYREF:
2011 if (arg->type != STACK_VTYPE)
2013 klass = mono_class_from_mono_type (simple_type);
2014 if (klass != arg->klass)
2017 case MONO_TYPE_GENERICINST:
2018 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2019 if (arg->type != STACK_VTYPE)
2021 klass = mono_class_from_mono_type (simple_type);
2022 if (klass != arg->klass)
2026 if (arg->type != STACK_OBJ)
2028 /* FIXME: check type compatibility */
/* Generic sharing: a type var is either a vtype or a reference here. */
2032 case MONO_TYPE_MVAR:
2033 g_assert (cfg->generic_sharing_context);
2034 if (mini_type_var_is_vt (cfg, simple_type)) {
2035 if (arg->type != STACK_VTYPE)
2038 if (arg->type != STACK_OBJ)
2043 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2049 * Prepare arguments for passing to a function call.
2050 * Return a non-zero value if the arguments can't be passed to the given
2052 * The type checks are not yet complete and some conversions may need
2053 * casts on 32 or 64 bit architectures.
2055 * FIXME: implement this using target_type_is_incompatible ()
2058 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2060 MonoType *simple_type;
/* The implicit 'this' argument must be a reference or pointer. */
2064 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2068 for (i = 0; i < sig->param_count; ++i) {
2069 if (sig->params [i]->byref) {
2070 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2074 simple_type = sig->params [i];
/* Resolve shared generic type vars before dispatching. */
2075 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
2077 switch (simple_type->type) {
2078 case MONO_TYPE_VOID:
2083 case MONO_TYPE_BOOLEAN:
2086 case MONO_TYPE_CHAR:
2089 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2095 case MONO_TYPE_FNPTR:
2096 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2099 case MONO_TYPE_CLASS:
2100 case MONO_TYPE_STRING:
2101 case MONO_TYPE_OBJECT:
2102 case MONO_TYPE_SZARRAY:
2103 case MONO_TYPE_ARRAY:
2104 if (args [i]->type != STACK_OBJ)
2109 if (args [i]->type != STACK_I8)
2114 if (args [i]->type != STACK_R8)
2117 case MONO_TYPE_VALUETYPE:
/* Enums are re-checked as their underlying base type. */
2118 if (simple_type->data.klass->enumtype) {
2119 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2122 if (args [i]->type != STACK_VTYPE)
2125 case MONO_TYPE_TYPEDBYREF:
2126 if (args [i]->type != STACK_VTYPE)
2129 case MONO_TYPE_GENERICINST:
2130 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2134 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 *   Map a virtual-call opcode to its direct-call counterpart (used when a
 *   virtual call can be statically devirtualized).
 *   NOTE(review): most case labels are elided in this view.
 */
2142 callvirt_to_call (int opcode)
2147 case OP_VOIDCALLVIRT:
2156 g_assert_not_reached ();
/*
 * callvirt_to_call_membase:
 *   Map a virtual-call opcode to its _MEMBASE form, which loads the
 *   target address from memory (vtable/IMT slot) at call time.
 */
2163 callvirt_to_call_membase (int opcode)
2167 return OP_CALL_MEMBASE;
2168 case OP_VOIDCALLVIRT:
2169 return OP_VOIDCALL_MEMBASE;
2171 return OP_FCALL_MEMBASE;
2173 return OP_LCALL_MEMBASE;
2175 return OP_VCALL_MEMBASE;
2177 g_assert_not_reached ();
2183 #ifdef MONO_ARCH_HAVE_IMT
/*
 * emit_imt_argument:
 *   Pass the IMT argument (the interface METHOD being called, or the
 *   precomputed IMT_ARG) to CALL. The LLVM backend records the register
 *   on the call; the native backends use MONO_ARCH_IMT_REG when available
 *   and otherwise delegate to mono_arch_emit_imt_argument ().
 */
2185 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2189 if (COMPILE_LLVM (cfg)) {
2190 method_reg = alloc_preg (cfg);
/* Prefer an explicit IMT_ARG; fall back to the method constant. */
2193 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2194 } else if (cfg->compile_aot) {
2195 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2198 MONO_INST_NEW (cfg, ins, OP_PCONST);
2199 ins->inst_p0 = method;
2200 ins->dreg = method_reg;
2201 MONO_ADD_INS (cfg->cbb, ins);
2205 call->imt_arg_reg = method_reg;
2207 #ifdef MONO_ARCH_IMT_REG
2208 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2210 /* Need this to keep the IMT arg alive */
2211 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
/* Non-LLVM path: same constant-materialization logic as above. */
2216 #ifdef MONO_ARCH_IMT_REG
2217 method_reg = alloc_preg (cfg);
2220 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2221 } else if (cfg->compile_aot) {
2222 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2225 MONO_INST_NEW (cfg, ins, OP_PCONST);
2226 ins->inst_p0 = method;
2227 ins->dreg = method_reg;
2228 MONO_ADD_INS (cfg->cbb, ins);
2231 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* No dedicated IMT register: let the architecture backend decide. */
2233 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/*
 * mono_patch_info_new:
 *   Allocate a MonoJumpInfo from MP describing a patch of TYPE at IP
 *   pointing at TARGET. The memory is owned by the mempool.
 */
2238 static MonoJumpInfo *
2239 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2241 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2245 ji->data.target = target;
/*
 * mono_emit_call_args:
 *   Create a MonoCallInst for SIG, pick the call opcode (tail, indirect,
 *   virtual, ...), wire up value-type return handling, do the soft-float
 *   r8->r4 argument conversion when needed, and let the backend (LLVM or
 *   the architecture code) lay out the outgoing arguments.
 */
2250 inline static MonoCallInst *
2251 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2252 MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
2255 #ifdef MONO_ARCH_SOFT_FLOAT
2260 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2262 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2265 call->signature = sig;
2266 call->rgctx_reg = rgctx;
2268 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
/* vtype returns: either reuse cfg->vret_addr or create a temp and pass
 * its address via OP_OUTARG_VTRETADDR (see the comment below).
 * NOTE(review): the branch conditions distinguishing the two vtype
 * cases are partially elided here. */
2271 if (mini_type_is_vtype (cfg, sig->ret)) {
2272 call->vret_var = cfg->vret_addr;
2273 //g_assert_not_reached ();
2275 } else if (mini_type_is_vtype (cfg, sig->ret)) {
2276 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2279 temp->backend.is_pinvoke = sig->pinvoke;
2282 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2283 * address of return value to increase optimization opportunities.
2284 * Before vtype decomposition, the dreg of the call ins itself represents the
2285 * fact the call modifies the return value. After decomposition, the call will
2286 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2287 * will be transformed into an LDADDR.
2289 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2290 loada->dreg = alloc_preg (cfg);
2291 loada->inst_p0 = temp;
2292 /* We reference the call too since call->dreg could change during optimization */
2293 loada->inst_p1 = call;
2294 MONO_ADD_INS (cfg->cbb, loada);
2296 call->inst.dreg = temp->dreg;
2298 call->vret_var = loada;
2299 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2300 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2302 #ifdef MONO_ARCH_SOFT_FLOAT
2303 if (COMPILE_SOFT_FLOAT (cfg)) {
2305 * If the call has a float argument, we would need to do an r8->r4 conversion using
2306 * an icall, but that cannot be done during the call sequence since it would clobber
2307 * the call registers + the stack. So we do it before emitting the call.
2309 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2311 MonoInst *in = call->args [i];
/* Argument 0 may be the implicit 'this' (a native int). */
2313 if (i >= sig->hasthis)
2314 t = sig->params [i - sig->hasthis];
2316 t = &mono_defaults.int_class->byval_arg;
2317 t = mono_type_get_underlying_type (t);
2319 if (!t->byref && t->type == MONO_TYPE_R4) {
2320 MonoInst *iargs [1];
2324 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2326 /* The result will be in an int vreg */
2327 call->args [i] = conv;
2333 call->need_unbox_trampoline = unbox_trampoline;
2336 if (COMPILE_LLVM (cfg))
2337 mono_llvm_emit_call (cfg, call);
2339 mono_arch_emit_call (cfg, call);
2341 mono_arch_emit_call (cfg, call);
/* Track the largest outgoing-args area and flag that calls exist. */
2344 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2345 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *   Attach the rgctx argument (in RGCTX_REG) to CALL: through the
 *   dedicated MONO_ARCH_RGCTX_REG when the architecture defines one,
 *   otherwise by recording the register on the call instruction.
 */
2351 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2353 #ifdef MONO_ARCH_RGCTX_REG
2354 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2355 cfg->uses_rgctx_reg = TRUE;
2356 call->rgctx_reg = TRUE;
2358 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 *   Emit an indirect call through ADDR with signature SIG, optionally
 *   passing RGCTX_ARG. Returns the call instruction as a MonoInst*.
 */
2365 inline static MonoInst*
2366 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
/* Copy the rgctx value into its own vreg before building the call. */
2372 rgctx_reg = mono_alloc_preg (cfg);
2373 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2376 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
/* The indirect target address is the call's first source register. */
2378 call->inst.sreg1 = addr->dreg;
2380 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2383 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2385 return (MonoInst*)call;
2388 /* This is like calli, but we pass rgctx/imt arguments as well */
2390 emit_gsharedvt_call (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoMethod *method, MonoInst *imt_arg, MonoInst *rgctx_arg)
/* Copy the rgctx value into its own vreg before building the call. */
2396 rgctx_reg = mono_alloc_preg (cfg);
2397 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2400 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2402 call->inst.sreg1 = addr->dreg;
/* Unlike mono_emit_calli, the IMT argument is also threaded through. */
2405 emit_imt_argument (cfg, call, method, imt_arg);
2407 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2410 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2412 return (MonoInst*)call;
2416 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2418 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2421 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2422 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2424 gboolean might_be_remote;
2425 gboolean virtual = this != NULL;
2426 gboolean enable_for_aot = TRUE;
2430 gboolean need_unbox_trampoline;
2433 rgctx_reg = mono_alloc_preg (cfg);
2434 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2437 if (method->string_ctor) {
2438 /* Create the real signature */
2439 /* FIXME: Cache these */
2440 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2441 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2446 context_used = mono_method_check_context_used (method);
2448 might_be_remote = this && sig->hasthis &&
2449 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2450 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
2452 if (might_be_remote && context_used) {
2455 g_assert (cfg->generic_sharing_context);
2457 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2459 return mono_emit_calli (cfg, sig, args, addr, NULL);
2462 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2464 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2466 if (might_be_remote)
2467 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2469 call->method = method;
2470 call->inst.flags |= MONO_INST_HAS_METHOD;
2471 call->inst.inst_left = this;
2474 int vtable_reg, slot_reg, this_reg;
2476 this_reg = this->dreg;
2478 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2479 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2480 MonoInst *dummy_use;
2482 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2484 /* Make a call to delegate->invoke_impl */
2485 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2486 call->inst.inst_basereg = this_reg;
2487 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2488 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2490 /* We must emit a dummy use here because the delegate trampoline will
2491 replace the 'this' argument with the delegate target making this activation
2492 no longer a root for the delegate.
2493 This is an issue for delegates that target collectible code such as dynamic
2494 methods of GC'able assemblies.
2496 For a test case look into #667921.
2498 FIXME: a dummy use is not the best way to do it as the local register allocator
2499 will put it on a caller save register and spil it around the call.
2500 Ideally, we would either put it on a callee save register or only do the store part.
2502 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2504 return (MonoInst*)call;
2508 if ((!cfg->compile_aot || enable_for_aot) &&
2509 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2510 (MONO_METHOD_IS_FINAL (method) &&
2511 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2512 !(method->klass->marshalbyref && context_used)) {
2514 * the method is not virtual, we just need to ensure this is not null
2515 * and then we can call the method directly.
2517 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2519 * The check above ensures method is not gshared, this is needed since
2520 * gshared methods can't have wrappers.
2522 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2525 if (!method->string_ctor)
2526 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2528 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2529 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2531 * the method is virtual, but we can statically dispatch since either
2532 * it's class or the method itself are sealed.
2533 * But first we need to ensure it's not a null reference.
2535 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2537 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2539 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2541 vtable_reg = alloc_preg (cfg);
2542 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2543 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2545 #ifdef MONO_ARCH_HAVE_IMT
2547 guint32 imt_slot = mono_method_get_imt_slot (method);
2548 emit_imt_argument (cfg, call, call->method, imt_arg);
2549 slot_reg = vtable_reg;
2550 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2553 if (slot_reg == -1) {
2554 slot_reg = alloc_preg (cfg);
2555 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2556 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2559 slot_reg = vtable_reg;
2560 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2561 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2562 #ifdef MONO_ARCH_HAVE_IMT
2564 g_assert (mono_method_signature (method)->generic_param_count);
2565 emit_imt_argument (cfg, call, call->method, imt_arg);
2570 call->inst.sreg1 = slot_reg;
2571 call->virtual = TRUE;
2575 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2578 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2580 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *
 * Convenience wrapper: emit a call to METHOD using the method's own
 * signature, with no imt/rgctx arguments (both passed as NULL).
 */
2584 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2586 	return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL, NULL);
/*
 * mono_emit_native_call:
 *
 * Emit a direct call to the native function FUNC with signature SIG.
 * The call is a plain call: not virtual, no tail call, no rgctx arg and
 * no unbox trampoline (all the boolean arguments below are FALSE).
 */
2590 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2597 	call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2600 	MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2602 	return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *
 * Emit a call to the JIT icall identified by its address FUNC.  The icall
 * is looked up by address and the call goes through its wrapper, using the
 * signature registered for the icall.
 */
2606 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2608 	MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2612 	return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2616  * mono_emit_abs_call:
2618  *   Emit a call to the runtime function described by PATCH_TYPE and DATA.
2620 inline static MonoInst*
2621 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2622 					MonoMethodSignature *sig, MonoInst **args)
2624 	MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2628 	 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
	/* Lazily create the table mapping patch-info "addresses" back to their
	 * MonoJumpInfo so the backend can recognize them when emitting the call. */
2631 	if (cfg->abs_patches == NULL)
2632 		cfg->abs_patches = g_hash_table_new (NULL, NULL);
2633 	g_hash_table_insert (cfg->abs_patches, ji, ji);
2634 	ins = mono_emit_native_call (cfg, ji, sig, args);
	/* Mark the call so later passes know the fptr is a patch, not a real address. */
2635 	((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *
 * Widen the result INS of a call with signature FSIG when the return type is
 * a small integer.  Native (pinvoke) code — and LLVM-compiled code — might
 * return sub-register-sized integers without initializing the upper bits, so
 * an explicit sign/zero extension is emitted.  Returns the (possibly new)
 * result instruction.
 */
2640 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2642 	if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2643 		if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2647 			 * Native code might return non register sized integers
2648 			 * without initializing the upper bits.
			/* Pick the conversion matching the load opcode for the return type. */
2650 			switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2651 			case OP_LOADI1_MEMBASE:
2652 				widen_op = OP_ICONV_TO_I1;
2654 			case OP_LOADU1_MEMBASE:
2655 				widen_op = OP_ICONV_TO_U1;
2657 			case OP_LOADI2_MEMBASE:
2658 				widen_op = OP_ICONV_TO_I2;
2660 			case OP_LOADU2_MEMBASE:
2661 				widen_op = OP_ICONV_TO_U2;
2667 			if (widen_op != -1) {
2668 				int dreg = alloc_preg (cfg);
2671 				EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
				/* Preserve the stack type of the original call result. */
2672 				widen->type = ins->type;
/*
 * get_memcpy_method:
 *
 * Return the managed String.memcpy helper from corlib, caching it in a
 * static on first use.  Aborts if running against a corlib that lacks it.
 */
2682 get_memcpy_method (void)
2684 	static MonoMethod *memcpy_method = NULL;
2685 	if (!memcpy_method) {
2686 		memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2688 			g_error ("Old corlib found. Install a new one");
2690 	return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 * Recursively fill WB_BITMAP with one bit per pointer-sized slot of KLASS
 * (starting at byte OFFSET) that holds a GC reference and therefore needs a
 * write barrier when copied.  Static fields are skipped; nested valuetypes
 * with references are descended into.
 */
2694 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
2696 	MonoClassField *field;
2697 	gpointer iter = NULL;
2699 	while ((field = mono_class_get_fields (klass, &iter))) {
2702 		if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
		/* For valuetypes field->offset includes the (absent) object header; strip it. */
2704 		foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2705 		if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
			/* Reference fields must be pointer-aligned for the bitmap to be valid. */
2706 			g_assert ((foffset % SIZEOF_VOID_P) == 0);
2707 			*wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2709 			MonoClass *field_class = mono_class_from_mono_type (field->type);
2710 			if (field_class->has_references)
2711 				create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *
 * Emit a GC write barrier for the store of a reference into *PTR.  The stored
 * value is given either as an instruction (VALUE) or, when VALUE is NULL, as a
 * raw register (VALUE_REG).  Depending on the GC configuration this emits
 * either a per-arch card-table barrier opcode, inline card-marking code, or a
 * call to the generic managed write barrier.  No-op if write barriers are
 * disabled for this compile.
 */
2717 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value, int value_reg)
2719 	int card_table_shift_bits;
2720 	gpointer card_table_mask;
2722 	MonoInst *dummy_use;
2723 	int nursery_shift_bits;
2724 	size_t nursery_size;
2725 	gboolean has_card_table_wb = FALSE;
2727 	if (!cfg->gen_write_barriers)
2730 	card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
2732 	mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
2734 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
2735 	has_card_table_wb = TRUE;
	/* Fast path: the backend implements the card table barrier as one opcode. */
2738 	if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0) {
2741 		MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2742 		wbarrier->sreg1 = ptr->dreg;
2744 			wbarrier->sreg2 = value->dreg;
2746 			wbarrier->sreg2 = value_reg;
2747 		MONO_ADD_INS (cfg->cbb, wbarrier);
2748 	} else if (card_table) {
		/* Inline card marking: card = card_table + (ptr >> shift); *card = 1. */
2749 		int offset_reg = alloc_preg (cfg);
2750 		int card_reg = alloc_preg (cfg);
2753 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
2754 		if (card_table_mask)
2755 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
2757 		/*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
2758 		 * IMM's larger than 32bits.
2760 		if (cfg->compile_aot) {
2761 			MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
2763 			MONO_INST_NEW (cfg, ins, OP_PCONST);
2764 			ins->inst_p0 = card_table;
2765 			ins->dreg = card_reg;
2766 			MONO_ADD_INS (cfg->cbb, ins);
2769 		MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
2770 		MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
		/* Fallback: call the generic managed write barrier. */
2772 		MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2773 		mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
	/* Keep the stored value alive across the barrier with a dummy use. */
2777 		EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
2779 		MONO_INST_NEW (cfg, dummy_use, OP_DUMMY_USE);
2780 		dummy_use->sreg1 = value_reg;
2781 		MONO_ADD_INS (cfg->cbb, dummy_use);
/*
 * mono_emit_wb_aware_memcpy:
 *
 * Try to emit an inline, write-barrier-aware copy of SIZE bytes of valuetype
 * KLASS from iargs[1] to iargs[0].  Small copies are unrolled with a write
 * barrier emitted after each reference slot (per the bitmap computed by
 * create_write_barrier_bitmap); larger copies fall back to the
 * mono_gc_wbarrier_value_copy_bitmap icall.  Returns early (bails out) when
 * alignment or size make the inline strategy invalid.
 */
2786 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
2788 	int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
2789 	unsigned need_wb = 0;
2794 	/*types with references can't have alignment smaller than sizeof(void*) */
2795 	if (align < SIZEOF_VOID_P)
2798 	/*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
2799 	if (size > 32 * SIZEOF_VOID_P)
2802 	create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
2804 	/* We don't unroll more than 5 stores to avoid code bloat. */
2805 	if (size > 5 * SIZEOF_VOID_P) {
2806 		/*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
2807 		size += (SIZEOF_VOID_P - 1);
2808 		size &= ~(SIZEOF_VOID_P - 1);
2810 		EMIT_NEW_ICONST (cfg, iargs [2], size);
2811 		EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
2812 		mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
2816 	destreg = iargs [0]->dreg;
2817 	srcreg = iargs [1]->dreg;
2820 	dest_ptr_reg = alloc_preg (cfg);
2821 	tmp_reg = alloc_preg (cfg);
	/* dest_ptr is a moving cursor into the destination; start at destreg. */
2824 	EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
	/* Unrolled pointer-sized copy loop, barrier after each reference slot. */
2826 	while (size >= SIZEOF_VOID_P) {
2827 		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, tmp_reg, srcreg, offset);
2828 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
2831 			emit_write_barrier (cfg, iargs [0], NULL, tmp_reg);
2833 		offset += SIZEOF_VOID_P;
2834 		size -= SIZEOF_VOID_P;
2837 		/*tmp += sizeof (void*)*/
2838 		if (size >= SIZEOF_VOID_P) {
2839 			NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
2840 			MONO_ADD_INS (cfg->cbb, iargs [0]);
2844 	/* Those cannot be references since size < sizeof (void*) */
2846 		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
2847 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
2853 		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
2854 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
2860 		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
2861 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
2870  * Emit code to copy a valuetype of type @klass whose address is stored in
2871  * @src->dreg to memory whose address is stored at @dest->dreg.
2874 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2876 	MonoInst *iargs [4];
2877 	int context_used, n;
2879 	MonoMethod *memcpy_method;
2880 	MonoInst *size_ins = NULL;
2884 	 * This check breaks with spilled vars... need to handle it during verification anyway.
2885 	 * g_assert (klass && klass == src->klass && klass == dest->klass);
	/* Under gsharedvt the value size is only known at runtime; fetch it from the rgctx. */
2888 	if (mini_is_gsharedvt_klass (cfg, klass)) {
2890 		context_used = mono_class_check_context_used (klass);
2891 		size_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_VALUE_SIZE);
2895 		n = mono_class_native_size (klass, &align);
2897 		n = mono_class_value_size (klass, &align);
2899 	/* if native is true there should be no references in the struct */
2900 	if (cfg->gen_write_barriers && klass->has_references && !native) {
2901 		/* Avoid barriers when storing to the stack */
2902 		if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2903 				(dest->opcode == OP_LDADDR))) {
2904 			int context_used = 0;
2909 			if (cfg->generic_sharing_context)
2910 				context_used = mono_class_check_context_used (klass);
2912 			/* It's ok to intrinsify under gsharing since shared code types are layout stable. */
2913 			if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
2915 			} else if (context_used) {
2916 				iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2918 				if (cfg->compile_aot) {
2919 					EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2921 					EMIT_NEW_PCONST (cfg, iargs [2], klass);
2922 					mono_class_compute_gc_descriptor (klass);
				/* Barrier-aware copy through the runtime. */
2926 				mono_emit_jit_icall (cfg, mono_value_copy, iargs);
	/* No references (or stack store): a small copy can be fully inlined. */
2931 	if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2932 		/* FIXME: Optimize the case when src/dest is OP_LDADDR */
2933 		mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2938 			iargs [2] = size_ins;
2940 			EMIT_NEW_ICONST (cfg, iargs [2], n);
2942 		memcpy_method = get_memcpy_method ();
2943 		mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 * Return the managed String.memset helper from corlib, caching it in a
 * static on first use.  Aborts if running against a corlib that lacks it.
 */
2948 get_memset_method (void)
2950 	static MonoMethod *memset_method = NULL;
2951 	if (!memset_method) {
2952 		memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2954 			g_error ("Old corlib found. Install a new one");
2956 	return memset_method;
/*
 * mini_emit_initobj:
 *
 * Emit code to zero-initialize a valuetype of type KLASS at the address in
 * DEST->dreg (CIL initobj).  Small, statically-sized types are cleared with
 * an inline memset; otherwise the managed memset helper is called, with the
 * size coming from the rgctx for gsharedvt types.
 */
2960 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2962 	MonoInst *iargs [3];
2963 	int n, context_used;
2965 	MonoMethod *memset_method;
2966 	MonoInst *size_ins = NULL;
2968 	/* FIXME: Optimize this for the case when dest is an LDADDR */
2970 	mono_class_init (klass);
	/* gsharedvt: value size is only known at runtime, load it from the rgctx. */
2971 	if (mini_is_gsharedvt_klass (cfg, klass)) {
2972 		context_used = mono_class_check_context_used (klass);
2973 		size_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_VALUE_SIZE);
2976 		n = mono_class_value_size (klass, &align);
	/* Small fixed-size types: clear inline instead of calling the helper. */
2979 	if (!size_ins && n <= sizeof (gpointer) * 5) {
2980 		mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2983 		memset_method = get_memset_method ();
2985 		EMIT_NEW_ICONST (cfg, iargs [1], 0);
2987 			iargs [2] = size_ins;
2989 			EMIT_NEW_ICONST (cfg, iargs [2], n);
2990 		mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 * Emit IR to load the runtime generic context for METHOD.  Depending on
 * CONTEXT_USED and the kind of method, the rgctx comes from the method
 * rgctx variable (generic methods), the vtable variable (static/valuetype
 * methods), or is read from the 'this' argument's vtable.
 */
2995 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2997 	MonoInst *this = NULL;
2999 	g_assert (cfg->generic_sharing_context);
	/* Instance method on a reference type sharing only over the class context:
	 * the context can be recovered from 'this'. */
3001 	if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3002 			!(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3003 			!method->klass->valuetype)
3004 		EMIT_NEW_ARGLOAD (cfg, this, 0);
3006 	if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
		/* Shared over the method context: use the method rgctx variable. */
3007 		MonoInst *mrgctx_loc, *mrgctx_var;
3010 		g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3012 		mrgctx_loc = mono_get_vtable_var (cfg);
3013 		EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3016 	} else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
		/* No usable 'this': the context comes from the vtable variable. */
3017 		MonoInst *vtable_loc, *vtable_var;
3021 		vtable_loc = mono_get_vtable_var (cfg);
3022 		EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3024 		if (method->is_inflated && mono_method_get_context (method)->method_inst) {
			/* The variable actually holds an mrgctx; load the vtable out of it. */
3025 			MonoInst *mrgctx_var = vtable_var;
3028 			vtable_reg = alloc_preg (cfg);
3029 			EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3030 			vtable_var->type = STACK_PTR;
	/* Default: load the vtable from the 'this' object. */
3038 		vtable_reg = alloc_preg (cfg);
3039 		EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 * Allocate (from mempool MP) and fill an rgctx-entry descriptor wrapping a
 * patch of type PATCH_TYPE/PATCH_DATA, to be fetched for METHOD as
 * INFO_TYPE.  IN_MRGCTX selects the method rgctx vs. the class vtable.
 */
3044 static MonoJumpInfoRgctxEntry *
3045 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3047 	MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3048 	res->method = method;
3049 	res->in_mrgctx = in_mrgctx;
3050 	res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3051 	res->data->type = patch_type;
3052 	res->data->data.target = patch_data;
3053 	res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *
 * Emit a call to the lazy rgctx-fetch trampoline to load ENTRY from the
 * runtime generic context RGCTX.
 */
3058 static inline MonoInst*
3059 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3061 	return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 * Emit IR to load the RGCTX_TYPE info slot for KLASS from the runtime
 * generic context of the current method.
 */
3065 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3066 					  MonoClass *klass, MonoRgctxInfoType rgctx_type)
3068 	MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3069 	MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3071 	return emit_rgctx_fetch (cfg, rgctx, entry);
3075  * emit_get_rgctx_method:
3077  *   Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3078  * normal constants, else emit a load from the rgctx.
3081 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3082 					   MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3084 	if (!context_used) {
		/* Not shared: the value is known at compile time, emit a constant. */
3087 		switch (rgctx_type) {
3088 		case MONO_RGCTX_INFO_METHOD:
3089 			EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3091 		case MONO_RGCTX_INFO_METHOD_RGCTX:
3092 			EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3095 			g_assert_not_reached ();
		/* Shared code: fetch the slot from the runtime generic context. */
3098 		MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3099 		MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3101 		return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 * Emit IR to load the RGCTX_TYPE info slot for FIELD from the runtime
 * generic context of the current method.
 */
3106 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3107 					  MonoClassField *field, MonoRgctxInfoType rgctx_type)
3109 	MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3110 	MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3112 	return emit_rgctx_fetch (cfg, rgctx, entry);
3116  * On return the caller must check @klass for load errors.
/*
 * emit_generic_class_init:
 *
 * Emit a call to the generic class-init trampoline for KLASS, passing the
 * vtable either from the rgctx (shared code) or as a compile-time constant.
 */
3119 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
3121 	MonoInst *vtable_arg;
3123 	int context_used = 0;
3125 	if (cfg->generic_sharing_context)
3126 		context_used = mono_class_check_context_used (klass);
3129 		vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3130 										   klass, MONO_RGCTX_INFO_VTABLE);
3132 		MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3136 		EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
	/* LLVM needs a different trampoline signature (argument passed normally). */
3139 	if (COMPILE_LLVM (cfg))
3140 		call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3142 		call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3143 #ifdef MONO_ARCH_VTABLE_REG
	/* Pass the vtable in the dedicated arch register when one exists. */
3144 	mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3145 	cfg->uses_vtable_reg = TRUE;
/*
 * emit_seq_point:
 *
 * Emit a debugger sequence point at IL offset IP, but only when sequence
 * points are enabled and we are compiling METHOD itself (not an inlinee).
 * INTR_LOC marks the point as a possible interrupt location.
 */
3152 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc)
3156 	if (cfg->gen_seq_points && cfg->method == method) {
3157 		NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3158 		MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *
 * When --debug=casts is enabled, record the source class (read from the
 * object in OBJ_REG) and the target KLASS in TLS, so a failing cast can
 * produce a detailed InvalidCastException message.  No-op otherwise.
 */
3163 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
3165 	if (mini_get_debug_options ()->better_cast_details) {
3166 		int to_klass_reg = alloc_preg (cfg);
3167 		int vtable_reg = alloc_preg (cfg);
3168 		int klass_reg = alloc_preg (cfg);
3169 		MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
		/* Not all backends provide the TLS intrinsic needed to store the details. */
3172 			fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3176 		MONO_ADD_INS (cfg->cbb, tls_get);
		/* from = obj->vtable->klass */
3177 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3178 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3180 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3181 		MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3182 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * reset_cast_details:
 *
 * Clear the TLS cast-details state set by save_cast_details () once the
 * cast has succeeded.  No-op unless --debug=casts is enabled.
 */
3187 reset_cast_details (MonoCompile *cfg)
3189 	/* Reset the variables holding the cast details */
3190 	if (mini_get_debug_options ()->better_cast_details) {
3191 		MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3193 		MONO_ADD_INS (cfg->cbb, tls_get);
3194 		/* It is enough to reset the from field */
3195 		MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3200  * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 * Emit a runtime check (CIL stelem-style covariance check) that OBJ is an
 * array of exactly ARRAY_CLASS, throwing ArrayTypeMismatchException
 * otherwise.  The comparison strategy depends on shared/AOT/gshared mode:
 * compare klass pointers (MONO_OPT_SHARED), vtables from the rgctx (shared
 * generic code), or vtable constants.
 */
3203 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3205 	int vtable_reg = alloc_preg (cfg);
3206 	int context_used = 0;
3208 	if (cfg->generic_sharing_context)
3209 		context_used = mono_class_check_context_used (array_class);
3211 	save_cast_details (cfg, array_class, obj->dreg);
	/* Faulting load: also acts as the null check on obj. */
3213 	MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3215 	if (cfg->opt & MONO_OPT_SHARED) {
		/* Domain-shared code: compare the MonoClass, not the per-domain vtable. */
3216 		int class_reg = alloc_preg (cfg);
3217 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3218 		if (cfg->compile_aot) {
3219 			int klass_reg = alloc_preg (cfg);
3220 			MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3221 			MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3223 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3225 	} else if (context_used) {
		/* Shared generic code: the expected vtable comes from the rgctx. */
3226 		MonoInst *vtable_ins;
3228 		vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3229 		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3231 		if (cfg->compile_aot) {
3235 			if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3237 			vt_reg = alloc_preg (cfg);
3238 			MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3239 			MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3242 			if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3244 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3248 	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3250 	reset_cast_details (cfg);
3254  * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3255  * generic code is generated.
3258 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3260 	MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
	/* Shared code: the concrete Nullable<T>.Unbox is not known statically,
	 * so fetch its code address from the rgctx and do an indirect call. */
3263 		MonoInst *rgctx, *addr;
3265 		/* FIXME: What if the class is shared? We might not
3266 		   have to get the address of the method from the
3268 		addr = emit_get_rgctx_method (cfg, context_used, method,
3269 									  MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3271 		rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3273 		return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
	/* Non-shared: direct call to Nullable<T>.Unbox. */
3275 		return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *
 * Emit the CIL 'unbox' check for the boxed object on the stack (sp [0]) and
 * KLASS: verify the object is not an array and its element class matches,
 * throwing InvalidCastException otherwise.  Returns the address of the
 * unboxed value (obj + sizeof (MonoObject)) as a STACK_MP instruction.
 */
3280 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3284 	int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3285 	int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3286 	int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3287 	int rank_reg = alloc_dreg (cfg ,STACK_I4);
3289 	obj_reg = sp [0]->dreg;
	/* Faulting load doubles as the null check. */
3290 	MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3291 	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3293 	/* FIXME: generics */
3294 	g_assert (klass->rank == 0);
	/* The boxed object must not be an array. */
3297 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3298 	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3300 	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3301 	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
		/* Shared code: compare against the element class fetched from the rgctx. */
3304 		MonoInst *element_class;
3306 		/* This assertion is from the unboxcast insn */
3307 		g_assert (klass->rank == 0);
3309 		element_class = emit_get_rgctx_klass (cfg, context_used,
3310 				klass->element_class, MONO_RGCTX_INFO_KLASS);
3312 		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3313 		MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3315 		save_cast_details (cfg, klass->element_class, obj_reg);
3316 		mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3317 		reset_cast_details (cfg);
	/* Result: pointer to the value payload just past the object header. */
3320 	NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3321 	MONO_ADD_INS (cfg->cbb, add);
3322 	add->type = STACK_MP;
3329  * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 * Emit code to allocate an object of type KLASS (FOR_BOX selects the boxing
 * allocator variant).  Chooses between the managed allocator, domain-shared
 * allocation, an AOT corlib helper, and the generic allocation function,
 * depending on sharing/AOT configuration.
 */
3332 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3334 	MonoInst *iargs [2];
	/* Shared generic code path: klass/vtable must come from the rgctx. */
3340 		MonoInst *iargs [2];
3343 		   FIXME: we cannot get managed_alloc here because we can't get
3344 		   the class's vtable (because it's not a closed class)
3346 		MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3347 		MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3350 		if (cfg->opt & MONO_OPT_SHARED)
3351 			rgctx_info = MONO_RGCTX_INFO_KLASS;
3353 			rgctx_info = MONO_RGCTX_INFO_VTABLE;
3354 		data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3356 		if (cfg->opt & MONO_OPT_SHARED) {
3357 			EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3359 			alloc_ftn = mono_object_new;
3362 			alloc_ftn = mono_object_new_specific;
3365 		return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
	/* Non-gshared paths below. */
3368 	if (cfg->opt & MONO_OPT_SHARED) {
		/* Domain-shared code: allocate via (domain, klass). */
3369 		EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3370 		EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3372 		alloc_ftn = mono_object_new;
3373 	} else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3374 		/* This happens often in argument checking code, eg. throw new FooException... */
3375 		/* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3376 		EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3377 		return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3379 		MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3380 		MonoMethod *managed_alloc = NULL;
		/* vtable creation can fail on type-load errors; report and bail. */
3384 			mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3385 			cfg->exception_ptr = klass;
3389 #ifndef MONO_CROSS_COMPILE
3390 		managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
		/* Prefer the GC-specific managed allocator when one is available. */
3393 		if (managed_alloc) {
3394 			EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3395 			return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3397 		alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
			/* Some allocation fns take the instance size in words as first arg. */
3399 			guint32 lw = vtable->klass->instance_size;
3400 			lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3401 			EMIT_NEW_ICONST (cfg, iargs [0], lw);
3402 			EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3405 			EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3409 	return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3413  * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 * Emit code to box VAL of valuetype KLASS: allocate the box object and store
 * the value after the object header.  Nullable<T> goes through the managed
 * Nullable.Box helper instead (via an rgctx indirect call in shared code).
 */
3416 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
3418 	MonoInst *alloc, *ins;
3420 	if (mono_class_is_nullable (klass)) {
3421 		MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3424 			/* FIXME: What if the class is shared? We might not
3425 			   have to get the method address from the RGCTX. */
3426 			MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3427 													MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3428 			MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3430 			return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3432 			return mono_emit_method_call (cfg, method, &val, NULL);
3436 	alloc = handle_alloc (cfg, klass, TRUE, context_used);
	/* Store the value into the payload of the freshly-allocated box. */
3440 	if (mini_is_gsharedvt_klass (cfg, klass)) {
		/* Size not known statically: use a variable-sized store. */
3441 		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3442 		ins->opcode = OP_STOREV_MEMBASE;
3444 		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 * Return whether KLASS is a generic instance (or, under sharing, an open
 * generic) with at least one covariant/contravariant type parameter bound to
 * a reference type.  Such casts need the slower cache-based cast helpers.
 */
3452 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
3455 	MonoGenericContainer *container;
3456 	MonoGenericInst *ginst;
3458 	if (klass->generic_class) {
3459 		container = klass->generic_class->container_class->generic_container;
3460 		ginst = klass->generic_class->context.class_inst;
3461 	} else if (klass->generic_container && context_used) {
3462 		container = klass->generic_container;
3463 		ginst = container->context.class_inst;
	/* Scan the type arguments of the variant parameters. */
3468 	for (i = 0; i < container->type_argc; ++i) {
3470 		if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
3472 		type = ginst->type_argv [i];
3473 		if (mini_type_is_reference (cfg, type))
3479 // FIXME: This doesn't work yet (class libs tests fail?)
/* Whether an isinst/castclass against KLASS needs the slow icall path
 * (interfaces, arrays, Nullable, MBR, sealed, or open generic params).
 * Currently forced TRUE by the leading literal — see FIXME above. */
3480 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
3483  * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 *
 * Emit the CIL 'castclass' of SRC to KLASS.  Variant generic interfaces go
 * through the cache-based managed helper; complex classes go through the
 * mono_object_castclass icall; simple classes get inline vtable/klass
 * comparisons.  Null references always pass.
 */
3486 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3488 	MonoBasicBlock *is_null_bb;
3489 	int obj_reg = src->dreg;
3490 	int vtable_reg = alloc_preg (cfg);
3491 	MonoInst *klass_inst = NULL;
	/* Variant generic argument: use the cached castclass helper. */
3496 	if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
3497 		MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
3498 		MonoInst *cache_ins;
3500 		cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3505 		/* klass - it's the second element of the cache entry*/
3506 		EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3509 		args [2] = cache_ins;
3511 		return mono_emit_method_call (cfg, mono_castclass, args, NULL);
3514 		klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3516 		if (is_complex_isinst (klass)) {
3517 			/* Complex case, handle by an icall */
3523 			args [1] = klass_inst;
3525 			return mono_emit_jit_icall (cfg, mono_object_castclass, args);
3527 			/* Simple case, handled by the code below */
	/* Inline path: null passes, otherwise compare type information. */
3531 	NEW_BBLOCK (cfg, is_null_bb);
3533 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3534 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3536 	save_cast_details (cfg, klass, obj_reg);
3538 	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3539 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3540 		mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3542 		int klass_reg = alloc_preg (cfg);
3544 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3546 		if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3547 			/* the remoting code is broken, access the class for now */
3548 			if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3549 				MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3551 					mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3552 					cfg->exception_ptr = klass;
				/* Sealed class: a single vtable pointer comparison suffices. */
3555 				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3557 				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3558 				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3560 			MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3562 			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3563 			mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3567 	MONO_START_BB (cfg, is_null_bb);
3569 	reset_cast_details (cfg);
3578 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3581 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3582 int obj_reg = src->dreg;
3583 int vtable_reg = alloc_preg (cfg);
3584 int res_reg = alloc_ireg_ref (cfg);
3585 MonoInst *klass_inst = NULL;
3590 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
3591 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
3592 MonoInst *cache_ins;
3594 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3599 /* klass - it's the second element of the cache entry*/
3600 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3603 args [2] = cache_ins;
3605 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
3608 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3610 if (is_complex_isinst (klass)) {
3611 /* Complex case, handle by an icall */
3617 args [1] = klass_inst;
3619 return mono_emit_jit_icall (cfg, mono_object_isinst, args);
3621 /* Simple case, the code below can handle it */
3625 NEW_BBLOCK (cfg, is_null_bb);
3626 NEW_BBLOCK (cfg, false_bb);
3627 NEW_BBLOCK (cfg, end_bb);
3629 /* Do the assignment at the beginning, so the other assignment can be if converted */
3630 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3631 ins->type = STACK_OBJ;
3634 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3635 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3637 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3639 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3640 g_assert (!context_used);
3641 /* the is_null_bb target simply copies the input register to the output */
3642 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3644 int klass_reg = alloc_preg (cfg);
3647 int rank_reg = alloc_preg (cfg);
3648 int eclass_reg = alloc_preg (cfg);
3650 g_assert (!context_used);
3651 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3652 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3653 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3654 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3655 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
3656 if (klass->cast_class == mono_defaults.object_class) {
3657 int parent_reg = alloc_preg (cfg);
3658 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3659 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3660 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3661 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3662 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3663 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3664 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3665 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3666 } else if (klass->cast_class == mono_defaults.enum_class) {
3667 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3668 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3669 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3670 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3672 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3673 /* Check that the object is a vector too */
3674 int bounds_reg = alloc_preg (cfg);
3675 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3676 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3677 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3680 /* the is_null_bb target simply copies the input register to the output */
3681 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3683 } else if (mono_class_is_nullable (klass)) {
3684 g_assert (!context_used);
3685 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3686 /* the is_null_bb target simply copies the input register to the output */
3687 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
3689 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3690 g_assert (!context_used);
3691 /* the remoting code is broken, access the class for now */
3692 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3693 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3695 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3696 cfg->exception_ptr = klass;
3699 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3701 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3702 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3704 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3705 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3707 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3708 /* the is_null_bb target simply copies the input register to the output */
3709 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
3714 MONO_START_BB (cfg, false_bb);
3716 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3717 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3719 MONO_START_BB (cfg, is_null_bb);
3721 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the remoting-aware isinst opcode.  See the comment
 * below for the encoding of the integer result placed in `dreg`.
 */
3727 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3729 /* This opcode takes as input an object reference and a class, and returns:
3730 0) if the object is an instance of the class,
3731 1) if the object is not instance of the class,
3732 2) if the object is a proxy whose type cannot be determined */
3735 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3736 int obj_reg = src->dreg;
3737 int dreg = alloc_ireg (cfg);
3739 int klass_reg = alloc_preg (cfg);
3741 NEW_BBLOCK (cfg, true_bb);
3742 NEW_BBLOCK (cfg, false_bb);
3743 NEW_BBLOCK (cfg, false2_bb);
3744 NEW_BBLOCK (cfg, end_bb);
3745 NEW_BBLOCK (cfg, no_proxy_bb);
/* NULL is never an instance of anything -> result 1. */
3747 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3748 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3750 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3751 NEW_BBLOCK (cfg, interface_fail_bb);
3753 tmp_reg = alloc_preg (cfg);
3754 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3755 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
3756 MONO_START_BB (cfg, interface_fail_bb);
3757 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Interface check failed: only a transparent proxy without custom
 * type info can still yield the "cannot determine" result (2). */
3759 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3761 tmp_reg = alloc_preg (cfg);
3762 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3763 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3764 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
3766 tmp_reg = alloc_preg (cfg);
3767 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3768 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Non-interface case: for transparent proxies, test against the
 * remote class rather than the proxy's own class. */
3770 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3771 tmp_reg = alloc_preg (cfg);
3772 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3773 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3775 tmp_reg = alloc_preg (cfg);
3776 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3777 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3778 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3780 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3781 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3783 MONO_START_BB (cfg, no_proxy_bb);
3785 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Result blocks: 1 = not an instance, 2 = undecidable proxy, 0 = instance. */
3788 MONO_START_BB (cfg, false_bb);
3790 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3791 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3793 MONO_START_BB (cfg, false2_bb);
3795 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3796 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3798 MONO_START_BB (cfg, true_bb);
3800 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3802 MONO_START_BB (cfg, end_bb);
3805 MONO_INST_NEW (cfg, ins, OP_ICONST);
3807 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the remoting-aware castclass opcode.  See the comment
 * below for the encoding of the integer result placed in `dreg`.
 */
3813 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3815 /* This opcode takes as input an object reference and a class, and returns:
3816 0) if the object is an instance of the class,
3817 1) if the object is a proxy whose type cannot be determined
3818 an InvalidCastException exception is thrown otherwise*/
3821 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3822 int obj_reg = src->dreg;
3823 int dreg = alloc_ireg (cfg);
3824 int tmp_reg = alloc_preg (cfg);
3825 int klass_reg = alloc_preg (cfg);
3827 NEW_BBLOCK (cfg, end_bb);
3828 NEW_BBLOCK (cfg, ok_result_bb);
/* Casting NULL always succeeds -> result 0. */
3830 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3831 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record class/object for a descriptive InvalidCastException message. */
3833 save_cast_details (cfg, klass, obj_reg);
3835 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3836 NEW_BBLOCK (cfg, interface_fail_bb);
3838 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3839 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3840 MONO_START_BB (cfg, interface_fail_bb);
3841 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Interface check failed: anything but a transparent proxy with no
 * custom type info throws InvalidCastException here. */
3843 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3845 tmp_reg = alloc_preg (cfg);
3846 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3847 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3848 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
3850 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3851 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3854 NEW_BBLOCK (cfg, no_proxy_bb);
3856 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3857 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3858 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Proxy: check assignability against the remote class instead. */
3860 tmp_reg = alloc_preg (cfg);
3861 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3862 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3864 tmp_reg = alloc_preg (cfg);
3865 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3866 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3867 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3869 NEW_BBLOCK (cfg, fail_1_bb);
3871 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3873 MONO_START_BB (cfg, fail_1_bb);
3875 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3876 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3878 MONO_START_BB (cfg, no_proxy_bb);
/* Plain object: normal castclass semantics (throws on failure). */
3880 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3883 MONO_START_BB (cfg, ok_result_bb);
3885 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3887 MONO_START_BB (cfg, end_bb);
3890 MONO_INST_NEW (cfg, ins, OP_ICONST);
3892 ins->type = STACK_I4;
3898 * Returns NULL and sets the cfg exception on error.
/*
 * handle_delegate_ctor:
 *
 *   Allocate a delegate of type KLASS bound to METHOD with TARGET as the
 * first argument, inlining the work normally done by mono_delegate_ctor ().
 * CONTEXT_USED is non-zero under generic sharing.
 */
3900 static G_GNUC_UNUSED MonoInst*
3901 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
3905 gpointer *trampoline;
3906 MonoInst *obj, *method_ins, *tramp_ins;
3910 obj = handle_alloc (cfg, klass, FALSE, 0);
3914 /* Inline the contents of mono_delegate_ctor */
3916 /* Set target field */
3917 /* Optimize away setting of NULL target */
3918 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
3919 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* Reference store into a heap object: notify the GC if needed. */
3920 if (cfg->gen_write_barriers) {
3921 dreg = alloc_preg (cfg);
3922 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target));
3923 emit_write_barrier (cfg, ptr, target, 0);
3927 /* Set method field */
3928 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
3929 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3930 if (cfg->gen_write_barriers) {
3931 dreg = alloc_preg (cfg);
3932 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method));
3933 emit_write_barrier (cfg, ptr, method_ins, 0);
3936 * To avoid looking up the compiled code belonging to the target method
3937 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3938 * store it, and we fill it after the method has been compiled.
3940 if (!cfg->compile_aot && !method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
3941 MonoInst *code_slot_ins;
3944 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* The method->code-slot hash is per-domain shared state: lock around it. */
3946 domain = mono_domain_get ();
3947 mono_domain_lock (domain);
3948 if (!domain_jit_info (domain)->method_code_hash)
3949 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3950 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3952 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3953 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3955 mono_domain_unlock (domain);
3957 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3959 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3962 /* Set invoke_impl field */
3963 if (cfg->compile_aot) {
3964 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3966 trampoline = mono_create_delegate_trampoline (cfg->domain, klass);
3967 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3969 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3971 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call to the vararg mono_array_new_va icall to allocate a
 * RANK-dimensional array; the dimensions are on the stack in SP.
 */
3977 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3979 MonoJitICallInfo *info;
3981 /* Need to register the icall so it gets an icall wrapper */
3982 info = mono_get_array_new_va_icall (rank);
3984 cfg->flags |= MONO_CFG_HAS_VARARGS;
3986 /* mono_array_new_va () needs a vararg calling convention */
3987 cfg->disable_llvm = TRUE;
3989 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3990 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Materialize the GOT address into cfg->got_var at the start of the
 * entry basic block.  No-op if there is no got_var or it was already
 * allocated.
 */
3994 mono_emit_load_got_addr (MonoCompile *cfg)
3996 MonoInst *getaddr, *dummy_use;
3998 if (!cfg->got_var || cfg->got_var_allocated)
4001 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
4002 getaddr->cil_code = cfg->header->code;
4003 getaddr->dreg = cfg->got_var->dreg;
4005 /* Add it to the start of the first bblock */
4006 if (cfg->bb_entry->code) {
4007 getaddr->next = cfg->bb_entry->code;
4008 cfg->bb_entry->code = getaddr;
4011 MONO_ADD_INS (cfg->bb_entry, getaddr);
4013 cfg->got_var_allocated = TRUE;
4016 * Add a dummy use to keep the got_var alive, since real uses might
4017 * only be generated by the back ends.
4018 * Add it to end_bblock, so the variable's lifetime covers the whole
4020 * It would be better to make the usage of the got var explicit in all
4021 * cases when the backend needs it (i.e. calls, throw etc.), so this
4022 * wouldn't be needed.
4024 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
4025 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Inline size limit (in IL bytes); lazily initialized in
 * mono_method_check_inlining () from MONO_INLINELIMIT, defaulting to
 * INLINE_LENGTH_LIMIT. */
4028 static int inline_limit;
4029 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD is eligible for inlining into the method being
 * compiled in CFG: rejects shared-generics, deep inline chains, special
 * method kinds, oversized bodies, classes needing cctor runs, declsec,
 * and (under soft float) R4 parameters/returns.
 */
4032 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
4034 MonoMethodHeaderSummary header;
4036 #ifdef MONO_ARCH_SOFT_FLOAT
4037 MonoMethodSignature *sig = mono_method_signature (method);
4041 if (cfg->generic_sharing_context)
/* Cap recursion of the inliner itself. */
4044 if (cfg->inline_depth > 10)
4047 #ifdef MONO_ARCH_HAVE_LMF_OPS
4048 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
4049 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
4050 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
4055 if (!mono_method_get_header_summary (method, &header))
4058 /*runtime, icall and pinvoke are checked by summary call*/
4059 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
4060 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
4061 (method->klass->marshalbyref) ||
4065 /* also consider num_locals? */
4066 /* Do the size check early to avoid creating vtables */
4067 if (!inline_limit_inited) {
4068 if (getenv ("MONO_INLINELIMIT"))
4069 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
4071 inline_limit = INLINE_LENGTH_LIMIT;
4072 inline_limit_inited = TRUE;
4074 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
4078 * if we can initialize the class of the method right away, we do,
4079 * otherwise we don't allow inlining if the class needs initialization,
4080 * since it would mean inserting a call to mono_runtime_class_init()
4081 * inside the inlined code
4083 if (!(cfg->opt & MONO_OPT_SHARED)) {
4084 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
4085 if (cfg->run_cctors && method->klass->has_cctor) {
4086 /*FIXME it would be easier and lazier to just use mono_class_try_get_vtable */
4087 if (!method->klass->runtime_info)
4088 /* No vtable created yet */
4090 vtable = mono_class_vtable (cfg->domain, method->klass);
4093 /* This makes so that inline cannot trigger */
4094 /* .cctors: too many apps depend on them */
4095 /* running with a specific order... */
4096 if (! vtable->initialized)
4098 mono_runtime_class_init (vtable);
4100 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
4101 if (!method->klass->runtime_info)
4102 /* No vtable created yet */
4104 vtable = mono_class_vtable (cfg->domain, method->klass);
4107 if (!vtable->initialized)
4112 * If we're compiling for shared code
4113 * the cctor will need to be run at aot method load time, for example,
4114 * or at the end of the compilation of the inlining method.
4116 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
4121 * CAS - do not inline methods with declarative security
4122 * Note: this has to be before any possible return TRUE;
4124 if (mono_method_has_declsec (method))
/* Soft float: R4 values need conversion helpers, so don't inline. */
4127 #ifdef MONO_ARCH_SOFT_FLOAT
4129 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4131 for (i = 0; i < sig->param_count; ++i)
4132 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static-field access from METHOD requires a class
 * initialization check for VTABLE's class to be emitted.
 */
4140 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
4142 if (vtable->initialized && !cfg->compile_aot)
4145 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
4148 if (!mono_class_needs_cctor_run (vtable->klass, method))
4151 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
4152 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the 1-dimensional
 * array ARR whose elements are of type KLASS.  BCHECK requests a bounds
 * check.  Returns the address instruction (type STACK_MP).
 */
4159 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4163 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
4166 if (mini_is_gsharedvt_klass (cfg, klass)) {
4169 mono_class_init (klass);
4170 size = mono_class_array_element_size (klass);
4173 mult_reg = alloc_preg (cfg);
4174 array_reg = arr->dreg;
4175 index_reg = index->dreg;
4177 #if SIZEOF_REGISTER == 8
4178 /* The array reg is 64 bits but the index reg is only 32 */
4179 if (COMPILE_LLVM (cfg)) {
4181 index2_reg = index_reg;
4183 index2_reg = alloc_preg (cfg);
4184 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
4187 if (index->type == STACK_I8) {
4188 index2_reg = alloc_preg (cfg);
4189 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4191 index2_reg = index_reg;
4196 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
/* x86/amd64 fast path: fold the scaling into a single LEA when the
 * element size is a power of two <= 8. */
4198 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4199 if (size == 1 || size == 2 || size == 4 || size == 8) {
4200 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4202 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
4203 ins->klass = mono_class_get_element_class (klass);
4204 ins->type = STACK_MP;
4210 add_reg = alloc_ireg_mp (cfg);
/* gsharedvt: the element size is only known at runtime, fetch it
 * from the rgctx and multiply dynamically. */
4213 MonoInst *rgctx_ins;
4216 g_assert (cfg->generic_sharing_context);
4217 context_used = mono_class_check_context_used (klass);
4218 g_assert (context_used);
4219 rgctx_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
4220 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
4222 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
4224 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4225 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4226 ins->klass = mono_class_get_element_class (klass);
4227 ins->type = STACK_MP;
4228 MONO_ADD_INS (cfg->cbb, ins);
4233 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [INDEX_INS1, INDEX_INS2]
 * of the 2-dimensional array ARR, including range checks against the
 * per-dimension bounds.  Requires native multiply support (guarded by
 * MONO_ARCH_EMULATE_MUL_DIV above).
 */
4235 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4237 int bounds_reg = alloc_preg (cfg);
4238 int add_reg = alloc_ireg_mp (cfg);
4239 int mult_reg = alloc_preg (cfg);
4240 int mult2_reg = alloc_preg (cfg);
4241 int low1_reg = alloc_preg (cfg);
4242 int low2_reg = alloc_preg (cfg);
4243 int high1_reg = alloc_preg (cfg);
4244 int high2_reg = alloc_preg (cfg);
4245 int realidx1_reg = alloc_preg (cfg);
4246 int realidx2_reg = alloc_preg (cfg);
4247 int sum_reg = alloc_preg (cfg);
4248 int index1, index2, tmpreg;
4252 mono_class_init (klass);
4253 size = mono_class_array_element_size (klass);
4255 index1 = index_ins1->dreg;
4256 index2 = index_ins2->dreg;
4258 #if SIZEOF_REGISTER == 8
4259 /* The array reg is 64 bits but the index reg is only 32 */
4260 if (COMPILE_LLVM (cfg)) {
4263 tmpreg = alloc_preg (cfg);
4264 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
4266 tmpreg = alloc_preg (cfg);
4267 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
4271 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
4275 /* range checking */
4276 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4277 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* First dimension: realidx = index - lower_bound, must be < length. */
4279 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4280 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4281 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4282 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4283 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4284 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4285 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Second dimension: bounds entry lives one MonoArrayBounds further. */
4287 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4288 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4289 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4290 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4291 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
4292 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4293 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + vector_offset + ((realidx1 * dim2_length) + realidx2) * size */
4295 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4296 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4297 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4298 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4299 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4301 ins->type = STACK_MP;
4303 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Dispatch Array Address/Get/Set element-address computation for
 * CMETHOD: rank 1 and (optionally) rank 2 are emitted inline, higher
 * ranks go through the generated array-address marshal wrapper.
 * IS_SET subtracts the value argument when counting index parameters.
 */
4310 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4314 MonoMethod *addr_method;
4317 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4320 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
4322 #ifndef MONO_ARCH_EMULATE_MUL_DIV
4323 /* emit_ldelema_2 depends on OP_LMUL */
4324 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
4325 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* General case: call the rank/size-specific address helper. */
4329 element_size = mono_class_array_element_size (cmethod->klass->element_class);
4330 addr_method = mono_marshal_get_array_address (rank, element_size);
4331 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint, regardless of method. */
4336 static MonoBreakPolicy
4337 always_insert_breakpoint (MonoMethod *method)
4339 return MONO_BREAK_POLICY_ALWAYS;
4342 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4345 * mono_set_break_policy:
4346 * policy_callback: the new callback function
4348 * Allow embedders to decide whether to actually obey breakpoint instructions
4349 * (both break IL instructions and Debugger.Break () method calls), for example
4350 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4351 * untrusted or semi-trusted code.
4353 * @policy_callback will be called every time a break point instruction needs to
4354 * be inserted with the method argument being the method that calls Debugger.Break()
4355 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4356 * if it wants the breakpoint to not be effective in the given method.
4357 * #MONO_BREAK_POLICY_ALWAYS is the default.
4360 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
4362 if (policy_callback)
4363 break_policy_func = policy_callback;
/* A NULL callback restores the default (always insert) policy. */
4365 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:  (sic — name is misspelled but kept for callers)
 *
 *   Consult the embedder-installed break policy to decide whether a
 * breakpoint instruction should actually be emitted for METHOD.
 */
4369 should_insert_brekpoint (MonoMethod *method) {
4370 switch (break_policy_func (method)) {
4371 case MONO_BREAK_POLICY_ALWAYS:
4373 case MONO_BREAK_POLICY_NEVER:
4375 case MONO_BREAK_POLICY_ON_DBG:
4376 return mono_debug_using_mono_debugger ();
4378 g_warning ("Incorrect value returned from break policy callback");
4383 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/* IS_SET selects the store (SetGenericValueImpl) vs. load
 * (GetGenericValueImpl) direction; args = { array, index, value_ptr }. */
4385 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4387 MonoInst *addr, *store, *load;
4388 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4390 /* the bounds check is already done by the callers */
4391 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* set: copy *value_ptr into the element (with write barrier for refs) */
4393 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4394 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
4395 if (mini_type_is_reference (cfg, fsig->params [2]))
4396 emit_write_barrier (cfg, addr, load, -1);
/* get: copy the element into *value_ptr */
4398 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4399 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* Whether KLASS, viewed as a by-value type, is a reference type
 * (used to decide if array stores need type checks / write barriers). */
4406 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4408 return mini_type_is_reference (cfg, &klass->byval_arg);
/*
 * emit_array_store:
 *
 *   Emit IR for storing sp[2] into sp[0][sp[1]].  With SAFETY_CHECKS,
 * reference-element stores go through the virtual stelemref helper so
 * the array covariance check is performed; otherwise the address is
 * computed inline and the value stored directly.
 */
4412 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
/* Storing a non-NULL reference needs a covariance type check: use the
 * virtual stelemref helper (NULL stores never fail the check). */
4414 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
4415 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
4416 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
4417 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
4418 MonoInst *iargs [3];
4421 mono_class_setup_vtable (obj_array);
4422 g_assert (helper->slot);
4424 if (sp [0]->type != STACK_OBJ)
4426 if (sp [2]->type != STACK_OBJ)
4433 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
4437 if (mini_is_gsharedvt_klass (cfg, klass)) {
4440 // FIXME-VT: OP_ICONST optimization
4441 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
4442 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
4443 ins->opcode = OP_STOREV_MEMBASE;
4444 } else if (sp [1]->opcode == OP_ICONST) {
/* Constant index: fold the element offset at compile time. */
4445 int array_reg = sp [0]->dreg;
4446 int index_reg = sp [1]->dreg;
4447 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
4450 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
4451 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
4453 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
4454 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
4455 if (generic_class_is_reference_type (cfg, klass))
4456 emit_write_barrier (cfg, addr, sp [2], -1);
/*
 * emit_array_unsafe_access:
 *
 *   Emit the body of Array.UnsafeStore/UnsafeLoad: an element store or
 * load with no bounds or covariance checks (IS_SET selects store).
 */
4463 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4468 eklass = mono_class_from_mono_type (fsig->params [2]);
4470 eklass = mono_class_from_mono_type (fsig->ret);
4474 return emit_array_store (cfg, eklass, args, FALSE);
4476 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
4477 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to replace a constructor call with an intrinsic instruction
 * sequence (currently only SIMD ctors, when MONO_OPT_SIMD is enabled).
 */
4483 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4485 MonoInst *ins = NULL;
4486 #ifdef MONO_ARCH_SIMD_INTRINSICS
4487 if (cfg->opt & MONO_OPT_SIMD) {
4488 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/* Emit an OP_MEMORY_BARRIER of the given KIND into the current bblock. */
4498 emit_memory_barrier (MonoCompile *cfg, int kind)
4500 MonoInst *ins = NULL;
4501 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4502 MONO_ADD_INS (cfg->cbb, ins);
4503 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *
 *   Intrinsic replacements only used when compiling with the LLVM
 * backend: System.Math Sin/Cos/Sqrt/Abs plus Min/Max via CMOV opcodes.
 * Returns NULL when CMETHOD has no intrinsic form.
 */
4509 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4511 MonoInst *ins = NULL;
4514 /* The LLVM backend supports these intrinsics */
4515 if (cmethod->klass == mono_defaults.math_class) {
4516 if (strcmp (cmethod->name, "Sin") == 0) {
4518 } else if (strcmp (cmethod->name, "Cos") == 0) {
4520 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
4522 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
/* Unary FP intrinsic: one float source, one float destination. */
4527 MONO_INST_NEW (cfg, ins, opcode);
4528 ins->type = STACK_R8;
4529 ins->dreg = mono_alloc_freg (cfg);
4530 ins->sreg1 = args [0]->dreg;
4531 MONO_ADD_INS (cfg->cbb, ins);
/* Min/Max only lower to branchless opcodes when CMOV is enabled. */
4535 if (cfg->opt & MONO_OPT_CMOV) {
4536 if (strcmp (cmethod->name, "Min") == 0) {
4537 if (fsig->params [0]->type == MONO_TYPE_I4)
4539 if (fsig->params [0]->type == MONO_TYPE_U4)
4540 opcode = OP_IMIN_UN;
4541 else if (fsig->params [0]->type == MONO_TYPE_I8)
4543 else if (fsig->params [0]->type == MONO_TYPE_U8)
4544 opcode = OP_LMIN_UN;
4545 } else if (strcmp (cmethod->name, "Max") == 0) {
4546 if (fsig->params [0]->type == MONO_TYPE_I4)
4548 if (fsig->params [0]->type == MONO_TYPE_U4)
4549 opcode = OP_IMAX_UN;
4550 else if (fsig->params [0]->type == MONO_TYPE_I8)
4552 else if (fsig->params [0]->type == MONO_TYPE_U8)
4553 opcode = OP_LMAX_UN;
/* Binary integer intrinsic: two int sources, one int destination. */
4558 MONO_INST_NEW (cfg, ins, opcode);
4559 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
4560 ins->dreg = mono_alloc_ireg (cfg);
4561 ins->sreg1 = args [0]->dreg;
4562 ins->sreg2 = args [1]->dreg;
4563 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *
 *   Intrinsics which are safe to emit under generic sharing: the
 * System.Array UnsafeStore/UnsafeLoad helpers, lowered through
 * emit_array_unsafe_access (TRUE = store, FALSE = load).
 */
4571 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4573 if (cmethod->klass == mono_defaults.array_class) {
4574 if (strcmp (cmethod->name, "UnsafeStore") == 0)
4575 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
4576 if (strcmp (cmethod->name, "UnsafeLoad") == 0)
4577 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
/*
 * mini_emit_inst_for_method:
 *
 *   Main intrinsics dispatcher: try to replace a call to CMETHOD with an
 * inline IR sequence.  Dispatches on the declaring class: String, Object,
 * Array, RuntimeHelpers, Thread, Monitor, Interlocked, Debugger/Environment,
 * Math, then falls back to SIMD intrinsics, the LLVM backend and finally
 * mono_arch_emit_inst_for_method ().  Returns the emitted instruction or
 * (presumably) NULL when no intrinsic applies.
 * NOTE(review): many interior lines (closing braces, else arms, #else/#endif
 * branches) are elided from this listing; read with the full file at hand.
 */
4584 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4586 MonoInst *ins = NULL;
/* cached lookup of System.Runtime.CompilerServices.RuntimeHelpers */
4588 static MonoClass *runtime_helpers_class = NULL;
4589 if (! runtime_helpers_class)
4590 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
4591 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* --- System.String intrinsics --- */
4593 if (cmethod->klass == mono_defaults.string_class) {
4594 if (strcmp (cmethod->name, "get_Chars") == 0) {
4595 int dreg = alloc_ireg (cfg);
4596 int index_reg = alloc_preg (cfg);
4597 int mult_reg = alloc_preg (cfg);
4598 int add_reg = alloc_preg (cfg);
4600 #if SIZEOF_REGISTER == 8
4601 /* The array reg is 64 bits but the index reg is only 32 */
4602 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
4604 index_reg = args [1]->dreg;
4606 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
4608 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* x86/amd64: one LEA computes str + index*2 + offsetof(chars) */
4609 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
4610 add_reg = ins->dreg;
4611 /* Avoid a warning */
4613 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
/* generic path: shift index by 1 (UTF-16 chars are 2 bytes) and add base */
4616 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
4617 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4618 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4619 add_reg, G_STRUCT_OFFSET (MonoString, chars));
4621 type_from_op (ins, NULL, NULL);
4623 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4624 int dreg = alloc_ireg (cfg);
4625 /* Decompose later to allow more optimizations */
4626 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
4627 ins->type = STACK_I4;
4628 ins->flags |= MONO_INST_FAULT;
4629 cfg->cbb->has_array_access = TRUE;
4630 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
4633 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
4634 int mult_reg = alloc_preg (cfg);
4635 int add_reg = alloc_preg (cfg);
4637 /* The corlib functions check for oob already. */
4638 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
4639 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4640 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
4641 return cfg->cbb->last_ins;
/* --- System.Object intrinsics --- */
4644 } else if (cmethod->klass == mono_defaults.object_class) {
4646 if (strcmp (cmethod->name, "GetType") == 0) {
4647 int dreg = alloc_ireg_ref (cfg);
4648 int vt_reg = alloc_preg (cfg);
/* obj->vtable->type; the first load faults on a null receiver */
4649 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4650 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
4651 type_from_op (ins, NULL, NULL);
4654 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
/* hash = (obj >> 3) * 2654435761 — only valid with a non-moving GC */
4655 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
4656 int dreg = alloc_ireg (cfg);
4657 int t1 = alloc_ireg (cfg);
4659 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
4660 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
4661 ins->type = STACK_I4;
/* Object..ctor does nothing — emit a NOP */
4665 } else if (strcmp (cmethod->name, ".ctor") == 0) {
4666 MONO_INST_NEW (cfg, ins, OP_NOP);
4667 MONO_ADD_INS (cfg->cbb, ins);
/* --- System.Array intrinsics --- */
4671 } else if (cmethod->klass == mono_defaults.array_class) {
/* matches both GetGenericValueImpl and SetGenericValueImpl */
4672 if (strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
4673 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
4675 #ifndef MONO_BIG_ARRAYS
4677 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
4680 if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
4681 int dreg = alloc_ireg (cfg);
4682 int bounds_reg = alloc_ireg_mp (cfg);
4683 MonoBasicBlock *end_bb, *szarray_bb;
4684 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
4686 NEW_BBLOCK (cfg, end_bb);
4687 NEW_BBLOCK (cfg, szarray_bb);
/* a NULL bounds pointer means the array is a szarray (1-dim, zero-based) */
4689 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
4690 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
4691 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4692 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
4693 /* Non-szarray case */
4695 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4696 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4698 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4699 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4700 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4701 MONO_START_BB (cfg, szarray_bb);
/* szarray: GetLength(0) == max_length, GetLowerBound(0) == 0 */
4704 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4705 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4707 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4708 MONO_START_BB (cfg, end_bb);
4710 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
4711 ins->type = STACK_I4;
/* remaining Array intrinsics are all getters */
4717 if (cmethod->name [0] != 'g')
4720 if (strcmp (cmethod->name, "get_Rank") == 0) {
4721 int dreg = alloc_ireg (cfg);
4722 int vtable_reg = alloc_preg (cfg);
4723 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
4724 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4725 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
4726 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4727 type_from_op (ins, NULL, NULL);
4730 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4731 int dreg = alloc_ireg (cfg);
4733 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4734 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4735 type_from_op (ins, NULL, NULL);
/* --- RuntimeHelpers --- */
4740 } else if (cmethod->klass == runtime_helpers_class) {
4742 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
4743 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
/* --- System.Threading.Thread --- */
4747 } else if (cmethod->klass == mono_defaults.thread_class) {
4748 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
4749 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
4750 MONO_ADD_INS (cfg->cbb, ins);
4752 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
4753 return emit_memory_barrier (cfg, FullBarrier);
/* --- System.Threading.Monitor fast paths --- */
4755 } else if (cmethod->klass == mono_defaults.monitor_class) {
4757 /* FIXME this should be integrated to the check below once we support the trampoline version */
4758 #if defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
/* Enter(obj, ref bool lockTaken) — the V4 two-argument overload */
4759 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) {
4760 MonoMethod *fast_method = NULL;
4762 /* Avoid infinite recursion */
4763 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN && !strcmp (cfg->method->name, "FastMonitorEnterV4"))
4766 fast_method = mono_monitor_get_fast_path (cmethod);
4770 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
4774 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
/* arch provides a trampoline taking the object in a fixed register */
4775 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
4778 if (COMPILE_LLVM (cfg)) {
4780 * Pass the argument normally, the LLVM backend will handle the
4781 * calling convention problems.
4783 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4785 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
4786 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4787 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4788 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4791 return (MonoInst*)call;
4792 } else if (strcmp (cmethod->name, "Exit") == 0) {
4795 if (COMPILE_LLVM (cfg)) {
4796 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4798 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
4799 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4800 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4801 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4804 return (MonoInst*)call;
4806 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4808 MonoMethod *fast_method = NULL;
4810 /* Avoid infinite recursion */
4811 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
4812 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
4813 strcmp (cfg->method->name, "FastMonitorExit") == 0))
4816 if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) ||
4817 strcmp (cmethod->name, "Exit") == 0)
4818 fast_method = mono_monitor_get_fast_path (cmethod);
4822 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
/* --- System.Threading.Interlocked --- */
4825 } else if (cmethod->klass->image == mono_defaults.corlib &&
4826 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
4827 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
4830 #if SIZEOF_REGISTER == 8
4831 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
4832 /* 64 bit reads are already atomic */
4833 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
4834 ins->dreg = mono_alloc_preg (cfg);
4835 ins->inst_basereg = args [0]->dreg;
4836 ins->inst_offset = 0;
4837 MONO_ADD_INS (cfg->cbb, ins);
4841 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
/* Increment: lowered to atomic-add of the constant 1 */
4842 if (strcmp (cmethod->name, "Increment") == 0) {
4843 MonoInst *ins_iconst;
4846 if (fsig->params [0]->type == MONO_TYPE_I4)
4847 opcode = OP_ATOMIC_ADD_NEW_I4;
4848 #if SIZEOF_REGISTER == 8
4849 else if (fsig->params [0]->type == MONO_TYPE_I8)
4850 opcode = OP_ATOMIC_ADD_NEW_I8;
4853 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4854 ins_iconst->inst_c0 = 1;
4855 ins_iconst->dreg = mono_alloc_ireg (cfg);
4856 MONO_ADD_INS (cfg->cbb, ins_iconst);
4858 MONO_INST_NEW (cfg, ins, opcode);
4859 ins->dreg = mono_alloc_ireg (cfg);
4860 ins->inst_basereg = args [0]->dreg;
4861 ins->inst_offset = 0;
4862 ins->sreg2 = ins_iconst->dreg;
4863 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4864 MONO_ADD_INS (cfg->cbb, ins);
/* Decrement: same shape, atomic-add of -1 */
4866 } else if (strcmp (cmethod->name, "Decrement") == 0) {
4867 MonoInst *ins_iconst;
4870 if (fsig->params [0]->type == MONO_TYPE_I4)
4871 opcode = OP_ATOMIC_ADD_NEW_I4;
4872 #if SIZEOF_REGISTER == 8
4873 else if (fsig->params [0]->type == MONO_TYPE_I8)
4874 opcode = OP_ATOMIC_ADD_NEW_I8;
4877 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4878 ins_iconst->inst_c0 = -1;
4879 ins_iconst->dreg = mono_alloc_ireg (cfg);
4880 MONO_ADD_INS (cfg->cbb, ins_iconst);
4882 MONO_INST_NEW (cfg, ins, opcode);
4883 ins->dreg = mono_alloc_ireg (cfg);
4884 ins->inst_basereg = args [0]->dreg;
4885 ins->inst_offset = 0;
4886 ins->sreg2 = ins_iconst->dreg;
4887 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4888 MONO_ADD_INS (cfg->cbb, ins);
/* Add: atomic-add of the caller-supplied value */
4890 } else if (strcmp (cmethod->name, "Add") == 0) {
4893 if (fsig->params [0]->type == MONO_TYPE_I4)
4894 opcode = OP_ATOMIC_ADD_NEW_I4;
4895 #if SIZEOF_REGISTER == 8
4896 else if (fsig->params [0]->type == MONO_TYPE_I8)
4897 opcode = OP_ATOMIC_ADD_NEW_I8;
4901 MONO_INST_NEW (cfg, ins, opcode);
4902 ins->dreg = mono_alloc_ireg (cfg);
4903 ins->inst_basereg = args [0]->dreg;
4904 ins->inst_offset = 0;
4905 ins->sreg2 = args [1]->dreg;
4906 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4907 MONO_ADD_INS (cfg->cbb, ins);
4910 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
4912 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
4913 if (strcmp (cmethod->name, "Exchange") == 0) {
4915 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
4917 if (fsig->params [0]->type == MONO_TYPE_I4)
4918 opcode = OP_ATOMIC_EXCHANGE_I4;
4919 #if SIZEOF_REGISTER == 8
/* references and native ints are 8 bytes on 64-bit targets */
4920 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
4921 (fsig->params [0]->type == MONO_TYPE_I))
4922 opcode = OP_ATOMIC_EXCHANGE_I8;
4924 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
4925 opcode = OP_ATOMIC_EXCHANGE_I4;
4930 MONO_INST_NEW (cfg, ins, opcode);
4931 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
4932 ins->inst_basereg = args [0]->dreg;
4933 ins->inst_offset = 0;
4934 ins->sreg2 = args [1]->dreg;
4935 MONO_ADD_INS (cfg->cbb, ins);
4937 switch (fsig->params [0]->type) {
4939 ins->type = STACK_I4;
4943 ins->type = STACK_I8;
4945 case MONO_TYPE_OBJECT:
4946 ins->type = STACK_OBJ;
4949 g_assert_not_reached ();
/* storing a reference requires a write barrier for generational GCs */
4952 if (cfg->gen_write_barriers && is_ref)
4953 emit_write_barrier (cfg, args [0], args [1], -1);
4955 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4957 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
4958 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
4960 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
4961 if (fsig->params [1]->type == MONO_TYPE_I4)
4963 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
4964 size = sizeof (gpointer);
4965 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
/* 4-byte CAS: dest = sreg1, exchange value = sreg2, comparand = sreg3 */
4968 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
4969 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
4970 ins->sreg1 = args [0]->dreg;
4971 ins->sreg2 = args [1]->dreg;
4972 ins->sreg3 = args [2]->dreg;
4973 ins->type = STACK_I4;
4974 MONO_ADD_INS (cfg->cbb, ins);
4975 } else if (size == 8) {
4976 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
4977 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
4978 ins->sreg1 = args [0]->dreg;
4979 ins->sreg2 = args [1]->dreg;
4980 ins->sreg3 = args [2]->dreg;
4981 ins->type = STACK_I8;
4982 MONO_ADD_INS (cfg->cbb, ins);
4984 /* g_assert_not_reached (); */
4986 if (cfg->gen_write_barriers && is_ref)
4987 emit_write_barrier (cfg, args [0], args [1], -1);
4989 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
/* --- misc corlib: Debugger.Break, Environment.get_IsRunningOnWindows --- */
4993 } else if (cmethod->klass->image == mono_defaults.corlib) {
4994 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
4995 && strcmp (cmethod->klass->name, "Debugger") == 0) {
4996 if (should_insert_brekpoint (cfg->method)) {
4997 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
4999 MONO_INST_NEW (cfg, ins, OP_NOP);
5000 MONO_ADD_INS (cfg->cbb, ins);
/* compile-time constant: answered per target platform */
5004 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
5005 && strcmp (cmethod->klass->name, "Environment") == 0) {
5007 EMIT_NEW_ICONST (cfg, ins, 1);
5009 EMIT_NEW_ICONST (cfg, ins, 0);
5013 } else if (cmethod->klass == mono_defaults.math_class) {
5015 * There is general branches code for Min/Max, but it does not work for
5017 * http://everything2.com/?node_id=1051618
/* final fallbacks: SIMD intrinsics, LLVM-only intrinsics, arch-specific */
5021 #ifdef MONO_ARCH_SIMD_INTRINSICS
5022 if (cfg->opt & MONO_OPT_SIMD) {
5023 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5029 if (COMPILE_LLVM (cfg)) {
5030 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
5035 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
5039 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect a call to METHOD to a different implementation.  The visible
 * case rewrites String.InternalAllocateStr into a call to the GC's managed
 * allocator (skipped when allocation profiling or MONO_OPT_SHARED is
 * active).  Returns the replacement call, or (presumably) NULL when no
 * redirection applies.
 */
5042 inline static MonoInst*
5043 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
5044 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
5046 if (method->klass == mono_defaults.string_class) {
5047 /* managed string allocation support */
5048 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
5049 MonoInst *iargs [2];
5050 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
5051 MonoMethod *managed_alloc = NULL;
5053 g_assert (vtable); /* Should not fail since it is System.String */
5054 #ifndef MONO_CROSS_COMPILE
5055 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
/* managed allocator takes (vtable, length) */
5059 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
5060 iargs [1] = args [0];
5061 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   Set up cfg->args for an inlined callee: for each argument (including
 * the implicit 'this'), create an OP_LOCAL variable and store the value
 * from the evaluation stack SP into it.
 */
5068 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
5070 MonoInst *store, *temp;
5073 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* slot 0 is 'this' when present; its type comes from the stack entry */
5074 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
5077 * FIXME: We should use *args++ = sp [0], but that would mean the arg
5078 * would be different than the MonoInst's used to represent arguments, and
5079 * the ldelema implementation can't deal with that.
5080 * Solution: When ldelema is used on an inline argument, create a var for
5081 * it, emit ldelema on that var, and emit the saving code below in
5082 * inline_method () if needed.
5084 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
5085 cfg->args [i] = temp;
5086 /* This uses cfg->args [i] which is set by the preceding line */
5087 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
5088 store->cil_code = sp [0]->cil_code;
5093 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
5094 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
5096 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debugging aid: only allow inlining of callees whose full name starts
 * with the prefix in $MONO_INLINE_CALLED_METHOD_NAME_LIMIT.  The prefix is
 * read once and cached; an empty/unset variable allows everything.
 */
5098 check_inline_called_method_name_limit (MonoMethod *called_method)
5101 static char *limit = NULL;
5103 if (limit == NULL) {
5104 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
5106 if (limit_string != NULL)
5107 limit = limit_string;
5109 limit = (char *) "";
5112 if (limit [0] != '\0') {
5113 char *called_method_name = mono_method_full_name (called_method, TRUE);
5115 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
5116 g_free (called_method_name);
5118 //return (strncmp_result <= 0);
5119 return (strncmp_result == 0);
5126 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debugging aid: counterpart of the callee check above, keyed on the
 * caller's full name and $MONO_INLINE_CALLER_METHOD_NAME_LIMIT.
 */
5128 check_inline_caller_method_name_limit (MonoMethod *caller_method)
5131 static char *limit = NULL;
5133 if (limit == NULL) {
5134 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
5135 if (limit_string != NULL) {
5136 limit = limit_string;
5138 limit = (char *) "";
5142 if (limit [0] != '\0') {
5143 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
5145 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
5146 g_free (caller_method_name);
5148 //return (strncmp_result <= 0);
5149 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *
 *   Initialize the inline return variable RVAR to a zero value of the
 * appropriate kind (integer 0, long 0, NULL pointer, 0.0, or a zeroed
 * value type via VZERO), dispatching on rvar->type.
 */
5157 emit_init_rvar (MonoCompile *cfg, MonoInst *rvar, MonoType *rtype)
/* shared storage for the R8 zero constant loaded by OP_R8CONST */
5159 static double r8_0 = 0.0;
5162 switch (rvar->type) {
5164 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
5167 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
5172 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
5175 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5176 ins->type = STACK_R8;
5177 ins->inst_p0 = (void*)&r8_0;
5178 ins->dreg = rvar->dreg;
5179 MONO_ADD_INS (cfg->cbb, ins);
5182 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (rtype));
5185 g_assert_not_reached ();
/*
 * inline_method:
 *
 *   Inline CMETHOD into the method being compiled.  Saves the parts of CFG
 * state that mono_method_to_ir () will clobber, compiles the callee's IL
 * between fresh start/end bblocks, restores the state, and on success
 * merges the new bblocks into the caller's flow graph.  On failure (cost
 * too high, broken callee, missing header) the new bblocks are discarded.
 * NOTE(review): this listing has interior lines elided (e.g. the early
 * returns after the limit checks and the cost computation).
 */
5190 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
5191 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_always)
5193 MonoInst *ins, *rvar = NULL;
5194 MonoMethodHeader *cheader;
5195 MonoBasicBlock *ebblock, *sbblock;
5197 MonoMethod *prev_inlined_method;
5198 MonoInst **prev_locals, **prev_args;
5199 MonoType **prev_arg_types;
5200 guint prev_real_offset;
5201 GHashTable *prev_cbb_hash;
5202 MonoBasicBlock **prev_cil_offset_to_bb;
5203 MonoBasicBlock *prev_cbb;
5204 unsigned char* prev_cil_start;
5205 guint32 prev_cil_offset_to_bb_len;
5206 MonoMethod *prev_current_method;
5207 MonoGenericContext *prev_generic_context;
5208 gboolean ret_var_set, prev_ret_var_set, virtual = FALSE;
5210 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* optional env-var driven filters for debugging inliner problems */
5212 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
5213 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
5216 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
5217 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
5221 if (cfg->verbose_level > 2)
5222 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
5224 if (!cmethod->inline_info) {
5225 cfg->stat_inlineable_methods++;
5226 cmethod->inline_info = 1;
5229 /* allocate local variables */
5230 cheader = mono_method_get_header (cmethod);
5232 if (cheader == NULL || mono_loader_get_last_error ()) {
5233 MonoLoaderError *error = mono_loader_get_last_error ();
5236 mono_metadata_free_mh (cheader);
5237 if (inline_always && error)
5238 mono_cfg_set_exception (cfg, error->exception_type);
5240 mono_loader_clear_error ();
5244 /* Must verify before creating locals as it can cause the JIT to assert. */
5245 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
5246 mono_metadata_free_mh (cheader);
5250 /* allocate space to store the return value */
5251 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
5252 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
5255 prev_locals = cfg->locals;
5256 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
5257 for (i = 0; i < cheader->num_locals; ++i)
5258 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
5260 /* allocate start and end blocks */
5261 /* This is needed so if the inline is aborted, we can clean up */
5262 NEW_BBLOCK (cfg, sbblock);
5263 sbblock->real_offset = real_offset;
5265 NEW_BBLOCK (cfg, ebblock);
5266 ebblock->block_num = cfg->num_bblocks++;
5267 ebblock->real_offset = real_offset;
/* save every piece of cfg state the recursive compile will overwrite */
5269 prev_args = cfg->args;
5270 prev_arg_types = cfg->arg_types;
5271 prev_inlined_method = cfg->inlined_method;
5272 cfg->inlined_method = cmethod;
5273 cfg->ret_var_set = FALSE;
5274 cfg->inline_depth ++;
5275 prev_real_offset = cfg->real_offset;
5276 prev_cbb_hash = cfg->cbb_hash;
5277 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
5278 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
5279 prev_cil_start = cfg->cil_start;
5280 prev_cbb = cfg->cbb;
5281 prev_current_method = cfg->current_method;
5282 prev_generic_context = cfg->generic_context;
5283 prev_ret_var_set = cfg->ret_var_set;
5285 if (*ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
/* recursively convert the callee's IL; costs < 0 signals failure */
5288 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, virtual);
5290 ret_var_set = cfg->ret_var_set;
/* restore the saved state regardless of the outcome */
5292 cfg->inlined_method = prev_inlined_method;
5293 cfg->real_offset = prev_real_offset;
5294 cfg->cbb_hash = prev_cbb_hash;
5295 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
5296 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
5297 cfg->cil_start = prev_cil_start;
5298 cfg->locals = prev_locals;
5299 cfg->args = prev_args;
5300 cfg->arg_types = prev_arg_types;
5301 cfg->current_method = prev_current_method;
5302 cfg->generic_context = prev_generic_context;
5303 cfg->ret_var_set = prev_ret_var_set;
5304 cfg->inline_depth --;
5306 if ((costs >= 0 && costs < 60) || inline_always) {
5307 if (cfg->verbose_level > 2)
5308 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
5310 cfg->stat_inlined_methods++;
5312 /* always add some code to avoid block split failures */
5313 MONO_INST_NEW (cfg, ins, OP_NOP);
5314 MONO_ADD_INS (prev_cbb, ins);
5316 prev_cbb->next_bb = sbblock;
5317 link_bblock (cfg, prev_cbb, sbblock);
5320 * Get rid of the begin and end bblocks if possible to aid local
5323 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
5325 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
5326 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
5328 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
5329 MonoBasicBlock *prev = ebblock->in_bb [0];
5330 mono_merge_basic_blocks (cfg, prev, ebblock);
5332 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
5333 mono_merge_basic_blocks (cfg, prev_cbb, prev);
5334 cfg->cbb = prev_cbb;
5338 * It's possible that the rvar is set in some prev bblock, but not in others.
5344 for (i = 0; i < ebblock->in_count; ++i) {
5345 bb = ebblock->in_bb [i];
5347 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
5350 emit_init_rvar (cfg, rvar, fsig->ret);
5360 * If the inlined method contains only a throw, then the ret var is not
5361 * set, so set it to a dummy value.
5364 emit_init_rvar (cfg, rvar, fsig->ret);
5366 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
/* keep the header alive until the cfg is freed */
5369 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
5372 if (cfg->verbose_level > 2)
5373 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
5374 cfg->exception_type = MONO_EXCEPTION_NONE;
5375 mono_loader_clear_error ();
5377 /* This gets rid of the newly added bblocks */
5378 cfg->cbb = prev_cbb;
5380 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
5385 * Some of these comments may well be out-of-date.
5386 * Design decisions: we do a single pass over the IL code (and we do bblock
5387 * splitting/merging in the few cases when it's required: a back jump to an IL
5388 * address that was not already seen as bblock starting point).
5389 * Code is validated as we go (full verification is still better left to metadata/verify.c).
5390 * Complex operations are decomposed in simpler ones right away. We need to let the
5391 * arch-specific code peek and poke inside this process somehow (except when the
5392 * optimizations can take advantage of the full semantic info of coarse opcodes).
5393 * All the opcodes of the form opcode.s are 'normalized' to opcode.
5394 * MonoInst->opcode initially is the IL opcode or some simplification of that
5395 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
5396 * opcode with value bigger than OP_LAST.
5397 * At this point the IR can be handed over to an interpreter, a dumb code generator
5398 * or to the optimizing code generator that will translate it to SSA form.
5400 * Profiling directed optimizations.
5401 * We may compile by default with few or no optimizations and instrument the code
5402 * or the user may indicate what methods to optimize the most either in a config file
5403 * or through repeated runs where the compiler applies offline the optimizations to
5404 * each method and then decides if it was worth it.
5407 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
5408 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
5409 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
5410 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
5411 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
5412 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
5413 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
5414 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; LOAD_ERROR;}
5416 /* offset from br.s -> br like opcodes */
5417 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return TRUE if the CIL address IP belongs to basic block BB, i.e. no
 * other bblock starts at that offset in cfg->cil_offset_to_bb.
 */
5420 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
5422 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
5424 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Pre-pass over the IL in [START, END): decode each opcode, advance IP by
 * the operand size, and create basic blocks (via GET_BBLOCK) at every
 * branch target and at the instruction following a branch/switch.  Also
 * marks the bblock containing a CEE_THROW as out-of-line so it can be
 * placed cold.  NOTE(review): per-case ip-advance lines are elided from
 * this listing.
 */
5428 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
5430 unsigned char *ip = start;
5431 unsigned char *target;
5434 MonoBasicBlock *bblock;
5435 const MonoOpcode *opcode;
5438 cli_addr = ip - start;
5439 i = mono_opcode_value ((const guint8 **)&ip, end);
5442 opcode = &mono_opcodes [i];
/* advance over the operand according to the opcode's argument kind */
5443 switch (opcode->argument) {
5444 case MonoInlineNone:
5447 case MonoInlineString:
5448 case MonoInlineType:
5449 case MonoInlineField:
5450 case MonoInlineMethod:
5453 case MonoShortInlineR:
5460 case MonoShortInlineVar:
5461 case MonoShortInlineI:
5464 case MonoShortInlineBrTarget:
/* 1-byte signed displacement relative to the next instruction */
5465 target = start + cli_addr + 2 + (signed char)ip [1];
5466 GET_BBLOCK (cfg, bblock, target);
5469 GET_BBLOCK (cfg, bblock, ip);
5471 case MonoInlineBrTarget:
/* 4-byte signed displacement relative to the next instruction */
5472 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
5473 GET_BBLOCK (cfg, bblock, target);
5476 GET_BBLOCK (cfg, bblock, ip);
5478 case MonoInlineSwitch: {
5479 guint32 n = read32 (ip + 1);
/* switch targets are relative to the end of the jump table */
5482 cli_addr += 5 + 4 * n;
5483 target = start + cli_addr;
5484 GET_BBLOCK (cfg, bblock, target);
5486 for (j = 0; j < n; ++j) {
5487 target = start + cli_addr + (gint32)read32 (ip);
5488 GET_BBLOCK (cfg, bblock, target);
5498 g_assert_not_reached ();
5501 if (i == CEE_THROW) {
5502 unsigned char *bb_start = ip - 1;
5504 /* Find the start of the bblock containing the throw */
5506 while ((bb_start >= start) && !bblock) {
5507 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
5511 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve a method TOKEN in the context of M.  For wrapper methods the
 * token indexes the wrapper's own data; otherwise it is looked up in the
 * declaring image.  "allow_open" = open constructed types are not rejected.
 */
5521 static inline MonoMethod *
5522 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5526 if (m->wrapper_type != MONO_WRAPPER_NONE)
5527 return mono_method_get_wrapper_data (m, token);
5529 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when not compiling with generic
 * sharing it additionally screens out methods on open constructed types
 * (continuation elided from this listing).
 */
5534 static inline MonoMethod *
5535 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5537 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
5539 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve a type TOKEN in the context of METHOD (wrapper data for
 * wrappers, metadata lookup otherwise) and initialize the resulting class.
 */
5545 static inline MonoClass*
5546 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
5550 if (method->wrapper_type != MONO_WRAPPER_NONE)
5551 klass = mono_method_get_wrapper_data (method, token);
5553 klass = mono_class_get_full (method->klass->image, token, context);
5555 mono_class_init (klass);
/*
 * check_linkdemand:
 *
 * Returns TRUE if the JIT should abort inlining because "callee"
 * is influenced by security attributes (CAS link demands).  For ECMA link
 * demands, code throwing a SecurityException is emitted before the call;
 * other failures are recorded on the cfg as a pending exception.
 */
5560 * Returns TRUE if the JIT should abort inlining because "callee"
5561 * is influenced by security attributes.
5564 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
/* only evaluate declarative security when inlining across methods */
5568 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
5572 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
5573 if (result == MONO_JIT_SECURITY_OK)
5576 if (result == MONO_JIT_LINKDEMAND_ECMA) {
5577 /* Generate code to throw a SecurityException before the actual call/link */
5578 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5581 NEW_ICONST (cfg, args [0], 4);
5582 NEW_METHODCONST (cfg, args [1], caller);
5583 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
5584 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
5585 /* don't hide previous results */
5586 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
5587 cfg->exception_data = result;
/*
 * throw_exception:
 *
 *   Return (cached) the SecurityManager.ThrowException(Exception) method
 * used to raise CoreCLR security violations from JITted code.
 */
5595 throw_exception (void)
5597 static MonoMethod *method = NULL;
5600 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5601 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit a call to SecurityManager.ThrowException () passing the
 * pre-created exception object EX as a pointer constant.
 */
5608 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
5610 MonoMethod *thrower = throw_exception ();
5613 EMIT_NEW_PCONST (cfg, args [0], ex);
5614 mono_emit_method_call (cfg, thrower, args, NULL);
/*
 * get_original_method:
 *
 * Return the original method if a wrapper is specified. We can only access
 * the custom attributes from the original method.
 */
5618 * Return the original method if a wrapper is specified. We can only access
5619 * the custom attributes from the original method.
5622 get_original_method (MonoMethod *method)
5624 if (method->wrapper_type == MONO_WRAPPER_NONE)
5627 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
5628 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
5631 /* in other cases we need to find the original method */
5632 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security: check CALLER's access to FIELD against the original
 * (unwrapped) method's attributes; on violation emit code to throw the
 * returned exception.
 */
5636 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
5637 MonoBasicBlock *bblock, unsigned char *ip)
5639 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5640 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
5642 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 * CoreCLR security check: verify CALLER may call CALLEE; if the check
 * returns an exception, emit code that throws it at this point.
 */
5646 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
5647 MonoBasicBlock *bblock, unsigned char *ip)
5649 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5650 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
5652 emit_throw_exception (cfg, ex);
5656 * Check that the IL instructions at ip are the array initialization
5657 * sequence and return the pointer to the data and the size.
5660 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
5663 * newarr[System.Int32]
5665 * ldtoken field valuetype ...
5666 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* Pattern match the IL byte sequence: dup; ldtoken <field>; call <method>.
 * ip[5] == 0x4 presumably checks the token-table byte of the ldtoken
 * operand (field token) — TODO confirm against ECMA-335 token encoding. */
5668 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
5669 guint32 token = read32 (ip + 7);
5670 guint32 field_token = read32 (ip + 2);
5671 guint32 field_index = field_token & 0xffffff;
5673 const char *data_ptr;
5675 MonoMethod *cmethod;
5676 MonoClass *dummy_class;
5677 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
5683 *out_field_token = field_token;
/* Resolve the call target and verify it really is
 * [mscorlib]RuntimeHelpers::InitializeArray, otherwise bail out. */
5685 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
5688 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
/* Only element types whose raw metadata bytes match the runtime layout
 * are handled; multi-byte types are limited to little-endian targets. */
5690 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
5691 case MONO_TYPE_BOOLEAN:
5695 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
5696 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
5697 case MONO_TYPE_CHAR:
5707 return NULL; /* stupid ARM FP swapped format */
/* Sanity check: the computed blob size must fit in the RVA field's type. */
5717 if (size > mono_type_size (field->type, &dummy_align))
5720 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
/* Non-dynamic (on-disk) images: map the field's RVA to get the data blob. */
5721 if (!method->klass->image->dynamic) {
5722 field_index = read32 (ip + 2) & 0xffffff;
5723 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
5724 data_ptr = mono_image_rva_map (method->klass->image, rva);
5725 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
5726 /* for aot code we do the lookup on load */
5727 if (aot && data_ptr)
5728 return GUINT_TO_POINTER (rva);
5730 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* Dynamic (Reflection.Emit) images: read the data directly from the field. */
5732 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 * Record an InvalidProgramException on CFG with a message naming METHOD and
 * disassembling the offending instruction at IP. The method header is queued
 * on cfg->headers_to_free so it is released with the compile's mempool.
 */
5740 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
5742 char *method_fname = mono_method_full_name (method, TRUE);
5744 MonoMethodHeader *header = mono_method_get_header (method);
/* An empty body cannot be disassembled; use a fixed description instead. */
5746 if (header->code_size == 0)
5747 method_code = g_strdup ("method body is empty.");
5749 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
5750 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
5751 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
5752 g_free (method_fname);
5753 g_free (method_code);
5754 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *
 * Abort compilation by storing a pre-built EXCEPTION object on CFG.
 * The exception pointer is GC-registered so the object stays alive until
 * the compile reports it.
 */
5758 set_exception_object (MonoCompile *cfg, MonoException *exception)
5760 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
5761 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
5762 cfg->exception_ptr = exception;
/*
 * emit_stloc_ir:
 *
 * Emit the store of stack value SP[0] into local N. When the store would be
 * a plain register move of a constant just produced as the last instruction,
 * retarget that instruction's destination register instead of emitting a move.
 */
5766 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5769 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5770 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5771 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5772 /* Optimize reg-reg moves away */
5774 * Can't optimize other opcodes, since sp[0] might point to
5775 * the last ins of a decomposed opcode.
5777 sp [0]->dreg = (cfg)->locals [n]->dreg;
/* Fallback: emit an explicit local-store instruction. */
5779 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5784 * ldloca inhibits many optimizations so try to get rid of it in common
static inline unsigned char *
/*
 * emit_optimized_ldloca_ir:
 *
 * Try to fold a ldloca(.s) followed by INITOBJ into a direct zeroing of the
 * local, avoiding the address-taken local that inhibits later optimizations.
 * Returns the IP past the consumed instructions on success — presumably NULL
 * when the pattern does not match (return paths not visible here; confirm).
 */
5788 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
5797 local = read16 (ip + 2);
/* Match the two-byte INITOBJ opcode (CEE_PREFIX1 CEE_INITOBJ) within the
 * current basic block and with enough bytes left for its token operand. */
5801 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5802 gboolean skip = FALSE;
5804 /* From the INITOBJ case */
5805 token = read32 (ip + 2);
5806 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5807 CHECK_TYPELOAD (klass);
/* Reference types are zeroed with a NULL store, value types with VZERO. */
5808 if (mini_type_is_reference (cfg, &klass->byval_arg)) {
5809 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5810 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5811 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/*
 * is_exception_class:
 *
 * Walk CLASS's parent chain looking for System.Exception
 * (mono_defaults.exception_class).
 */
5824 is_exception_class (MonoClass *class)
5827 if (class == mono_defaults.exception_class)
5829 class = class->parent;
5835 * is_jit_optimizer_disabled:
5837 * Determine whether M's assembly has a DebuggableAttribute with the
5838 * IsJITOptimizerDisabled flag set.
5841 is_jit_optimizer_disabled (MonoMethod *m)
5843 MonoAssembly *ass = m->klass->image->assembly;
5844 MonoCustomAttrInfo* attrs;
/* DebuggableAttribute class is cached across calls. */
5845 static MonoClass *klass;
5847 gboolean val = FALSE;
/* Fast path: the result is cached per-assembly. */
5850 if (ass->jit_optimizer_disabled_inited)
5851 return ass->jit_optimizer_disabled;
5854 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
/* Publish the (default FALSE) result; the barrier orders the value store
 * before the inited flag so other threads never see the flag without it. */
5857 ass->jit_optimizer_disabled = FALSE;
5858 mono_memory_barrier ();
5859 ass->jit_optimizer_disabled_inited = TRUE;
/* Scan the assembly's custom attributes for DebuggableAttribute(bool, bool). */
5863 attrs = mono_custom_attrs_from_assembly (ass);
5865 for (i = 0; i < attrs->num_attrs; ++i) {
5866 MonoCustomAttrEntry *attr = &attrs->attrs [i];
5869 MonoMethodSignature *sig;
5871 if (!attr->ctor || attr->ctor->klass != klass)
5873 /* Decode the attribute. See reflection.c */
5874 len = attr->data_size;
5875 p = (const char*)attr->data;
/* Custom-attribute blobs start with the 0x0001 prolog (ECMA-335 II.23.3). */
5876 g_assert (read16 (p) == 0x0001);
5879 // FIXME: Support named parameters
/* Only the two-boolean constructor overload is decoded. */
5880 sig = mono_method_signature (attr->ctor);
5881 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
5883 /* Two boolean arguments */
5887 mono_custom_attrs_free (attrs);
/* Cache the decoded result, again ordering value before the inited flag. */
5890 ass->jit_optimizer_disabled = val;
5891 mono_memory_barrier ();
5892 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 * Decide whether a tail. call from METHOD to CMETHOD with signature FSIG can
 * be compiled as a real tail call. Starts from an arch-specific or
 * signature-compatibility baseline, then vetoes cases where callee arguments
 * could reference the current frame or where the runtime needs the frame.
 */
5898 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig)
5900 gboolean supported_tail_call;
5903 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
5904 supported_tail_call = MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
/* Generic fallback: signatures must match exactly and the callee must not
 * return a value type (which would need a hidden return buffer). */
5906 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
5909 for (i = 0; i < fsig->param_count; ++i) {
5910 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
5911 /* These can point to the current method's stack */
5912 supported_tail_call = FALSE;
5914 if (fsig->hasthis && cmethod->klass->valuetype)
5915 /* this might point to the current method's stack */
5916 supported_tail_call = FALSE;
/* P/Invoke targets and most wrappers cannot be tail-called. */
5917 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
5918 supported_tail_call = FALSE;
/* A method that saves the LMF needs its frame to stay live. */
5919 if (cfg->method->save_lmf)
5920 supported_tail_call = FALSE;
5921 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
5922 supported_tail_call = FALSE;
5924 /* Debugging support */
/* mono_debug_count () lets developers bisect by disabling the
 * optimization after a configurable number of uses. */
5926 if (supported_tail_call) {
5927 if (!mono_debug_count ())
5928 supported_tail_call = FALSE;
5932 return supported_tail_call;
5935 /* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
5936 * it to the thread local value based on the tls_offset field. Every other kind of access to
5937 * the field causes an assert.
/*
 * is_magic_tls_access:
 *
 * Return TRUE only for the corlib ThreadLocal`1.tlsdata field, whose ldflda
 * the JIT special-cases (see the comment above).
 */
5940 is_magic_tls_access (MonoClassField *field)
5942 if (strcmp (field->name, "tlsdata"))
5944 if (strcmp (field->parent->name, "ThreadLocal`1"))
/* Name checks passed; finally require the defining image to be corlib. */
5946 return field->parent->image == mono_defaults.corlib;
5949 /* emits the code needed to access a managed tls var (like ThreadStatic)
5950 * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
5951 * pointer for the current thread.
5952 * Returns the MonoInst* representing the address of the tls var.
5955 emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
5958 int static_data_reg, array_reg, dreg;
5959 int offset2_reg, idx_reg;
5960 // inlined access to the tls data
5961 // idx = (offset >> 24) - 1;
5962 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
/* Load thread->static_data (array of per-index data blocks). */
5963 static_data_reg = alloc_ireg (cfg);
5964 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
/* idx = (offset >> 24) - 1, then scale by pointer size (shift by 3 on
 * 64-bit, 2 on 32-bit) to index into the pointer array. */
5965 idx_reg = alloc_ireg (cfg);
5966 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
5967 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
5968 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
5969 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
/* array = static_data [idx] */
5970 array_reg = alloc_ireg (cfg);
5971 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
/* addr = array + (offset & 0xffffff): low 24 bits are the byte offset. */
5972 offset2_reg = alloc_ireg (cfg);
5973 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
5974 dreg = alloc_ireg (cfg);
5975 EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
5980 * redirect access to the tlsdata field to the tls var given by the tls_offset field.
5981 * this address is cached per-method in cached_tls_addr.
5984 create_magic_tls_access (MonoCompile *cfg, MonoInst *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
5986 MonoInst *load, *addr, *temp, *store, *thread_ins;
5987 MonoClassField *offset_field;
/* Reuse the per-method cached address if we already computed it. */
5989 if (*cached_tls_addr) {
5990 EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
/* Load the tls_offset field from the ThreadLocal<T> instance. */
5993 thread_ins = mono_get_thread_intrinsic (cfg);
5994 offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");
5996 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
5998 MONO_ADD_INS (cfg->cbb, thread_ins);
/* No thread intrinsic on this target: fall back to calling
 * Thread.CurrentInternalThread_internal () to obtain the thread object. */
6000 MonoMethod *thread_method;
6001 thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
6002 thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
/* Compute the address of the tls slot and give it managed-pointer type info. */
6004 addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
6005 addr->klass = mono_class_from_mono_type (tls_field->type);
6006 addr->type = STACK_MP;
/* Cache the address in a local so later accesses in this method reuse it. */
6007 *cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
6008 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);
6010 EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
6015 * mono_method_to_ir:
6017 * Translate the .net IL into linear IR.
6020 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
6021 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
6022 guint inline_offset, gboolean is_virtual_call)
6025 MonoInst *ins, **sp, **stack_start;
6026 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
6027 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
6028 MonoMethod *cmethod, *method_definition;
6029 MonoInst **arg_array;
6030 MonoMethodHeader *header;
6032 guint32 token, ins_flag;
6034 MonoClass *constrained_call = NULL;
6035 unsigned char *ip, *end, *target, *err_pos;
6036 static double r8_0 = 0.0;
6037 MonoMethodSignature *sig;
6038 MonoGenericContext *generic_context = NULL;
6039 MonoGenericContainer *generic_container = NULL;
6040 MonoType **param_types;
6041 int i, n, start_new_bblock, dreg;
6042 int num_calls = 0, inline_costs = 0;
6043 int breakpoint_id = 0;
6045 MonoBoolean security, pinvoke;
6046 MonoSecurityManager* secman = NULL;
6047 MonoDeclSecurityActions actions;
6048 GSList *class_inits = NULL;
6049 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
6051 gboolean init_locals, seq_points, skip_dead_blocks;
6052 gboolean disable_inline, sym_seq_points = FALSE;
6053 MonoInst *cached_tls_addr = NULL;
6054 MonoDebugMethodInfo *minfo;
6055 MonoBitSet *seq_point_locs = NULL;
6057 disable_inline = is_jit_optimizer_disabled (method);
6059 /* serialization and xdomain stuff may need access to private fields and methods */
6060 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
6061 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
6062 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
6063 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
6064 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
6065 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
6067 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
6069 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
6070 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
6071 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
6072 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
6073 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
6075 image = method->klass->image;
6076 header = mono_method_get_header (method);
6078 MonoLoaderError *error;
6080 if ((error = mono_loader_get_last_error ())) {
6081 mono_cfg_set_exception (cfg, error->exception_type);
6083 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
6084 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
6086 goto exception_exit;
6088 generic_container = mono_method_get_generic_container (method);
6089 sig = mono_method_signature (method);
6090 num_args = sig->hasthis + sig->param_count;
6091 ip = (unsigned char*)header->code;
6092 cfg->cil_start = ip;
6093 end = ip + header->code_size;
6094 cfg->stat_cil_code_size += header->code_size;
6095 init_locals = header->init_locals;
6097 seq_points = cfg->gen_seq_points && cfg->method == method;
6098 #ifdef PLATFORM_ANDROID
6099 seq_points &= cfg->method->wrapper_type == MONO_WRAPPER_NONE;
6102 if (cfg->gen_seq_points && cfg->method == method) {
6103 minfo = mono_debug_lookup_method (method);
6105 int i, n_il_offsets;
6109 mono_debug_symfile_get_line_numbers_full (minfo, NULL, NULL, &n_il_offsets, &il_offsets, &line_numbers, NULL, NULL);
6110 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6111 sym_seq_points = TRUE;
6112 for (i = 0; i < n_il_offsets; ++i) {
6113 if (il_offsets [i] < header->code_size)
6114 mono_bitset_set_fast (seq_point_locs, il_offsets [i]);
6120 * Methods without init_locals set could cause asserts in various passes
6125 method_definition = method;
6126 while (method_definition->is_inflated) {
6127 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
6128 method_definition = imethod->declaring;
6131 /* SkipVerification is not allowed if core-clr is enabled */
6132 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
6134 dont_verify_stloc = TRUE;
6137 if (mono_debug_using_mono_debugger ())
6138 cfg->keep_cil_nops = TRUE;
6140 if (sig->is_inflated)
6141 generic_context = mono_method_get_context (method);
6142 else if (generic_container)
6143 generic_context = &generic_container->context;
6144 cfg->generic_context = generic_context;
6146 if (!cfg->generic_sharing_context)
6147 g_assert (!sig->has_type_parameters);
6149 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
6150 g_assert (method->is_inflated);
6151 g_assert (mono_method_get_context (method)->method_inst);
6153 if (method->is_inflated && mono_method_get_context (method)->method_inst)
6154 g_assert (sig->generic_param_count);
6156 if (cfg->method == method) {
6157 cfg->real_offset = 0;
6159 cfg->real_offset = inline_offset;
6162 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
6163 cfg->cil_offset_to_bb_len = header->code_size;
6165 cfg->current_method = method;
6167 if (cfg->verbose_level > 2)
6168 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
6170 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
6172 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
6173 for (n = 0; n < sig->param_count; ++n)
6174 param_types [n + sig->hasthis] = sig->params [n];
6175 cfg->arg_types = param_types;
6177 dont_inline = g_list_prepend (dont_inline, method);
6178 if (cfg->method == method) {
6180 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
6181 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
6184 NEW_BBLOCK (cfg, start_bblock);
6185 cfg->bb_entry = start_bblock;
6186 start_bblock->cil_code = NULL;
6187 start_bblock->cil_length = 0;
6188 #if defined(__native_client_codegen__)
6189 MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
6190 ins->dreg = alloc_dreg (cfg, STACK_I4);
6191 MONO_ADD_INS (start_bblock, ins);
6195 NEW_BBLOCK (cfg, end_bblock);
6196 cfg->bb_exit = end_bblock;
6197 end_bblock->cil_code = NULL;
6198 end_bblock->cil_length = 0;
6199 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
6200 g_assert (cfg->num_bblocks == 2);
6202 arg_array = cfg->args;
6204 if (header->num_clauses) {
6205 cfg->spvars = g_hash_table_new (NULL, NULL);
6206 cfg->exvars = g_hash_table_new (NULL, NULL);
6208 /* handle exception clauses */
6209 for (i = 0; i < header->num_clauses; ++i) {
6210 MonoBasicBlock *try_bb;
6211 MonoExceptionClause *clause = &header->clauses [i];
6212 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
6213 try_bb->real_offset = clause->try_offset;
6214 try_bb->try_start = TRUE;
6215 try_bb->region = ((i + 1) << 8) | clause->flags;
6216 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
6217 tblock->real_offset = clause->handler_offset;
6218 tblock->flags |= BB_EXCEPTION_HANDLER;
6220 link_bblock (cfg, try_bb, tblock);
6222 if (*(ip + clause->handler_offset) == CEE_POP)
6223 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
6225 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
6226 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
6227 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
6228 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
6229 MONO_ADD_INS (tblock, ins);
6231 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) {
6232 /* finally clauses already have a seq point */
6233 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
6234 MONO_ADD_INS (tblock, ins);
6237 /* todo: is a fault block unsafe to optimize? */
6238 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
6239 tblock->flags |= BB_EXCEPTION_UNSAFE;
6243 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
6245 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
6247 /* catch and filter blocks get the exception object on the stack */
6248 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
6249 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
6250 MonoInst *dummy_use;
6252 /* mostly like handle_stack_args (), but just sets the input args */
6253 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
6254 tblock->in_scount = 1;
6255 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
6256 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
6259 * Add a dummy use for the exvar so its liveness info will be
6263 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
6265 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
6266 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
6267 tblock->flags |= BB_EXCEPTION_HANDLER;
6268 tblock->real_offset = clause->data.filter_offset;
6269 tblock->in_scount = 1;
6270 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
6271 /* The filter block shares the exvar with the handler block */
6272 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
6273 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
6274 MONO_ADD_INS (tblock, ins);
6278 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
6279 clause->data.catch_class &&
6280 cfg->generic_sharing_context &&
6281 mono_class_check_context_used (clause->data.catch_class)) {
6283 * In shared generic code with catch
6284 * clauses containing type variables
6285 * the exception handling code has to
6286 * be able to get to the rgctx.
6287 * Therefore we have to make sure that
6288 * the vtable/mrgctx argument (for
6289 * static or generic methods) or the
6290 * "this" argument (for non-static
6291 * methods) are live.
6293 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
6294 mini_method_get_context (method)->method_inst ||
6295 method->klass->valuetype) {
6296 mono_get_vtable_var (cfg);
6298 MonoInst *dummy_use;
6300 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
6305 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
6306 cfg->cbb = start_bblock;
6307 cfg->args = arg_array;
6308 mono_save_args (cfg, sig, inline_args);
6311 /* FIRST CODE BLOCK */
6312 NEW_BBLOCK (cfg, bblock);
6313 bblock->cil_code = ip;
6317 ADD_BBLOCK (cfg, bblock);
6319 if (cfg->method == method) {
6320 breakpoint_id = mono_debugger_method_has_breakpoint (method);
6321 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
6322 MONO_INST_NEW (cfg, ins, OP_BREAK);
6323 MONO_ADD_INS (bblock, ins);
6327 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
6328 secman = mono_security_manager_get_methods ();
6330 security = (secman && mono_method_has_declsec (method));
6331 /* at this point having security doesn't mean we have any code to generate */
6332 if (security && (cfg->method == method)) {
6333 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
6334 * And we do not want to enter the next section (with allocation) if we
6335 * have nothing to generate */
6336 security = mono_declsec_get_demands (method, &actions);
6339 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
6340 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
6342 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
6343 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6344 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
6346 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
6347 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
6351 mono_custom_attrs_free (custom);
6354 custom = mono_custom_attrs_from_class (wrapped->klass);
6355 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
6359 mono_custom_attrs_free (custom);
6362 /* not a P/Invoke after all */
6367 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
6368 /* we use a separate basic block for the initialization code */
6369 NEW_BBLOCK (cfg, init_localsbb);
6370 cfg->bb_init = init_localsbb;
6371 init_localsbb->real_offset = cfg->real_offset;
6372 start_bblock->next_bb = init_localsbb;
6373 init_localsbb->next_bb = bblock;
6374 link_bblock (cfg, start_bblock, init_localsbb);
6375 link_bblock (cfg, init_localsbb, bblock);
6377 cfg->cbb = init_localsbb;
6379 start_bblock->next_bb = bblock;
6380 link_bblock (cfg, start_bblock, bblock);
6383 /* at this point we know, if security is TRUE, that some code needs to be generated */
6384 if (security && (cfg->method == method)) {
6387 cfg->stat_cas_demand_generation++;
6389 if (actions.demand.blob) {
6390 /* Add code for SecurityAction.Demand */
6391 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
6392 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
6393 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
6394 mono_emit_method_call (cfg, secman->demand, args, NULL);
6396 if (actions.noncasdemand.blob) {
6397 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
6398 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
6399 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
6400 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
6401 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
6402 mono_emit_method_call (cfg, secman->demand, args, NULL);
6404 if (actions.demandchoice.blob) {
6405 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
6406 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
6407 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
6408 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
6409 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
6413 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
6415 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
6418 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
6419 /* check if this is native code, e.g. an icall or a p/invoke */
6420 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
6421 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
6423 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
6424 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
6426 /* if this ia a native call then it can only be JITted from platform code */
6427 if ((icall || pinvk) && method->klass && method->klass->image) {
6428 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
6429 MonoException *ex = icall ? mono_get_exception_security () :
6430 mono_get_exception_method_access ();
6431 emit_throw_exception (cfg, ex);
6438 if (header->code_size == 0)
6441 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
6446 if (cfg->method == method)
6447 mono_debug_init_method (cfg, bblock, breakpoint_id);
6449 for (n = 0; n < header->num_locals; ++n) {
6450 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
6455 /* We force the vtable variable here for all shared methods
6456 for the possibility that they might show up in a stack
6457 trace where their exact instantiation is needed. */
6458 if (cfg->generic_sharing_context && method == cfg->method) {
6459 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
6460 mini_method_get_context (method)->method_inst ||
6461 method->klass->valuetype) {
6462 mono_get_vtable_var (cfg);
6464 /* FIXME: Is there a better way to do this?
6465 We need the variable live for the duration
6466 of the whole method. */
6467 cfg->args [0]->flags |= MONO_INST_INDIRECT;
6471 /* add a check for this != NULL to inlined methods */
6472 if (is_virtual_call) {
6475 NEW_ARGLOAD (cfg, arg_ins, 0);
6476 MONO_ADD_INS (cfg->cbb, arg_ins);
6477 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
6480 skip_dead_blocks = !dont_verify;
6481 if (skip_dead_blocks) {
6482 original_bb = bb = mono_basic_block_split (method, &error);
6483 if (!mono_error_ok (&error)) {
6484 mono_error_cleanup (&error);
6490 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
6491 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
6494 start_new_bblock = 0;
6497 if (cfg->method == method)
6498 cfg->real_offset = ip - header->code;
6500 cfg->real_offset = inline_offset;
6505 if (start_new_bblock) {
6506 bblock->cil_length = ip - bblock->cil_code;
6507 if (start_new_bblock == 2) {
6508 g_assert (ip == tblock->cil_code);
6510 GET_BBLOCK (cfg, tblock, ip);
6512 bblock->next_bb = tblock;
6515 start_new_bblock = 0;
6516 for (i = 0; i < bblock->in_scount; ++i) {
6517 if (cfg->verbose_level > 3)
6518 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
6519 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
6523 g_slist_free (class_inits);
6526 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
6527 link_bblock (cfg, bblock, tblock);
6528 if (sp != stack_start) {
6529 handle_stack_args (cfg, stack_start, sp - stack_start);
6531 CHECK_UNVERIFIABLE (cfg);
6533 bblock->next_bb = tblock;
6536 for (i = 0; i < bblock->in_scount; ++i) {
6537 if (cfg->verbose_level > 3)
6538 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
6539 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
6542 g_slist_free (class_inits);
6547 if (skip_dead_blocks) {
6548 int ip_offset = ip - header->code;
6550 if (ip_offset == bb->end)
6554 int op_size = mono_opcode_size (ip, end);
6555 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
6557 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
6559 if (ip_offset + op_size == bb->end) {
6560 MONO_INST_NEW (cfg, ins, OP_NOP);
6561 MONO_ADD_INS (bblock, ins);
6562 start_new_bblock = 1;
6570 * Sequence points are points where the debugger can place a breakpoint.
6571 * Currently, we generate these automatically at points where the IL
6574 if (seq_points && ((sp == stack_start) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
6576 * Make methods interruptable at the beginning, and at the targets of
6577 * backward branches.
6578 * Also, do this at the start of every bblock in methods with clauses too,
6579 * to be able to handle instructions with inprecise control flow like
6581 * Backward branches are handled at the end of method-to-ir ().
6583 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
6585 /* Avoid sequence points on empty IL like .volatile */
6586 // FIXME: Enable this
6587 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
6588 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
6589 MONO_ADD_INS (cfg->cbb, ins);
6592 bblock->real_offset = cfg->real_offset;
6594 if ((cfg->method == method) && cfg->coverage_info) {
6595 guint32 cil_offset = ip - header->code;
6596 cfg->coverage_info->data [cil_offset].cil_code = ip;
6598 /* TODO: Use an increment here */
6599 #if defined(TARGET_X86)
6600 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
6601 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
6603 MONO_ADD_INS (cfg->cbb, ins);
6605 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
6606 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
6610 if (cfg->verbose_level > 3)
6611 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
6615 if (seq_points && !sym_seq_points && sp != stack_start) {
6617 * The C# compiler uses these nops to notify the JIT that it should
6618 * insert seq points.
6620 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
6621 MONO_ADD_INS (cfg->cbb, ins);
6623 if (cfg->keep_cil_nops)
6624 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
6626 MONO_INST_NEW (cfg, ins, OP_NOP);
6628 MONO_ADD_INS (bblock, ins);
6631 if (should_insert_brekpoint (cfg->method)) {
6632 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6634 MONO_INST_NEW (cfg, ins, OP_NOP);
6637 MONO_ADD_INS (bblock, ins);
6643 CHECK_STACK_OVF (1);
6644 n = (*ip)-CEE_LDARG_0;
6646 EMIT_NEW_ARGLOAD (cfg, ins, n);
6654 CHECK_STACK_OVF (1);
6655 n = (*ip)-CEE_LDLOC_0;
6657 EMIT_NEW_LOCLOAD (cfg, ins, n);
6666 n = (*ip)-CEE_STLOC_0;
6669 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
6671 emit_stloc_ir (cfg, sp, header, n);
6678 CHECK_STACK_OVF (1);
6681 EMIT_NEW_ARGLOAD (cfg, ins, n);
6687 CHECK_STACK_OVF (1);
6690 NEW_ARGLOADA (cfg, ins, n);
6691 MONO_ADD_INS (cfg->cbb, ins);
6701 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
6703 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
6708 CHECK_STACK_OVF (1);
6711 EMIT_NEW_LOCLOAD (cfg, ins, n);
6715 case CEE_LDLOCA_S: {
6716 unsigned char *tmp_ip;
6718 CHECK_STACK_OVF (1);
6719 CHECK_LOCAL (ip [1]);
6721 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
6727 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
6736 CHECK_LOCAL (ip [1]);
6737 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
6739 emit_stloc_ir (cfg, sp, header, ip [1]);
6744 CHECK_STACK_OVF (1);
6745 EMIT_NEW_PCONST (cfg, ins, NULL);
6746 ins->type = STACK_OBJ;
6751 CHECK_STACK_OVF (1);
6752 EMIT_NEW_ICONST (cfg, ins, -1);
6765 CHECK_STACK_OVF (1);
6766 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
6772 CHECK_STACK_OVF (1);
6774 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
6780 CHECK_STACK_OVF (1);
6781 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
6787 CHECK_STACK_OVF (1);
6788 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6789 ins->type = STACK_I8;
6790 ins->dreg = alloc_dreg (cfg, STACK_I8);
6792 ins->inst_l = (gint64)read64 (ip);
6793 MONO_ADD_INS (bblock, ins);
6799 gboolean use_aotconst = FALSE;
6801 #ifdef TARGET_POWERPC
6802 /* FIXME: Clean this up */
6803 if (cfg->compile_aot)
6804 use_aotconst = TRUE;
6807 /* FIXME: we should really allocate this only late in the compilation process */
6808 f = mono_domain_alloc (cfg->domain, sizeof (float));
6810 CHECK_STACK_OVF (1);
6816 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
6818 dreg = alloc_freg (cfg);
6819 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
6820 ins->type = STACK_R8;
6822 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6823 ins->type = STACK_R8;
6824 ins->dreg = alloc_dreg (cfg, STACK_R8);
6826 MONO_ADD_INS (bblock, ins);
6836 gboolean use_aotconst = FALSE;
6838 #ifdef TARGET_POWERPC
6839 /* FIXME: Clean this up */
6840 if (cfg->compile_aot)
6841 use_aotconst = TRUE;
6844 /* FIXME: we should really allocate this only late in the compilation process */
6845 d = mono_domain_alloc (cfg->domain, sizeof (double));
6847 CHECK_STACK_OVF (1);
6853 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
6855 dreg = alloc_freg (cfg);
6856 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
6857 ins->type = STACK_R8;
6859 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6860 ins->type = STACK_R8;
6861 ins->dreg = alloc_dreg (cfg, STACK_R8);
6863 MONO_ADD_INS (bblock, ins);
6872 MonoInst *temp, *store;
6874 CHECK_STACK_OVF (1);
6878 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
6879 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
6881 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6884 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6897 if (sp [0]->type == STACK_R8)
6898 /* we need to pop the value from the x86 FP stack */
6899 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
6905 INLINE_FAILURE ("jmp");
6906 GSHAREDVT_FAILURE (*ip);
6909 if (stack_start != sp)
6911 token = read32 (ip + 1);
6912 /* FIXME: check the signature matches */
6913 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6915 if (!cmethod || mono_loader_get_last_error ())
6918 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
6919 GENERIC_SHARING_FAILURE (CEE_JMP);
6921 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
6922 CHECK_CFG_EXCEPTION;
6924 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6926 MonoMethodSignature *fsig = mono_method_signature (cmethod);
6929 /* Handle tail calls similarly to calls */
6930 n = fsig->param_count + fsig->hasthis;
6932 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
6933 call->method = cmethod;
6934 call->tail_call = TRUE;
6935 call->signature = mono_method_signature (cmethod);
6936 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6937 call->inst.inst_p0 = cmethod;
6938 for (i = 0; i < n; ++i)
6939 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
6941 mono_arch_emit_call (cfg, call);
6942 MONO_ADD_INS (bblock, (MonoInst*)call);
6945 for (i = 0; i < num_args; ++i)
6946 /* Prevent arguments from being optimized away */
6947 arg_array [i]->flags |= MONO_INST_VOLATILE;
6949 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6950 ins = (MonoInst*)call;
6951 ins->inst_p0 = cmethod;
6952 MONO_ADD_INS (bblock, ins);
6956 start_new_bblock = 1;
6961 case CEE_CALLVIRT: {
6962 MonoInst *addr = NULL;
6963 MonoMethodSignature *fsig = NULL;
6965 int virtual = *ip == CEE_CALLVIRT;
6966 int calli = *ip == CEE_CALLI;
6967 gboolean pass_imt_from_rgctx = FALSE;
6968 MonoInst *imt_arg = NULL;
6969 gboolean pass_vtable = FALSE;
6970 gboolean pass_mrgctx = FALSE;
6971 MonoInst *vtable_arg = NULL;
6972 gboolean check_this = FALSE;
6973 gboolean supported_tail_call = FALSE;
6974 gboolean need_seq_point = FALSE;
6975 guint32 call_opcode = *ip;
6976 gboolean emit_widen = TRUE;
6977 gboolean push_res = TRUE;
6978 gboolean skip_ret = FALSE;
6981 token = read32 (ip + 1);
6986 GSHAREDVT_FAILURE (*ip);
6991 if (method->wrapper_type != MONO_WRAPPER_NONE)
6992 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6994 fsig = mono_metadata_parse_signature (image, token);
6996 n = fsig->param_count + fsig->hasthis;
6998 if (method->dynamic && fsig->pinvoke) {
7002 * This is a call through a function pointer using a pinvoke
7003 * signature. Have to create a wrapper and call that instead.
7004 * FIXME: This is very slow, need to create a wrapper at JIT time
7005 * instead based on the signature.
7007 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
7008 EMIT_NEW_PCONST (cfg, args [1], fsig);
7010 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
7013 MonoMethod *cil_method;
7015 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7016 if (constrained_call && cfg->verbose_level > 2)
7017 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_call));
7018 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
7019 cil_method = cmethod;
7020 if (constrained_call && !((constrained_call->byval_arg.type == MONO_TYPE_VAR ||
7021 constrained_call->byval_arg.type == MONO_TYPE_MVAR) &&
7022 cfg->generic_sharing_context)) {
7023 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_call, generic_context);
7025 } else if (constrained_call) {
7026 if (cfg->verbose_level > 2)
7027 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_call));
7029 GSHAREDVT_FAILURE (*ip);
7031 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
7033 * This is needed since get_method_constrained can't find
7034 * the method in klass representing a type var.
7035 * The type var is guaranteed to be a reference type in this
7038 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7039 cil_method = cmethod;
7040 g_assert (!cmethod->klass->valuetype);
7042 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
7045 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7046 cil_method = cmethod;
7049 if (!cmethod || mono_loader_get_last_error ())
7051 if (!dont_verify && !cfg->skip_visibility) {
7052 MonoMethod *target_method = cil_method;
7053 if (method->is_inflated) {
7054 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
7056 if (!mono_method_can_access_method (method_definition, target_method) &&
7057 !mono_method_can_access_method (method, cil_method))
7058 METHOD_ACCESS_FAILURE;
7061 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
7062 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
7064 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
7065 /* MS.NET seems to silently convert this to a callvirt */
7070 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
7071 * converts to a callvirt.
7073 * tests/bug-515884.il is an example of this behavior
7075 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
7076 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
7077 if (!virtual && cmethod->klass->marshalbyref && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
7081 if (!cmethod->klass->inited)
7082 if (!mono_class_init (cmethod->klass))
7083 TYPE_LOAD_ERROR (cmethod->klass);
7085 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
7086 mini_class_is_system_array (cmethod->klass)) {
7087 array_rank = cmethod->klass->rank;
7088 fsig = mono_method_signature (cmethod);
7090 fsig = mono_method_signature (cmethod);
7095 if (fsig->pinvoke) {
7096 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
7097 check_for_pending_exc, FALSE);
7098 fsig = mono_method_signature (wrapper);
7099 } else if (constrained_call) {
7100 fsig = mono_method_signature (cmethod);
7102 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
7106 mono_save_token_info (cfg, image, token, cil_method);
7108 if (!MONO_TYPE_IS_VOID (fsig->ret) && !sym_seq_points) {
7110 * Need to emit an implicit seq point after every non-void call so single stepping through nested calls like
7111 * foo (bar (), baz ())
7112 * works correctly. MS does this also:
7113 * http://stackoverflow.com/questions/6937198/making-your-net-language-step-correctly-in-the-debugger
7114 * The problem with this approach is that the debugger will stop after all calls returning a value,
7115 * even for simple cases, like:
7118 /* Special case a few common successor opcodes */
7119 if (!(ip + 5 < end && ip [5] == CEE_POP))
7120 need_seq_point = TRUE;
7123 n = fsig->param_count + fsig->hasthis;
7125 /* Don't support calls made using type arguments for now */
7127 if (cfg->gsharedvt) {
7128 if (mini_is_gsharedvt_signature (cfg, fsig))
7129 GSHAREDVT_FAILURE (*ip);
7133 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7134 if (check_linkdemand (cfg, method, cmethod))
7135 INLINE_FAILURE ("linkdemand");
7136 CHECK_CFG_EXCEPTION;
7139 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
7140 g_assert_not_reached ();
7143 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
7146 if (!cfg->generic_sharing_context && cmethod)
7147 g_assert (!mono_method_check_context_used (cmethod));
7151 //g_assert (!virtual || fsig->hasthis);
7155 if (constrained_call) {
7157 * We have the `constrained.' prefix opcode.
7159 if (constrained_call->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
7161 * The type parameter is instantiated as a valuetype,
7162 * but that type doesn't override the method we're
7163 * calling, so we need to box `this'.
7165 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
7166 ins->klass = constrained_call;
7167 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
7168 CHECK_CFG_EXCEPTION;
7169 } else if (!constrained_call->valuetype) {
7170 int dreg = alloc_ireg_ref (cfg);
7173 * The type parameter is instantiated as a reference
7174 * type. We have a managed pointer on the stack, so
7175 * we need to dereference it here.
7177 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
7178 ins->type = STACK_OBJ;
7181 if (cmethod->klass->valuetype) {
7184 /* Interface method */
7187 mono_class_setup_vtable (constrained_call);
7188 CHECK_TYPELOAD (constrained_call);
7189 ioffset = mono_class_interface_offset (constrained_call, cmethod->klass);
7191 TYPE_LOAD_ERROR (constrained_call);
7192 slot = mono_method_get_vtable_slot (cmethod);
7194 TYPE_LOAD_ERROR (cmethod->klass);
7195 cmethod = constrained_call->vtable [ioffset + slot];
7197 if (cmethod->klass == mono_defaults.enum_class) {
7198 /* Enum implements some interfaces, so treat this as the first case */
7199 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
7200 ins->klass = constrained_call;
7201 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
7202 CHECK_CFG_EXCEPTION;
7207 constrained_call = NULL;
7210 if (!calli && check_call_signature (cfg, fsig, sp))
7213 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
7215 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7216 type_to_eval_stack_type ((cfg), fsig->ret, ins);
7224 * If the callee is a shared method, then its static cctor
7225 * might not get called after the call was patched.
7227 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
7228 emit_generic_class_init (cfg, cmethod->klass);
7229 CHECK_TYPELOAD (cmethod->klass);
7232 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
7233 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
7234 gboolean sharable = FALSE;
7236 if (mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7239 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
7240 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
7241 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
7243 sharable = sharing_enabled && context_sharable;
7247 * Pass vtable iff target method might
7248 * be shared, which means that sharing
7249 * is enabled for its class and its
7250 * context is sharable (and it's not a
7253 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
7257 if (cmethod && mini_method_get_context (cmethod) &&
7258 mini_method_get_context (cmethod)->method_inst) {
7259 g_assert (!pass_vtable);
7261 if (mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7264 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
7265 MonoGenericContext *context = mini_method_get_context (cmethod);
7266 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
7268 if (sharing_enabled && context_sharable)
7270 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, mono_method_signature (cmethod)))
7275 if (cfg->generic_sharing_context && cmethod) {
7276 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
7278 context_used = mono_method_check_context_used (cmethod);
7280 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7281 /* Generic method interface
7282 calls are resolved via a
7283 helper function and don't
7285 if (!cmethod_context || !cmethod_context->method_inst)
7286 pass_imt_from_rgctx = TRUE;
7290 * If a shared method calls another
7291 * shared method then the caller must
7292 * have a generic sharing context
7293 * because the magic trampoline
7294 * requires it. FIXME: We shouldn't
7295 * have to force the vtable/mrgctx
7296 * variable here. Instead there
7297 * should be a flag in the cfg to
7298 * request a generic sharing context.
7301 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
7302 mono_get_vtable_var (cfg);
7307 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7309 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7311 CHECK_TYPELOAD (cmethod->klass);
7312 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7317 g_assert (!vtable_arg);
7319 if (!cfg->compile_aot) {
7321 * emit_get_rgctx_method () calls mono_class_vtable () so check
7322 * for type load errors before.
7324 mono_class_setup_vtable (cmethod->klass);
7325 CHECK_TYPELOAD (cmethod->klass);
7328 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7330 /* !marshalbyref is needed to properly handle generic methods + remoting */
7331 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
7332 MONO_METHOD_IS_FINAL (cmethod)) &&
7333 !cmethod->klass->marshalbyref) {
7340 if (pass_imt_from_rgctx) {
7341 g_assert (!pass_vtable);
7344 imt_arg = emit_get_rgctx_method (cfg, context_used,
7345 cmethod, MONO_RGCTX_INFO_METHOD);
7349 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
7351 /* Calling virtual generic methods */
7352 if (cmethod && virtual &&
7353 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
7354 !(MONO_METHOD_IS_FINAL (cmethod) &&
7355 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
7356 mono_method_signature (cmethod)->generic_param_count) {
7357 MonoInst *this_temp, *this_arg_temp, *store;
7358 MonoInst *iargs [4];
7360 g_assert (mono_method_signature (cmethod)->is_inflated);
7362 /* Prevent inlining of methods that contain indirect calls */
7363 INLINE_FAILURE ("virtual generic call");
7365 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
7366 GSHAREDVT_FAILURE (*ip);
7368 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
7369 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt) {
7370 g_assert (!imt_arg);
7372 g_assert (cmethod->is_inflated);
7373 imt_arg = emit_get_rgctx_method (cfg, context_used,
7374 cmethod, MONO_RGCTX_INFO_METHOD);
7375 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg, NULL);
7379 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
7380 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
7381 MONO_ADD_INS (bblock, store);
7383 /* FIXME: This should be a managed pointer */
7384 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7386 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
7387 iargs [1] = emit_get_rgctx_method (cfg, context_used,
7388 cmethod, MONO_RGCTX_INFO_METHOD);
7389 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
7390 addr = mono_emit_jit_icall (cfg,
7391 mono_helper_compile_generic_method, iargs);
7393 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
7395 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL);
7402 * Implement a workaround for the inherent races involved in locking:
7408 * If a thread abort happens between the call to Monitor.Enter () and the start of the
7409 * try block, the Exit () won't be executed, see:
7410 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
7411 * To work around this, we extend such try blocks to include the last x bytes
7412 * of the Monitor.Enter () call.
7414 if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
7415 MonoBasicBlock *tbb;
7417 GET_BBLOCK (cfg, tbb, ip + 5);
7419 * Only extend try blocks with a finally, to avoid catching exceptions thrown
7420 * from Monitor.Enter like ArgumentNullException.
7422 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
7423 /* Mark this bblock as needing to be extended */
7424 tbb->extend_try_block = TRUE;
7428 /* Conversion to a JIT intrinsic */
7429 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
7431 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7432 type_to_eval_stack_type ((cfg), fsig->ret, ins);
7439 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
7440 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
7441 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
7442 !g_list_find (dont_inline, cmethod)) {
7444 gboolean always = FALSE;
7446 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
7447 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
7448 /* Prevent inlining of methods that call wrappers */
7449 INLINE_FAILURE ("wrapper call");
7450 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
7454 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, always);
7456 cfg->real_offset += 5;
7459 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7460 /* *sp is already set by inline_method */
7465 inline_costs += costs;
7472 * Making generic calls out of gsharedvt methods.
7474 if (cmethod && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
7478 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
7479 //GSHAREDVT_FAILURE (*ip);
7480 // disable for possible remoting calls
7481 if (fsig->hasthis && (method->klass->marshalbyref || method->klass == mono_defaults.object_class))
7482 GSHAREDVT_FAILURE (*ip);
7483 // virtual generic calls were disabled earlier
7486 if (cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)
7487 /* test_0_multi_dim_arrays () in gshared.cs */
7488 GSHAREDVT_FAILURE (*ip);
7490 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
7491 addr = emit_get_rgctx_method (cfg, context_used,
7492 cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT);
7494 addr = emit_get_rgctx_method (cfg, context_used,
7495 cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
7496 ins = emit_gsharedvt_call (cfg, fsig, sp, addr, cmethod, imt_arg, vtable_arg);
7501 if (virtual && cmethod && cfg->gsharedvt && cmethod->slot == -1) {
7502 mono_class_setup_vtable (cmethod->klass);
7503 if (cmethod->slot == -1)
7504 // FIXME: How can this happen ?
7505 GSHAREDVT_FAILURE (*ip);
7508 inline_costs += 10 * num_calls++;
7510 /* Tail recursion elimination */
7511 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
7512 gboolean has_vtargs = FALSE;
7515 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
7516 INLINE_FAILURE ("tail call");
7518 /* keep it simple */
7519 for (i = fsig->param_count - 1; i >= 0; i--) {
7520 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
7525 for (i = 0; i < n; ++i)
7526 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
7527 MONO_INST_NEW (cfg, ins, OP_BR);
7528 MONO_ADD_INS (bblock, ins);
7529 tblock = start_bblock->out_bb [0];
7530 link_bblock (cfg, bblock, tblock);
7531 ins->inst_target_bb = tblock;
7532 start_new_bblock = 1;
7534 /* skip the CEE_RET, too */
7535 if (ip_in_bb (cfg, bblock, ip + 5))
7542 /* Generic sharing */
7543 /* FIXME: only do this for generic methods if
7544 they are not shared! */
7545 if (context_used && !imt_arg && !array_rank &&
7546 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7547 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
7548 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
7549 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
7550 INLINE_FAILURE ("gshared");
7552 g_assert (cfg->generic_sharing_context && cmethod);
7556 * We are compiling a call to a
7557 * generic method from shared code,
7558 * which means that we have to look up
7559 * the method in the rgctx and do an
7562 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7565 /* Indirect calls */
7567 g_assert (!imt_arg);
7569 if (call_opcode == CEE_CALL)
7570 g_assert (context_used);
7571 else if (call_opcode == CEE_CALLI)
7572 g_assert (!vtable_arg);
7574 /* FIXME: what the hell is this??? */
7575 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
7576 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
7578 /* Prevent inlining of methods with indirect calls */
7579 INLINE_FAILURE ("indirect call");
7582 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, vtable_arg);
7584 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
7586 * Instead of emitting an indirect call, emit a direct call
7587 * with the contents of the aotconst as the patch info.
7589 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
7591 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
7592 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
7595 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL);
7606 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
7607 MonoInst *val = sp [fsig->param_count];
7609 if (val->type == STACK_OBJ) {
7610 MonoInst *iargs [2];
7615 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
7618 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
7619 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
7620 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
7621 emit_write_barrier (cfg, addr, val, 0);
7622 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
7623 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
7625 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
7626 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
7627 if (!cmethod->klass->element_class->valuetype && !readonly)
7628 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
7629 CHECK_TYPELOAD (cmethod->klass);
7632 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
7635 g_assert_not_reached ();
7642 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
7646 /* Tail prefix / tail call optimization */
7648 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
7649 /* FIXME: runtime generic context pointer for jumps? */
7650 /* FIXME: handle this for generic sharing eventually */
7652 ((((ins_flag & MONO_INST_TAILCALL) && (call_opcode == CEE_CALL))
7653 ))//|| ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && ip [5] == CEE_RET))
7654 && !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig))
7655 supported_tail_call = TRUE;
7656 if (supported_tail_call) {
7659 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
7660 INLINE_FAILURE ("tail call");
7662 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
7664 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
7665 /* Handle tail calls similarly to calls */
7666 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE, FALSE, FALSE);
7668 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
7669 call->tail_call = TRUE;
7670 call->method = cmethod;
7671 call->signature = mono_method_signature (cmethod);
7674 * We implement tail calls by storing the actual arguments into the
7675 * argument variables, then emitting a CEE_JMP.
7677 for (i = 0; i < n; ++i) {
7678 /* Prevent argument from being register allocated */
7679 arg_array [i]->flags |= MONO_INST_VOLATILE;
7680 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
7684 ins = (MonoInst*)call;
7685 ins->inst_p0 = cmethod;
7686 ins->inst_p1 = arg_array [0];
7687 MONO_ADD_INS (bblock, ins);
7688 link_bblock (cfg, bblock, end_bblock);
7689 start_new_bblock = 1;
7691 // FIXME: Eliminate unreachable epilogs
7694 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
7695 * only reachable from this call.
7697 GET_BBLOCK (cfg, tblock, ip + 5);
7698 if (tblock == bblock || tblock->in_count == 0)
7706 * Synchronized wrappers.
7707 * It's hard to determine where to replace a method with its synchronized
7708 * wrapper without causing an infinite recursion. The current solution is
7709 * to add the synchronized wrapper in the trampolines, and to
7710 * change the called method to a dummy wrapper, and resolve that wrapper
7711 * to the real method in mono_jit_compile_method ().
7713 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED && mono_marshal_method_from_wrapper (cfg->method) == cmethod)
7714 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
7717 INLINE_FAILURE ("call");
7718 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
7719 imt_arg, vtable_arg);
7723 /* End of call, INS should contain the result of the call, if any */
7725 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
7728 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7733 CHECK_CFG_EXCEPTION;
7737 g_assert (*ip == CEE_RET);
7742 emit_seq_point (cfg, method, ip, FALSE);
7746 if (cfg->method != method) {
7747 /* return from inlined method */
7749 * If in_count == 0, that means the ret is unreachable due to
7750 * being preceded by a throw. In that case, inline_method () will
7751 * handle setting the return value
7752 * (test case: test_0_inline_throw ()).
7754 if (return_var && cfg->cbb->in_count) {
7755 MonoType *ret_type = mono_method_signature (method)->ret;
7761 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
7764 //g_assert (returnvar != -1);
7765 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
7766 cfg->ret_var_set = TRUE;
7770 MonoType *ret_type = mono_method_signature (method)->ret;
7772 if (seq_points && !sym_seq_points) {
7774 * Place a seq point here too even through the IL stack is not
7775 * empty, so a step over on
7778 * will work correctly.
7780 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
7781 MONO_ADD_INS (cfg->cbb, ins);
7784 g_assert (!return_var);
7788 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
7791 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
7794 if (!cfg->vret_addr) {
7797 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
7799 EMIT_NEW_RETLOADA (cfg, ret_addr);
7801 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
7802 ins->klass = mono_class_from_mono_type (ret_type);
7805 #ifdef MONO_ARCH_SOFT_FLOAT
7806 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
7807 MonoInst *iargs [1];
7811 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
7812 mono_arch_emit_setret (cfg, method, conv);
7814 mono_arch_emit_setret (cfg, method, *sp);
7817 mono_arch_emit_setret (cfg, method, *sp);
7822 if (sp != stack_start)
7824 MONO_INST_NEW (cfg, ins, OP_BR);
7826 ins->inst_target_bb = end_bblock;
7827 MONO_ADD_INS (bblock, ins);
7828 link_bblock (cfg, bblock, end_bblock);
7829 start_new_bblock = 1;
7833 MONO_INST_NEW (cfg, ins, OP_BR);
7835 target = ip + 1 + (signed char)(*ip);
7837 GET_BBLOCK (cfg, tblock, target);
7838 link_bblock (cfg, bblock, tblock);
7839 ins->inst_target_bb = tblock;
7840 if (sp != stack_start) {
7841 handle_stack_args (cfg, stack_start, sp - stack_start);
7843 CHECK_UNVERIFIABLE (cfg);
7845 MONO_ADD_INS (bblock, ins);
7846 start_new_bblock = 1;
7847 inline_costs += BRANCH_COST;
7861 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
7863 target = ip + 1 + *(signed char*)ip;
7869 inline_costs += BRANCH_COST;
7873 MONO_INST_NEW (cfg, ins, OP_BR);
7876 target = ip + 4 + (gint32)read32(ip);
7878 GET_BBLOCK (cfg, tblock, target);
7879 link_bblock (cfg, bblock, tblock);
7880 ins->inst_target_bb = tblock;
7881 if (sp != stack_start) {
7882 handle_stack_args (cfg, stack_start, sp - stack_start);
7884 CHECK_UNVERIFIABLE (cfg);
7887 MONO_ADD_INS (bblock, ins);
7889 start_new_bblock = 1;
7890 inline_costs += BRANCH_COST;
7897 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
7898 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
7899 guint32 opsize = is_short ? 1 : 4;
7901 CHECK_OPSIZE (opsize);
7903 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
7906 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
7911 GET_BBLOCK (cfg, tblock, target);
7912 link_bblock (cfg, bblock, tblock);
7913 GET_BBLOCK (cfg, tblock, ip);
7914 link_bblock (cfg, bblock, tblock);
7916 if (sp != stack_start) {
7917 handle_stack_args (cfg, stack_start, sp - stack_start);
7918 CHECK_UNVERIFIABLE (cfg);
7921 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
7922 cmp->sreg1 = sp [0]->dreg;
7923 type_from_op (cmp, sp [0], NULL);
7926 #if SIZEOF_REGISTER == 4
7927 if (cmp->opcode == OP_LCOMPARE_IMM) {
7928 /* Convert it to OP_LCOMPARE */
7929 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7930 ins->type = STACK_I8;
7931 ins->dreg = alloc_dreg (cfg, STACK_I8);
7933 MONO_ADD_INS (bblock, ins);
7934 cmp->opcode = OP_LCOMPARE;
7935 cmp->sreg2 = ins->dreg;
7938 MONO_ADD_INS (bblock, cmp);
7940 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
7941 type_from_op (ins, sp [0], NULL);
7942 MONO_ADD_INS (bblock, ins);
7943 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
7944 GET_BBLOCK (cfg, tblock, target);
7945 ins->inst_true_bb = tblock;
7946 GET_BBLOCK (cfg, tblock, ip);
7947 ins->inst_false_bb = tblock;
7948 start_new_bblock = 2;
7951 inline_costs += BRANCH_COST;
7966 MONO_INST_NEW (cfg, ins, *ip);
7968 target = ip + 4 + (gint32)read32(ip);
7974 inline_costs += BRANCH_COST;
7978 MonoBasicBlock **targets;
7979 MonoBasicBlock *default_bblock;
7980 MonoJumpInfoBBTable *table;
7981 int offset_reg = alloc_preg (cfg);
7982 int target_reg = alloc_preg (cfg);
7983 int table_reg = alloc_preg (cfg);
7984 int sum_reg = alloc_preg (cfg);
7985 gboolean use_op_switch;
7989 n = read32 (ip + 1);
7992 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
7996 CHECK_OPSIZE (n * sizeof (guint32));
7997 target = ip + n * sizeof (guint32);
7999 GET_BBLOCK (cfg, default_bblock, target);
8000 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
8002 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
8003 for (i = 0; i < n; ++i) {
8004 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
8005 targets [i] = tblock;
8006 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
8010 if (sp != stack_start) {
8012 * Link the current bb with the targets as well, so handle_stack_args
8013 * will set their in_stack correctly.
8015 link_bblock (cfg, bblock, default_bblock);
8016 for (i = 0; i < n; ++i)
8017 link_bblock (cfg, bblock, targets [i]);
8019 handle_stack_args (cfg, stack_start, sp - stack_start);
8021 CHECK_UNVERIFIABLE (cfg);
8024 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
8025 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
8028 for (i = 0; i < n; ++i)
8029 link_bblock (cfg, bblock, targets [i]);
8031 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
8032 table->table = targets;
8033 table->table_size = n;
8035 use_op_switch = FALSE;
8037 /* ARM implements SWITCH statements differently */
8038 /* FIXME: Make it use the generic implementation */
8039 if (!cfg->compile_aot)
8040 use_op_switch = TRUE;
8043 if (COMPILE_LLVM (cfg))
8044 use_op_switch = TRUE;
8046 cfg->cbb->has_jump_table = 1;
8048 if (use_op_switch) {
8049 MONO_INST_NEW (cfg, ins, OP_SWITCH);
8050 ins->sreg1 = src1->dreg;
8051 ins->inst_p0 = table;
8052 ins->inst_many_bb = targets;
8053 ins->klass = GUINT_TO_POINTER (n);
8054 MONO_ADD_INS (cfg->cbb, ins);
8056 if (sizeof (gpointer) == 8)
8057 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
8059 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
8061 #if SIZEOF_REGISTER == 8
8062 /* The upper word might not be zero, and we add it to a 64 bit address later */
8063 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
8066 if (cfg->compile_aot) {
8067 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
8069 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
8070 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
8071 ins->inst_p0 = table;
8072 ins->dreg = table_reg;
8073 MONO_ADD_INS (cfg->cbb, ins);
8076 /* FIXME: Use load_memindex */
8077 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
8078 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
8079 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
8081 start_new_bblock = 1;
8082 inline_costs += (BRANCH_COST * 2);
8102 dreg = alloc_freg (cfg);
8105 dreg = alloc_lreg (cfg);
8108 dreg = alloc_ireg_ref (cfg);
8111 dreg = alloc_preg (cfg);
8114 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
8115 ins->type = ldind_type [*ip - CEE_LDIND_I1];
8116 ins->flags |= ins_flag;
8118 MONO_ADD_INS (bblock, ins);
8120 if (ins->flags & MONO_INST_VOLATILE) {
8121 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
8122 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
8123 emit_memory_barrier (cfg, FullBarrier);
8138 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
8139 ins->flags |= ins_flag;
8142 if (ins->flags & MONO_INST_VOLATILE) {
8143 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
8144 /* FIXME it's questionable if release semantics require full barrier or just StoreStore*/
8145 emit_memory_barrier (cfg, FullBarrier);
8148 MONO_ADD_INS (bblock, ins);
8150 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
8151 emit_write_barrier (cfg, sp [0], sp [1], -1);
8160 MONO_INST_NEW (cfg, ins, (*ip));
8162 ins->sreg1 = sp [0]->dreg;
8163 ins->sreg2 = sp [1]->dreg;
8164 type_from_op (ins, sp [0], sp [1]);
8166 ins->dreg = alloc_dreg ((cfg), (ins)->type);
8168 /* Use the immediate opcodes if possible */
8169 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
8170 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
8171 if (imm_opcode != -1) {
8172 ins->opcode = imm_opcode;
8173 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
8176 sp [1]->opcode = OP_NOP;
8180 MONO_ADD_INS ((cfg)->cbb, (ins));
8182 *sp++ = mono_decompose_opcode (cfg, ins);
8199 MONO_INST_NEW (cfg, ins, (*ip));
8201 ins->sreg1 = sp [0]->dreg;
8202 ins->sreg2 = sp [1]->dreg;
8203 type_from_op (ins, sp [0], sp [1]);
8205 ADD_WIDEN_OP (ins, sp [0], sp [1]);
8206 ins->dreg = alloc_dreg ((cfg), (ins)->type);
8208 /* FIXME: Pass opcode to is_inst_imm */
8210 /* Use the immediate opcodes if possible */
8211 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
8214 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
8215 if (imm_opcode != -1) {
8216 ins->opcode = imm_opcode;
8217 if (sp [1]->opcode == OP_I8CONST) {
8218 #if SIZEOF_REGISTER == 8
8219 ins->inst_imm = sp [1]->inst_l;
8221 ins->inst_ls_word = sp [1]->inst_ls_word;
8222 ins->inst_ms_word = sp [1]->inst_ms_word;
8226 ins->inst_imm = (gssize)(sp [1]->inst_c0);
8229 /* Might be followed by an instruction added by ADD_WIDEN_OP */
8230 if (sp [1]->next == NULL)
8231 sp [1]->opcode = OP_NOP;
8234 MONO_ADD_INS ((cfg)->cbb, (ins));
8236 *sp++ = mono_decompose_opcode (cfg, ins);
8249 case CEE_CONV_OVF_I8:
8250 case CEE_CONV_OVF_U8:
8254 /* Special case this earlier so we have long constants in the IR */
8255 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
8256 int data = sp [-1]->inst_c0;
8257 sp [-1]->opcode = OP_I8CONST;
8258 sp [-1]->type = STACK_I8;
8259 #if SIZEOF_REGISTER == 8
8260 if ((*ip) == CEE_CONV_U8)
8261 sp [-1]->inst_c0 = (guint32)data;
8263 sp [-1]->inst_c0 = data;
8265 sp [-1]->inst_ls_word = data;
8266 if ((*ip) == CEE_CONV_U8)
8267 sp [-1]->inst_ms_word = 0;
8269 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
8271 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
8278 case CEE_CONV_OVF_I4:
8279 case CEE_CONV_OVF_I1:
8280 case CEE_CONV_OVF_I2:
8281 case CEE_CONV_OVF_I:
8282 case CEE_CONV_OVF_U:
8285 if (sp [-1]->type == STACK_R8) {
8286 ADD_UNOP (CEE_CONV_OVF_I8);
8293 case CEE_CONV_OVF_U1:
8294 case CEE_CONV_OVF_U2:
8295 case CEE_CONV_OVF_U4:
8298 if (sp [-1]->type == STACK_R8) {
8299 ADD_UNOP (CEE_CONV_OVF_U8);
8306 case CEE_CONV_OVF_I1_UN:
8307 case CEE_CONV_OVF_I2_UN:
8308 case CEE_CONV_OVF_I4_UN:
8309 case CEE_CONV_OVF_I8_UN:
8310 case CEE_CONV_OVF_U1_UN:
8311 case CEE_CONV_OVF_U2_UN:
8312 case CEE_CONV_OVF_U4_UN:
8313 case CEE_CONV_OVF_U8_UN:
8314 case CEE_CONV_OVF_I_UN:
8315 case CEE_CONV_OVF_U_UN:
8322 CHECK_CFG_EXCEPTION;
8326 case CEE_ADD_OVF_UN:
8328 case CEE_MUL_OVF_UN:
8330 case CEE_SUB_OVF_UN:
8336 GSHAREDVT_FAILURE (*ip);
8339 token = read32 (ip + 1);
8340 klass = mini_get_class (method, token, generic_context);
8341 CHECK_TYPELOAD (klass);
8343 if (generic_class_is_reference_type (cfg, klass)) {
8344 MonoInst *store, *load;
8345 int dreg = alloc_ireg_ref (cfg);
8347 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
8348 load->flags |= ins_flag;
8349 MONO_ADD_INS (cfg->cbb, load);
8351 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
8352 store->flags |= ins_flag;
8353 MONO_ADD_INS (cfg->cbb, store);
8355 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
8356 emit_write_barrier (cfg, sp [0], sp [1], -1);
8358 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
8370 token = read32 (ip + 1);
8371 klass = mini_get_class (method, token, generic_context);
8372 CHECK_TYPELOAD (klass);
8374 /* Optimize the common ldobj+stloc combination */
8384 loc_index = ip [5] - CEE_STLOC_0;
8391 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
8392 CHECK_LOCAL (loc_index);
8394 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
8395 ins->dreg = cfg->locals [loc_index]->dreg;
8401 /* Optimize the ldobj+stobj combination */
8402 /* The reference case ends up being a load+store anyway */
8403 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
8408 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
8415 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
8424 CHECK_STACK_OVF (1);
8426 n = read32 (ip + 1);
8428 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
8429 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
8430 ins->type = STACK_OBJ;
8433 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
8434 MonoInst *iargs [1];
8436 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
8437 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
8439 if (cfg->opt & MONO_OPT_SHARED) {
8440 MonoInst *iargs [3];
8442 if (cfg->compile_aot) {
8443 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
8445 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8446 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
8447 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
8448 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
8449 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
8451 if (bblock->out_of_line) {
8452 MonoInst *iargs [2];
8454 if (image == mono_defaults.corlib) {
8456 * Avoid relocations in AOT and save some space by using a
8457 * version of helper_ldstr specialized to mscorlib.
8459 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
8460 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
8462 /* Avoid creating the string object */
8463 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8464 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
8465 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
8469 if (cfg->compile_aot) {
8470 NEW_LDSTRCONST (cfg, ins, image, n);
8472 MONO_ADD_INS (bblock, ins);
8475 NEW_PCONST (cfg, ins, NULL);
8476 ins->type = STACK_OBJ;
8477 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
8479 OUT_OF_MEMORY_FAILURE;
8482 MONO_ADD_INS (bblock, ins);
8491 MonoInst *iargs [2];
8492 MonoMethodSignature *fsig;
8495 MonoInst *vtable_arg = NULL;
8498 token = read32 (ip + 1);
8499 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8500 if (!cmethod || mono_loader_get_last_error ())
8502 fsig = mono_method_get_signature (cmethod, image, token);
8506 mono_save_token_info (cfg, image, token, cmethod);
8508 if (!mono_class_init (cmethod->klass))
8509 TYPE_LOAD_ERROR (cmethod->klass);
8511 if (cfg->generic_sharing_context)
8512 context_used = mono_method_check_context_used (cmethod);
8514 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
8515 if (check_linkdemand (cfg, method, cmethod))
8516 INLINE_FAILURE ("linkdemand");
8517 CHECK_CFG_EXCEPTION;
8518 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
8519 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
8522 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8523 emit_generic_class_init (cfg, cmethod->klass);
8524 CHECK_TYPELOAD (cmethod->klass);
8527 if (cmethod->klass->valuetype)
8528 GSHAREDVT_FAILURE (*ip);
8531 if (cfg->gsharedvt) {
8532 if (mini_is_gsharedvt_variable_signature (sig))
8533 GSHAREDVT_FAILURE (*ip);
8537 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
8538 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
8539 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
8540 mono_class_vtable (cfg->domain, cmethod->klass);
8541 CHECK_TYPELOAD (cmethod->klass);
8543 vtable_arg = emit_get_rgctx_method (cfg, context_used,
8544 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8547 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
8548 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8550 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8552 CHECK_TYPELOAD (cmethod->klass);
8553 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8558 n = fsig->param_count;
8562 * Generate smaller code for the common newobj <exception> instruction in
8563 * argument checking code.
8565 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
8566 is_exception_class (cmethod->klass) && n <= 2 &&
8567 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
8568 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
8569 MonoInst *iargs [3];
8571 g_assert (!vtable_arg);
8575 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
8578 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
8582 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
8587 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
8590 g_assert_not_reached ();
8598 /* move the args to allow room for 'this' in the first position */
8604 /* check_call_signature () requires sp[0] to be set */
8605 this_ins.type = STACK_OBJ;
8607 if (check_call_signature (cfg, fsig, sp))
8612 if (mini_class_is_system_array (cmethod->klass)) {
8613 g_assert (!vtable_arg);
8615 *sp = emit_get_rgctx_method (cfg, context_used,
8616 cmethod, MONO_RGCTX_INFO_METHOD);
8618 /* Avoid varargs in the common case */
8619 if (fsig->param_count == 1)
8620 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
8621 else if (fsig->param_count == 2)
8622 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
8623 else if (fsig->param_count == 3)
8624 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
8626 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
8627 } else if (cmethod->string_ctor) {
8628 g_assert (!context_used);
8629 g_assert (!vtable_arg);
8630 /* we simply pass a null pointer */
8631 EMIT_NEW_PCONST (cfg, *sp, NULL);
8632 /* now call the string ctor */
8633 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL, NULL);
8635 MonoInst* callvirt_this_arg = NULL;
8637 if (cmethod->klass->valuetype) {
8638 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
8639 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
8640 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
8645 * The code generated by mini_emit_virtual_call () expects
8646 * iargs [0] to be a boxed instance, but luckily the vcall
8647 * will be transformed into a normal call there.
8649 } else if (context_used) {
8650 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
8653 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8655 CHECK_TYPELOAD (cmethod->klass);
8658 * TypeInitializationExceptions thrown from the mono_runtime_class_init
8659 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
8660 * As a workaround, we call class cctors before allocating objects.
8662 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8663 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8664 if (cfg->verbose_level > 2)
8665 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
8666 class_inits = g_slist_prepend (class_inits, vtable);
8669 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
8672 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
8675 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
8677 /* Now call the actual ctor */
8678 /* Avoid virtual calls to ctors if possible */
8679 if (cmethod->klass->marshalbyref)
8680 callvirt_this_arg = sp [0];
8683 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
8684 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8685 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8690 CHECK_CFG_EXCEPTION;
8691 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
8692 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
8693 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
8694 !g_list_find (dont_inline, cmethod)) {
8697 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
8698 cfg->real_offset += 5;
8701 inline_costs += costs - 5;
8703 INLINE_FAILURE ("inline failure");
8704 // FIXME-VT: Clean this up
8705 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
8706 GSHAREDVT_FAILURE(*ip);
8707 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL, NULL);
8709 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
8712 addr = emit_get_rgctx_method (cfg, context_used,
8713 cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
8714 mono_emit_calli (cfg, fsig, sp, addr, vtable_arg);
8715 } else if (context_used &&
8716 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
8717 !mono_class_generic_sharing_enabled (cmethod->klass))) {
8718 MonoInst *cmethod_addr;
8720 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
8721 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8723 mono_emit_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
8725 INLINE_FAILURE ("ctor call");
8726 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp,
8727 callvirt_this_arg, NULL, vtable_arg);
8731 if (alloc == NULL) {
8733 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
8734 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
8748 token = read32 (ip + 1);
8749 klass = mini_get_class (method, token, generic_context);
8750 CHECK_TYPELOAD (klass);
8751 if (sp [0]->type != STACK_OBJ)
8754 if (cfg->generic_sharing_context)
8755 context_used = mono_class_check_context_used (klass);
8757 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
8758 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
8765 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8768 if (cfg->compile_aot)
8769 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
8771 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8773 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
8774 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
8777 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8778 MonoMethod *mono_castclass;
8779 MonoInst *iargs [1];
8782 mono_castclass = mono_marshal_get_castclass (klass);
8785 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
8786 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8787 CHECK_CFG_EXCEPTION;
8788 g_assert (costs > 0);
8791 cfg->real_offset += 5;
8796 inline_costs += costs;
8799 ins = handle_castclass (cfg, klass, *sp, context_used);
8800 CHECK_CFG_EXCEPTION;
8810 token = read32 (ip + 1);
8811 klass = mini_get_class (method, token, generic_context);
8812 CHECK_TYPELOAD (klass);
8813 if (sp [0]->type != STACK_OBJ)
8816 if (cfg->generic_sharing_context)
8817 context_used = mono_class_check_context_used (klass);
8819 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
8820 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
8827 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8830 if (cfg->compile_aot)
8831 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
8833 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8835 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
8838 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8839 MonoMethod *mono_isinst;
8840 MonoInst *iargs [1];
8843 mono_isinst = mono_marshal_get_isinst (klass);
8846 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
8847 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8848 CHECK_CFG_EXCEPTION;
8849 g_assert (costs > 0);
8852 cfg->real_offset += 5;
8857 inline_costs += costs;
8860 ins = handle_isinst (cfg, klass, *sp, context_used);
8861 CHECK_CFG_EXCEPTION;
8868 case CEE_UNBOX_ANY: {
8872 token = read32 (ip + 1);
8873 klass = mini_get_class (method, token, generic_context);
8874 CHECK_TYPELOAD (klass);
8876 mono_save_token_info (cfg, image, token, klass);
8878 if (cfg->generic_sharing_context)
8879 context_used = mono_class_check_context_used (klass);
8881 if (mini_is_gsharedvt_klass (cfg, klass))
8882 /* Need to check for nullable types at runtime */
8883 GSHAREDVT_FAILURE (*ip);
8885 if (generic_class_is_reference_type (cfg, klass)) {
8886 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
8887 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
8888 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
8895 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8898 /*FIXME AOT support*/
8899 if (cfg->compile_aot)
8900 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
8902 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8904 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
8905 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
8908 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8909 MonoMethod *mono_castclass;
8910 MonoInst *iargs [1];
8913 mono_castclass = mono_marshal_get_castclass (klass);
8916 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
8917 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8918 CHECK_CFG_EXCEPTION;
8919 g_assert (costs > 0);
8922 cfg->real_offset += 5;
8926 inline_costs += costs;
8928 ins = handle_castclass (cfg, klass, *sp, context_used);
8929 CHECK_CFG_EXCEPTION;
8937 if (mono_class_is_nullable (klass)) {
8938 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
8945 ins = handle_unbox (cfg, klass, sp, context_used);
8951 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
8964 token = read32 (ip + 1);
8965 klass = mini_get_class (method, token, generic_context);
8966 CHECK_TYPELOAD (klass);
8968 mono_save_token_info (cfg, image, token, klass);
8970 if (cfg->generic_sharing_context)
8971 context_used = mono_class_check_context_used (klass);
8973 if (generic_class_is_reference_type (cfg, klass)) {
8979 if (klass == mono_defaults.void_class)
8981 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
8983 /* frequent check in generic code: box (struct), brtrue */
8985 // FIXME: LLVM can't handle the inconsistent bb linking
8986 if (!mono_class_is_nullable (klass) &&
8987 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
8988 (ip [5] == CEE_BRTRUE ||
8989 ip [5] == CEE_BRTRUE_S ||
8990 ip [5] == CEE_BRFALSE ||
8991 ip [5] == CEE_BRFALSE_S)) {
8992 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
8994 MonoBasicBlock *true_bb, *false_bb;
8998 if (cfg->verbose_level > 3) {
8999 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9000 printf ("<box+brtrue opt>\n");
9008 target = ip + 1 + (signed char)(*ip);
9015 target = ip + 4 + (gint)(read32 (ip));
9019 g_assert_not_reached ();
9023 * We need to link both bblocks, since it is needed for handling stack
9024 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
9025 * Branching to only one of them would lead to inconsistencies, so
9026 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
9028 GET_BBLOCK (cfg, true_bb, target);
9029 GET_BBLOCK (cfg, false_bb, ip);
9031 mono_link_bblock (cfg, cfg->cbb, true_bb);
9032 mono_link_bblock (cfg, cfg->cbb, false_bb);
9034 if (sp != stack_start) {
9035 handle_stack_args (cfg, stack_start, sp - stack_start);
9037 CHECK_UNVERIFIABLE (cfg);
9040 if (COMPILE_LLVM (cfg)) {
9041 dreg = alloc_ireg (cfg);
9042 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
9043 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
9045 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
9047 /* The JIT can't eliminate the iconst+compare */
9048 MONO_INST_NEW (cfg, ins, OP_BR);
9049 ins->inst_target_bb = is_true ? true_bb : false_bb;
9050 MONO_ADD_INS (cfg->cbb, ins);
9053 start_new_bblock = 1;
9057 *sp++ = handle_box (cfg, val, klass, context_used);
9059 CHECK_CFG_EXCEPTION;
9068 token = read32 (ip + 1);
9069 klass = mini_get_class (method, token, generic_context);
9070 CHECK_TYPELOAD (klass);
9072 mono_save_token_info (cfg, image, token, klass);
9074 if (cfg->generic_sharing_context)
9075 context_used = mono_class_check_context_used (klass);
9077 if (mono_class_is_nullable (klass)) {
9080 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
9081 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
9085 ins = handle_unbox (cfg, klass, sp, context_used);
9098 MonoClassField *field;
9101 gboolean is_instance;
9103 gpointer addr = NULL;
9104 gboolean is_special_static;
9106 MonoInst *store_val = NULL;
9109 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
9111 if (op == CEE_STFLD) {
9119 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
9121 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
9124 if (op == CEE_STSFLD) {
9132 token = read32 (ip + 1);
9133 if (method->wrapper_type != MONO_WRAPPER_NONE) {
9134 field = mono_method_get_wrapper_data (method, token);
9135 klass = field->parent;
9138 field = mono_field_from_token (image, token, &klass, generic_context);
9142 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
9143 FIELD_ACCESS_FAILURE;
9144 mono_class_init (klass);
9146 if (is_instance && *ip != CEE_LDFLDA && is_magic_tls_access (field))
9150 /* if the class is Critical then transparent code cannot access its fields */
9150 if (!is_instance && mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
9151 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
9153 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
9154 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
9155 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
9156 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
9160 * LDFLD etc. is usable on static fields as well, so convert those cases to
9163 if (is_instance && field->type->attrs & FIELD_ATTRIBUTE_STATIC) {
9175 g_assert_not_reached ();
9177 is_instance = FALSE;
9180 if (cfg->generic_sharing_context)
9181 context_used = mono_class_check_context_used (klass);
9185 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
9186 if (op == CEE_STFLD) {
9187 if (target_type_is_incompatible (cfg, field->type, sp [1]))
9189 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
9190 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
9191 MonoInst *iargs [5];
9193 GSHAREDVT_FAILURE (op);
9196 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9197 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
9198 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
9202 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
9203 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
9204 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9205 CHECK_CFG_EXCEPTION;
9206 g_assert (costs > 0);
9208 cfg->real_offset += 5;
9211 inline_costs += costs;
9213 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
9218 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
9220 if (mini_is_gsharedvt_klass (cfg, klass)) {
9221 MonoInst *offset_ins;
9223 if (cfg->generic_sharing_context)
9224 context_used = mono_class_check_context_used (klass);
9226 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9227 dreg = alloc_ireg_mp (cfg);
9228 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
9229 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
9230 // FIXME-VT: wbarriers ?
9232 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
9234 if (sp [0]->opcode != OP_LDADDR)
9235 store->flags |= MONO_INST_FAULT;
9237 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
9238 /* insert call to write barrier */
9242 dreg = alloc_ireg_mp (cfg);
9243 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
9244 emit_write_barrier (cfg, ptr, sp [1], -1);
9247 store->flags |= ins_flag;
9254 if (is_instance && ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class)) {
9255 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
9256 MonoInst *iargs [4];
9258 GSHAREDVT_FAILURE (op);
9261 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9262 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
9263 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
9264 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
9265 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
9266 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9267 CHECK_CFG_EXCEPTION;
9269 g_assert (costs > 0);
9271 cfg->real_offset += 5;
9275 inline_costs += costs;
9277 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
9280 } else if (is_instance) {
9281 if (sp [0]->type == STACK_VTYPE) {
9284 /* Have to compute the address of the variable */
9286 var = get_vreg_to_inst (cfg, sp [0]->dreg);
9288 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
9290 g_assert (var->klass == klass);
9292 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
9296 if (op == CEE_LDFLDA) {
9297 if (is_magic_tls_access (field)) {
9298 GSHAREDVT_FAILURE (*ip);
9300 *sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
9302 if (sp [0]->type == STACK_OBJ) {
9303 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
9304 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
9307 dreg = alloc_ireg_mp (cfg);
9309 if (mini_is_gsharedvt_klass (cfg, klass)) {
9310 MonoInst *offset_ins;
9312 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9313 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
9315 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
9317 ins->klass = mono_class_from_mono_type (field->type);
9318 ins->type = STACK_MP;
9324 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
9326 if (mini_is_gsharedvt_klass (cfg, klass)) {
9327 MonoInst *offset_ins;
9329 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9330 dreg = alloc_ireg_mp (cfg);
9331 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
9332 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
9334 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
9336 load->flags |= ins_flag;
9337 if (sp [0]->opcode != OP_LDADDR)
9338 load->flags |= MONO_INST_FAULT;
9352 * We can only support shared generic static
9353 * field access on architectures where the
9354 * trampoline code has been extended to handle
9355 * the generic class init.
9357 #ifndef MONO_ARCH_VTABLE_REG
9358 GENERIC_SHARING_FAILURE (op);
9361 if (cfg->generic_sharing_context)
9362 context_used = mono_class_check_context_used (klass);
9364 ftype = mono_field_get_type (field);
9366 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
9369 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
9370 * to be called here.
9372 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
9373 mono_class_vtable (cfg->domain, klass);
9374 CHECK_TYPELOAD (klass);
9376 mono_domain_lock (cfg->domain);
9377 if (cfg->domain->special_static_fields)
9378 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
9379 mono_domain_unlock (cfg->domain);
9381 is_special_static = mono_class_field_is_special_static (field);
9383 /* Generate IR to compute the field address */
9384 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
9386 * Fast access to TLS data
9387 * Inline version of get_thread_static_data () in
9391 int idx, static_data_reg, array_reg, dreg;
9392 MonoInst *thread_ins;
9394 GSHAREDVT_FAILURE (op);
9396 // offset &= 0x7fffffff;
9397 // idx = (offset >> 24) - 1;
9398 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
9400 thread_ins = mono_get_thread_intrinsic (cfg);
9401 MONO_ADD_INS (cfg->cbb, thread_ins);
9402 static_data_reg = alloc_ireg (cfg);
9403 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
9405 if (cfg->compile_aot) {
9406 int offset_reg, offset2_reg, idx_reg;
9408 /* For TLS variables, this will return the TLS offset */
9409 EMIT_NEW_SFLDACONST (cfg, ins, field);
9410 offset_reg = ins->dreg;
9411 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
9412 idx_reg = alloc_ireg (cfg);
9413 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
9414 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
9415 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
9416 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
9417 array_reg = alloc_ireg (cfg);
9418 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
9419 offset2_reg = alloc_ireg (cfg);
9420 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
9421 dreg = alloc_ireg (cfg);
9422 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
9424 offset = (gsize)addr & 0x7fffffff;
9425 idx = (offset >> 24) - 1;
9427 array_reg = alloc_ireg (cfg);
9428 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
9429 dreg = alloc_ireg (cfg);
9430 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
9432 } else if ((cfg->opt & MONO_OPT_SHARED) ||
9433 (cfg->compile_aot && is_special_static) ||
9434 (context_used && is_special_static)) {
9435 MonoInst *iargs [2];
9437 g_assert (field->parent);
9438 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9440 iargs [1] = emit_get_rgctx_field (cfg, context_used,
9441 field, MONO_RGCTX_INFO_CLASS_FIELD);
9443 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
9445 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
9446 } else if (context_used) {
9447 MonoInst *static_data;
9450 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
9451 method->klass->name_space, method->klass->name, method->name,
9452 depth, field->offset);
9455 if (mono_class_needs_cctor_run (klass, method))
9456 emit_generic_class_init (cfg, klass);
9459 * The pointer we're computing here is
9461 * super_info.static_data + field->offset
9463 static_data = emit_get_rgctx_klass (cfg, context_used,
9464 klass, MONO_RGCTX_INFO_STATIC_DATA);
9466 if (mini_is_gsharedvt_klass (cfg, klass)) {
9467 MonoInst *offset_ins;
9469 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
9470 dreg = alloc_ireg_mp (cfg);
9471 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
9472 } else if (field->offset == 0) {
9475 int addr_reg = mono_alloc_preg (cfg);
9476 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
9478 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
9479 MonoInst *iargs [2];
9481 g_assert (field->parent);
9482 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9483 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
9484 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
9486 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
9488 CHECK_TYPELOAD (klass);
9490 if (mini_field_access_needs_cctor_run (cfg, method, vtable)) {
9491 if (!(g_slist_find (class_inits, vtable))) {
9492 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
9493 if (cfg->verbose_level > 2)
9494 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
9495 class_inits = g_slist_prepend (class_inits, vtable);
9498 if (cfg->run_cctors) {
9500 /* This makes so that inline cannot trigger */
9501 /* .cctors: too many apps depend on them */
9502 /* running with a specific order... */
9503 if (! vtable->initialized)
9504 INLINE_FAILURE ("class init");
9505 ex = mono_runtime_class_init_full (vtable, FALSE);
9507 set_exception_object (cfg, ex);
9508 goto exception_exit;
9512 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
9514 if (cfg->compile_aot)
9515 EMIT_NEW_SFLDACONST (cfg, ins, field);
9517 EMIT_NEW_PCONST (cfg, ins, addr);
9519 MonoInst *iargs [1];
9520 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
9521 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
9525 /* Generate IR to do the actual load/store operation */
9527 if (op == CEE_LDSFLDA) {
9528 ins->klass = mono_class_from_mono_type (ftype);
9529 ins->type = STACK_PTR;
9531 } else if (op == CEE_STSFLD) {
9534 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
9535 store->flags |= ins_flag;
9537 gboolean is_const = FALSE;
9538 MonoVTable *vtable = NULL;
9539 gpointer addr = NULL;
9541 if (!context_used) {
9542 vtable = mono_class_vtable (cfg->domain, klass);
9543 CHECK_TYPELOAD (klass);
9545 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
9546 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
9547 int ro_type = ftype->type;
9549 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
9550 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
9551 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
9554 GSHAREDVT_FAILURE (op);
9556 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
9559 case MONO_TYPE_BOOLEAN:
9561 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
9565 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
9568 case MONO_TYPE_CHAR:
9570 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
9574 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
9579 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
9583 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
9589 case MONO_TYPE_FNPTR:
9590 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
9591 type_to_eval_stack_type ((cfg), field->type, *sp);
9594 case MONO_TYPE_STRING:
9595 case MONO_TYPE_OBJECT:
9596 case MONO_TYPE_CLASS:
9597 case MONO_TYPE_SZARRAY:
9598 case MONO_TYPE_ARRAY:
9599 if (!mono_gc_is_moving ()) {
9600 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
9601 type_to_eval_stack_type ((cfg), field->type, *sp);
9609 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
9614 case MONO_TYPE_VALUETYPE:
9624 CHECK_STACK_OVF (1);
9626 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
9627 load->flags |= ins_flag;
9640 token = read32 (ip + 1);
9641 klass = mini_get_class (method, token, generic_context);
9642 CHECK_TYPELOAD (klass);
9643 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
9644 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
9645 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
9646 generic_class_is_reference_type (cfg, klass)) {
9647 /* insert call to write barrier */
9648 emit_write_barrier (cfg, sp [0], sp [1], -1);
9660 const char *data_ptr;
9662 guint32 field_token;
9668 token = read32 (ip + 1);
9670 klass = mini_get_class (method, token, generic_context);
9671 CHECK_TYPELOAD (klass);
9673 if (cfg->generic_sharing_context)
9674 context_used = mono_class_check_context_used (klass);
9676 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
9677 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
9678 ins->sreg1 = sp [0]->dreg;
9679 ins->type = STACK_I4;
9680 ins->dreg = alloc_ireg (cfg);
9681 MONO_ADD_INS (cfg->cbb, ins);
9682 *sp = mono_decompose_opcode (cfg, ins);
9687 MonoClass *array_class = mono_array_class_get (klass, 1);
9688 /* FIXME: we cannot get a managed
9689 allocator because we can't get the
9690 open generic class's vtable. We
9691 have the same problem in
9692 handle_alloc(). This
9693 needs to be solved so that we can
9694 have managed allocs of shared
9697 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
9698 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
9700 MonoMethod *managed_alloc = NULL;
9702 /* FIXME: Decompose later to help abcrem */
9705 args [0] = emit_get_rgctx_klass (cfg, context_used,
9706 array_class, MONO_RGCTX_INFO_VTABLE);
9711 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
9713 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
9715 if (cfg->opt & MONO_OPT_SHARED) {
9716 /* Decompose now to avoid problems with references to the domainvar */
9717 MonoInst *iargs [3];
9719 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9720 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9723 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
9725 /* Decompose later since it is needed by abcrem */
9726 MonoClass *array_type = mono_array_class_get (klass, 1);
9727 mono_class_vtable (cfg->domain, array_type);
9728 CHECK_TYPELOAD (array_type);
9730 MONO_INST_NEW (cfg, ins, OP_NEWARR);
9731 ins->dreg = alloc_ireg_ref (cfg);
9732 ins->sreg1 = sp [0]->dreg;
9733 ins->inst_newa_class = klass;
9734 ins->type = STACK_OBJ;
9735 ins->klass = array_type;
9736 MONO_ADD_INS (cfg->cbb, ins);
9737 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
9738 cfg->cbb->has_array_access = TRUE;
9740 /* Needed so mono_emit_load_get_addr () gets called */
9741 mono_get_got_var (cfg);
9751 * we inline/optimize the initialization sequence if possible.
9752 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
9753 * for small sizes open code the memcpy
9754 * ensure the rva field is big enough
9756 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
9757 MonoMethod *memcpy_method = get_memcpy_method ();
9758 MonoInst *iargs [3];
9759 int add_reg = alloc_ireg_mp (cfg);
9761 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
9762 if (cfg->compile_aot) {
9763 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
9765 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
9767 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
9768 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9777 if (sp [0]->type != STACK_OBJ)
9780 MONO_INST_NEW (cfg, ins, OP_LDLEN);
9781 ins->dreg = alloc_preg (cfg);
9782 ins->sreg1 = sp [0]->dreg;
9783 ins->type = STACK_I4;
9784 /* This flag will be inherited by the decomposition */
9785 ins->flags |= MONO_INST_FAULT;
9786 MONO_ADD_INS (cfg->cbb, ins);
9787 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
9788 cfg->cbb->has_array_access = TRUE;
9796 if (sp [0]->type != STACK_OBJ)
9799 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9801 klass = mini_get_class (method, read32 (ip + 1), generic_context);
9802 CHECK_TYPELOAD (klass);
9803 /* we need to make sure that this array is exactly the type it needs
9804 * to be for correctness. the wrappers are lax with their usage
9805 * so we need to ignore them here
9807 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
9808 MonoClass *array_class = mono_array_class_get (klass, 1);
9809 mini_emit_check_array_type (cfg, sp [0], array_class);
9810 CHECK_TYPELOAD (array_class);
9814 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9829 case CEE_LDELEM_REF: {
9835 if (*ip == CEE_LDELEM) {
9837 token = read32 (ip + 1);
9838 klass = mini_get_class (method, token, generic_context);
9839 CHECK_TYPELOAD (klass);
9840 mono_class_init (klass);
9843 klass = array_access_to_klass (*ip);
9845 if (sp [0]->type != STACK_OBJ)
9848 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9850 if (mini_is_gsharedvt_klass (cfg, klass)) {
9851 // FIXME-VT: OP_ICONST optimization
9852 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9853 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
9854 ins->opcode = OP_LOADV_MEMBASE;
9855 } else if (sp [1]->opcode == OP_ICONST) {
9856 int array_reg = sp [0]->dreg;
9857 int index_reg = sp [1]->dreg;
9858 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
9860 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
9861 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
9863 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9864 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
9867 if (*ip == CEE_LDELEM)
9880 case CEE_STELEM_REF:
9885 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9887 if (*ip == CEE_STELEM) {
9889 token = read32 (ip + 1);
9890 klass = mini_get_class (method, token, generic_context);
9891 CHECK_TYPELOAD (klass);
9892 mono_class_init (klass);
9895 klass = array_access_to_klass (*ip);
9897 if (sp [0]->type != STACK_OBJ)
9900 emit_array_store (cfg, klass, sp, TRUE);
9902 if (*ip == CEE_STELEM)
9909 case CEE_CKFINITE: {
9913 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
9914 ins->sreg1 = sp [0]->dreg;
9915 ins->dreg = alloc_freg (cfg);
9916 ins->type = STACK_R8;
9917 MONO_ADD_INS (bblock, ins);
9919 *sp++ = mono_decompose_opcode (cfg, ins);
9924 case CEE_REFANYVAL: {
9925 MonoInst *src_var, *src;
9927 int klass_reg = alloc_preg (cfg);
9928 int dreg = alloc_preg (cfg);
9930 GSHAREDVT_FAILURE (*ip);
9933 MONO_INST_NEW (cfg, ins, *ip);
9936 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
9937 CHECK_TYPELOAD (klass);
9938 mono_class_init (klass);
9940 if (cfg->generic_sharing_context)
9941 context_used = mono_class_check_context_used (klass);
9944 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9946 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9947 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9948 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
9951 MonoInst *klass_ins;
9953 klass_ins = emit_get_rgctx_klass (cfg, context_used,
9954 klass, MONO_RGCTX_INFO_KLASS);
9957 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
9958 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
9960 mini_emit_class_check (cfg, klass_reg, klass);
9962 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
9963 ins->type = STACK_MP;
9968 case CEE_MKREFANY: {
9969 MonoInst *loc, *addr;
9971 GSHAREDVT_FAILURE (*ip);
9974 MONO_INST_NEW (cfg, ins, *ip);
9977 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
9978 CHECK_TYPELOAD (klass);
9979 mono_class_init (klass);
9981 if (cfg->generic_sharing_context)
9982 context_used = mono_class_check_context_used (klass);
9984 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
9985 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
9988 MonoInst *const_ins;
9989 int type_reg = alloc_preg (cfg);
9991 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
9992 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
9993 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
9994 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
9995 } else if (cfg->compile_aot) {
9996 int const_reg = alloc_preg (cfg);
9997 int type_reg = alloc_preg (cfg);
9999 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
10000 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
10001 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
10002 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
10004 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
10005 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
10007 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
10009 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
10010 ins->type = STACK_VTYPE;
10011 ins->klass = mono_defaults.typed_reference_class;
10016 case CEE_LDTOKEN: {
10018 MonoClass *handle_class;
10020 CHECK_STACK_OVF (1);
10023 n = read32 (ip + 1);
10025 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
10026 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
10027 handle = mono_method_get_wrapper_data (method, n);
10028 handle_class = mono_method_get_wrapper_data (method, n + 1);
10029 if (handle_class == mono_defaults.typehandle_class)
10030 handle = &((MonoClass*)handle)->byval_arg;
10033 handle = mono_ldtoken (image, n, &handle_class, generic_context);
10037 mono_class_init (handle_class);
10038 if (cfg->generic_sharing_context) {
10039 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
10040 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
10041 /* This case handles ldtoken
10042 of an open type, like for
10045 } else if (handle_class == mono_defaults.typehandle_class) {
10046 /* If we get a MONO_TYPE_CLASS
10047 then we need to provide the
10049 instantiation of it. */
10050 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
10053 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
10054 } else if (handle_class == mono_defaults.fieldhandle_class)
10055 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
10056 else if (handle_class == mono_defaults.methodhandle_class)
10057 context_used = mono_method_check_context_used (handle);
10059 g_assert_not_reached ();
10062 if ((cfg->opt & MONO_OPT_SHARED) &&
10063 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
10064 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
10065 MonoInst *addr, *vtvar, *iargs [3];
10066 int method_context_used;
10068 if (cfg->generic_sharing_context)
10069 method_context_used = mono_method_check_context_used (method);
10071 method_context_used = 0;
10073 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
10075 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10076 EMIT_NEW_ICONST (cfg, iargs [1], n);
10077 if (method_context_used) {
10078 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
10079 method, MONO_RGCTX_INFO_METHOD);
10080 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
10082 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
10083 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
10085 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
10087 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
10089 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
10091 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
10092 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
10093 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
10094 (cmethod->klass == mono_defaults.monotype_class->parent) &&
10095 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
10096 MonoClass *tclass = mono_class_from_mono_type (handle);
10098 mono_class_init (tclass);
10099 if (context_used) {
10100 ins = emit_get_rgctx_klass (cfg, context_used,
10101 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
10102 } else if (cfg->compile_aot) {
10103 if (method->wrapper_type) {
10104 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
10105 /* Special case for static synchronized wrappers */
10106 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
10108 /* FIXME: n is not a normal token */
10109 cfg->disable_aot = TRUE;
10110 EMIT_NEW_PCONST (cfg, ins, NULL);
10113 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
10116 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
10118 ins->type = STACK_OBJ;
10119 ins->klass = cmethod->klass;
10122 MonoInst *addr, *vtvar;
10124 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
10126 if (context_used) {
10127 if (handle_class == mono_defaults.typehandle_class) {
10128 ins = emit_get_rgctx_klass (cfg, context_used,
10129 mono_class_from_mono_type (handle),
10130 MONO_RGCTX_INFO_TYPE);
10131 } else if (handle_class == mono_defaults.methodhandle_class) {
10132 ins = emit_get_rgctx_method (cfg, context_used,
10133 handle, MONO_RGCTX_INFO_METHOD);
10134 } else if (handle_class == mono_defaults.fieldhandle_class) {
10135 ins = emit_get_rgctx_field (cfg, context_used,
10136 handle, MONO_RGCTX_INFO_CLASS_FIELD);
10138 g_assert_not_reached ();
10140 } else if (cfg->compile_aot) {
10141 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
10143 EMIT_NEW_PCONST (cfg, ins, handle);
10145 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
10146 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
10147 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
10157 MONO_INST_NEW (cfg, ins, OP_THROW);
10159 ins->sreg1 = sp [0]->dreg;
10161 bblock->out_of_line = TRUE;
10162 MONO_ADD_INS (bblock, ins);
10163 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
10164 MONO_ADD_INS (bblock, ins);
10167 link_bblock (cfg, bblock, end_bblock);
10168 start_new_bblock = 1;
10170 case CEE_ENDFINALLY:
10171 /* mono_save_seq_point_info () depends on this */
10172 if (sp != stack_start)
10173 emit_seq_point (cfg, method, ip, FALSE);
10174 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
10175 MONO_ADD_INS (bblock, ins);
10177 start_new_bblock = 1;
10180 * Control will leave the method so empty the stack, otherwise
10181 * the next basic block will start with a nonempty stack.
10183 while (sp != stack_start) {
10188 case CEE_LEAVE_S: {
10191 if (*ip == CEE_LEAVE) {
10193 target = ip + 5 + (gint32)read32(ip + 1);
10196 target = ip + 2 + (signed char)(ip [1]);
10199 /* empty the stack */
10200 while (sp != stack_start) {
10205 * If this leave statement is in a catch block, check for a
10206 * pending exception, and rethrow it if necessary.
10207 * We avoid doing this in runtime invoke wrappers, since those are called
10208 * by native code which expects the wrapper to catch all exceptions.
10210 for (i = 0; i < header->num_clauses; ++i) {
10211 MonoExceptionClause *clause = &header->clauses [i];
10214 * Use <= in the final comparison to handle clauses with multiple
10215 * leave statements, like in bug #78024.
10216 * The ordering of the exception clauses guarantees that we find the
10217 * innermost clause.
10219 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
10221 MonoBasicBlock *dont_throw;
10226 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
10229 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
10231 NEW_BBLOCK (cfg, dont_throw);
10234 * Currently, we always rethrow the abort exception, despite the
10235 * fact that this is not correct. See thread6.cs for an example.
10236 * But propagating the abort exception is more important than
10237 * getting the semantics right.
10239 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
10240 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
10241 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
10243 MONO_START_BB (cfg, dont_throw);
10248 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
10250 MonoExceptionClause *clause;
10252 for (tmp = handlers; tmp; tmp = tmp->next) {
10253 clause = tmp->data;
10254 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
10256 link_bblock (cfg, bblock, tblock);
10257 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
10258 ins->inst_target_bb = tblock;
10259 ins->inst_eh_block = clause;
10260 MONO_ADD_INS (bblock, ins);
10261 bblock->has_call_handler = 1;
10262 if (COMPILE_LLVM (cfg)) {
10263 MonoBasicBlock *target_bb;
10266 * Link the finally bblock with the target, since it will
10267 * conceptually branch there.
10268 * FIXME: Have to link the bblock containing the endfinally.
10270 GET_BBLOCK (cfg, target_bb, target);
10271 link_bblock (cfg, tblock, target_bb);
10274 g_list_free (handlers);
10277 MONO_INST_NEW (cfg, ins, OP_BR);
10278 MONO_ADD_INS (bblock, ins);
10279 GET_BBLOCK (cfg, tblock, target);
10280 link_bblock (cfg, bblock, tblock);
10281 ins->inst_target_bb = tblock;
10282 start_new_bblock = 1;
10284 if (*ip == CEE_LEAVE)
10293 * Mono specific opcodes
10295 case MONO_CUSTOM_PREFIX: {
10297 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
10301 case CEE_MONO_ICALL: {
10303 MonoJitICallInfo *info;
10305 token = read32 (ip + 2);
10306 func = mono_method_get_wrapper_data (method, token);
10307 info = mono_find_jit_icall_by_addr (func);
10309 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
10312 CHECK_STACK (info->sig->param_count);
10313 sp -= info->sig->param_count;
10315 ins = mono_emit_jit_icall (cfg, info->func, sp);
10316 if (!MONO_TYPE_IS_VOID (info->sig->ret))
10320 inline_costs += 10 * num_calls++;
10324 case CEE_MONO_LDPTR: {
10327 CHECK_STACK_OVF (1);
10329 token = read32 (ip + 2);
10331 ptr = mono_method_get_wrapper_data (method, token);
10332 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
10333 MonoJitICallInfo *callinfo;
10334 const char *icall_name;
10336 icall_name = method->name + strlen ("__icall_wrapper_");
10337 g_assert (icall_name);
10338 callinfo = mono_find_jit_icall_by_name (icall_name);
10339 g_assert (callinfo);
10341 if (ptr == callinfo->func) {
10342 /* Will be transformed into an AOTCONST later */
10343 EMIT_NEW_PCONST (cfg, ins, ptr);
10349 /* FIXME: Generalize this */
10350 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
10351 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
10356 EMIT_NEW_PCONST (cfg, ins, ptr);
10359 inline_costs += 10 * num_calls++;
10360 /* Can't embed random pointers into AOT code */
10361 cfg->disable_aot = 1;
10364 case CEE_MONO_ICALL_ADDR: {
10365 MonoMethod *cmethod;
10368 CHECK_STACK_OVF (1);
10370 token = read32 (ip + 2);
10372 cmethod = mono_method_get_wrapper_data (method, token);
10374 if (cfg->compile_aot) {
10375 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
10377 ptr = mono_lookup_internal_call (cmethod);
10379 EMIT_NEW_PCONST (cfg, ins, ptr);
10385 case CEE_MONO_VTADDR: {
10386 MonoInst *src_var, *src;
10392 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10393 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
10398 case CEE_MONO_NEWOBJ: {
10399 MonoInst *iargs [2];
10401 CHECK_STACK_OVF (1);
10403 token = read32 (ip + 2);
10404 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
10405 mono_class_init (klass);
10406 NEW_DOMAINCONST (cfg, iargs [0]);
10407 MONO_ADD_INS (cfg->cbb, iargs [0]);
10408 NEW_CLASSCONST (cfg, iargs [1], klass);
10409 MONO_ADD_INS (cfg->cbb, iargs [1]);
10410 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
10412 inline_costs += 10 * num_calls++;
10415 case CEE_MONO_OBJADDR:
10418 MONO_INST_NEW (cfg, ins, OP_MOVE);
10419 ins->dreg = alloc_ireg_mp (cfg);
10420 ins->sreg1 = sp [0]->dreg;
10421 ins->type = STACK_MP;
10422 MONO_ADD_INS (cfg->cbb, ins);
10426 case CEE_MONO_LDNATIVEOBJ:
10428 * Similar to LDOBJ, but instead load the unmanaged
10429 * representation of the vtype to the stack.
10434 token = read32 (ip + 2);
10435 klass = mono_method_get_wrapper_data (method, token);
10436 g_assert (klass->valuetype);
10437 mono_class_init (klass);
10440 MonoInst *src, *dest, *temp;
10443 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
10444 temp->backend.is_pinvoke = 1;
10445 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
10446 mini_emit_stobj (cfg, dest, src, klass, TRUE);
10448 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
10449 dest->type = STACK_VTYPE;
10450 dest->klass = klass;
10456 case CEE_MONO_RETOBJ: {
10458 * Same as RET, but return the native representation of a vtype
10461 g_assert (cfg->ret);
10462 g_assert (mono_method_signature (method)->pinvoke);
10467 token = read32 (ip + 2);
10468 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
10470 if (!cfg->vret_addr) {
10471 g_assert (cfg->ret_var_is_local);
10473 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
10475 EMIT_NEW_RETLOADA (cfg, ins);
10477 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
10479 if (sp != stack_start)
10482 MONO_INST_NEW (cfg, ins, OP_BR);
10483 ins->inst_target_bb = end_bblock;
10484 MONO_ADD_INS (bblock, ins);
10485 link_bblock (cfg, bblock, end_bblock);
10486 start_new_bblock = 1;
10490 case CEE_MONO_CISINST:
10491 case CEE_MONO_CCASTCLASS: {
10496 token = read32 (ip + 2);
10497 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
10498 if (ip [1] == CEE_MONO_CISINST)
10499 ins = handle_cisinst (cfg, klass, sp [0]);
10501 ins = handle_ccastclass (cfg, klass, sp [0]);
10507 case CEE_MONO_SAVE_LMF:
10508 case CEE_MONO_RESTORE_LMF:
10509 #ifdef MONO_ARCH_HAVE_LMF_OPS
10510 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
10511 MONO_ADD_INS (bblock, ins);
10512 cfg->need_lmf_area = TRUE;
10516 case CEE_MONO_CLASSCONST:
10517 CHECK_STACK_OVF (1);
10519 token = read32 (ip + 2);
10520 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
10523 inline_costs += 10 * num_calls++;
10525 case CEE_MONO_NOT_TAKEN:
10526 bblock->out_of_line = TRUE;
10530 CHECK_STACK_OVF (1);
10532 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
10533 ins->dreg = alloc_preg (cfg);
10534 ins->inst_offset = (gint32)read32 (ip + 2);
10535 ins->type = STACK_PTR;
10536 MONO_ADD_INS (bblock, ins);
10540 case CEE_MONO_DYN_CALL: {
10541 MonoCallInst *call;
10543 /* It would be easier to call a trampoline, but that would put an
10544 * extra frame on the stack, confusing exception handling. So
10545 * implement it inline using an opcode for now.
10548 if (!cfg->dyn_call_var) {
10549 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
10550 /* prevent it from being register allocated */
10551 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
10554 /* Has to use a call inst since it local regalloc expects it */
10555 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
10556 ins = (MonoInst*)call;
10558 ins->sreg1 = sp [0]->dreg;
10559 ins->sreg2 = sp [1]->dreg;
10560 MONO_ADD_INS (bblock, ins);
10562 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
10563 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
10567 inline_costs += 10 * num_calls++;
10571 case CEE_MONO_MEMORY_BARRIER: {
10573 emit_memory_barrier (cfg, (int)read32 (ip + 1));
10577 case CEE_MONO_JIT_ATTACH: {
10578 MonoInst *args [16];
10579 MonoInst *ad_ins, *lmf_ins;
10580 MonoBasicBlock *next_bb = NULL;
10582 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
10584 EMIT_NEW_PCONST (cfg, ins, NULL);
10585 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
10591 ad_ins = mono_get_domain_intrinsic (cfg);
10592 lmf_ins = mono_get_lmf_intrinsic (cfg);
10595 #ifdef MONO_ARCH_HAVE_TLS_GET
10596 if (MONO_ARCH_HAVE_TLS_GET && ad_ins && lmf_ins) {
10597 NEW_BBLOCK (cfg, next_bb);
10599 MONO_ADD_INS (cfg->cbb, ad_ins);
10600 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ad_ins->dreg, 0);
10601 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
10603 MONO_ADD_INS (cfg->cbb, lmf_ins);
10604 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, lmf_ins->dreg, 0);
10605 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
10609 if (cfg->compile_aot) {
10610 /* AOT code is only used in the root domain */
10611 EMIT_NEW_PCONST (cfg, args [0], NULL);
10613 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
10615 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
10616 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
10619 MONO_START_BB (cfg, next_bb);
10625 case CEE_MONO_JIT_DETACH: {
10626 MonoInst *args [16];
10628 /* Restore the original domain */
10629 dreg = alloc_ireg (cfg);
10630 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
10631 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
10636 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
10642 case CEE_PREFIX1: {
10645 case CEE_ARGLIST: {
10646 /* somewhat similar to LDTOKEN */
10647 MonoInst *addr, *vtvar;
10648 CHECK_STACK_OVF (1);
10649 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
10651 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
10652 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
10654 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
10655 ins->type = STACK_VTYPE;
10656 ins->klass = mono_defaults.argumenthandle_class;
10669 * The following transforms:
10670 * CEE_CEQ into OP_CEQ
10671 * CEE_CGT into OP_CGT
10672 * CEE_CGT_UN into OP_CGT_UN
10673 * CEE_CLT into OP_CLT
10674 * CEE_CLT_UN into OP_CLT_UN
10676 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
10678 MONO_INST_NEW (cfg, ins, cmp->opcode);
10680 cmp->sreg1 = sp [0]->dreg;
10681 cmp->sreg2 = sp [1]->dreg;
10682 type_from_op (cmp, sp [0], sp [1]);
10684 if ((sp [0]->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
10685 cmp->opcode = OP_LCOMPARE;
10686 else if (sp [0]->type == STACK_R8)
10687 cmp->opcode = OP_FCOMPARE;
10689 cmp->opcode = OP_ICOMPARE;
10690 MONO_ADD_INS (bblock, cmp);
10691 ins->type = STACK_I4;
10692 ins->dreg = alloc_dreg (cfg, ins->type);
10693 type_from_op (ins, sp [0], sp [1]);
10695 if (cmp->opcode == OP_FCOMPARE) {
10697 * The backends expect the fceq opcodes to do the
10700 cmp->opcode = OP_NOP;
10701 ins->sreg1 = cmp->sreg1;
10702 ins->sreg2 = cmp->sreg2;
10704 MONO_ADD_INS (bblock, ins);
10710 MonoInst *argconst;
10711 MonoMethod *cil_method;
10713 CHECK_STACK_OVF (1);
10715 n = read32 (ip + 2);
10716 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
10717 if (!cmethod || mono_loader_get_last_error ())
10719 mono_class_init (cmethod->klass);
10721 mono_save_token_info (cfg, image, n, cmethod);
10723 if (cfg->generic_sharing_context)
10724 context_used = mono_method_check_context_used (cmethod);
10726 cil_method = cmethod;
10727 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
10728 METHOD_ACCESS_FAILURE;
10730 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
10731 if (check_linkdemand (cfg, method, cmethod))
10732 INLINE_FAILURE ("linkdemand");
10733 CHECK_CFG_EXCEPTION;
10734 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
10735 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
10739 * Optimize the common case of ldftn+delegate creation
10741 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
10742 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
10743 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
10744 MonoInst *target_ins;
10745 MonoMethod *invoke;
10746 int invoke_context_used = 0;
10748 invoke = mono_get_delegate_invoke (ctor_method->klass);
10749 if (!invoke || !mono_method_signature (invoke))
10752 if (cfg->generic_sharing_context)
10753 invoke_context_used = mono_method_check_context_used (invoke);
10755 target_ins = sp [-1];
10757 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
10758 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
10760 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
10761 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
10762 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
10763 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
10764 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
10768 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
10769 /* FIXME: SGEN support */
10770 if (invoke_context_used == 0) {
10772 if (cfg->verbose_level > 3)
10773 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10775 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
10776 CHECK_CFG_EXCEPTION;
10785 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
10786 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
10790 inline_costs += 10 * num_calls++;
10793 case CEE_LDVIRTFTN: {
10794 MonoInst *args [2];
10796 GSHAREDVT_FAILURE (*ip);
10800 n = read32 (ip + 2);
10801 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
10802 if (!cmethod || mono_loader_get_last_error ())
10804 mono_class_init (cmethod->klass);
10806 if (cfg->generic_sharing_context)
10807 context_used = mono_method_check_context_used (cmethod);
10809 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
10810 if (check_linkdemand (cfg, method, cmethod))
10811 INLINE_FAILURE ("linkdemand");
10812 CHECK_CFG_EXCEPTION;
10813 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
10814 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
10820 args [1] = emit_get_rgctx_method (cfg, context_used,
10821 cmethod, MONO_RGCTX_INFO_METHOD);
10824 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
10826 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
10829 inline_costs += 10 * num_calls++;
10833 CHECK_STACK_OVF (1);
10835 n = read16 (ip + 2);
10837 EMIT_NEW_ARGLOAD (cfg, ins, n);
10842 CHECK_STACK_OVF (1);
10844 n = read16 (ip + 2);
10846 NEW_ARGLOADA (cfg, ins, n);
10847 MONO_ADD_INS (cfg->cbb, ins);
10855 n = read16 (ip + 2);
10857 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
10859 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
10863 CHECK_STACK_OVF (1);
10865 n = read16 (ip + 2);
10867 EMIT_NEW_LOCLOAD (cfg, ins, n);
10872 unsigned char *tmp_ip;
10873 CHECK_STACK_OVF (1);
10875 n = read16 (ip + 2);
10878 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
10884 EMIT_NEW_LOCLOADA (cfg, ins, n);
10893 n = read16 (ip + 2);
10895 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
10897 emit_stloc_ir (cfg, sp, header, n);
10904 if (sp != stack_start)
10906 if (cfg->method != method)
10908 * Inlining this into a loop in a parent could lead to
10909 * stack overflows which is different behavior than the
10910 * non-inlined case, thus disable inlining in this case.
10912 goto inline_failure;
10914 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
10915 ins->dreg = alloc_preg (cfg);
10916 ins->sreg1 = sp [0]->dreg;
10917 ins->type = STACK_PTR;
10918 MONO_ADD_INS (cfg->cbb, ins);
10920 cfg->flags |= MONO_CFG_HAS_ALLOCA;
10922 ins->flags |= MONO_INST_INIT;
10927 case CEE_ENDFILTER: {
10928 MonoExceptionClause *clause, *nearest;
10929 int cc, nearest_num;
10933 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
10935 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
10936 ins->sreg1 = (*sp)->dreg;
10937 MONO_ADD_INS (bblock, ins);
10938 start_new_bblock = 1;
10943 for (cc = 0; cc < header->num_clauses; ++cc) {
10944 clause = &header->clauses [cc];
10945 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
10946 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
10947 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
10952 g_assert (nearest);
10953 if ((ip - header->code) != nearest->handler_offset)
10958 case CEE_UNALIGNED_:
10959 ins_flag |= MONO_INST_UNALIGNED;
10960 /* FIXME: record alignment? we can assume 1 for now */
10964 case CEE_VOLATILE_:
10965 ins_flag |= MONO_INST_VOLATILE;
10969 ins_flag |= MONO_INST_TAILCALL;
10970 cfg->flags |= MONO_CFG_HAS_TAIL;
10971 /* Can't inline tail calls at this time */
10972 inline_costs += 100000;
10979 token = read32 (ip + 2);
10980 klass = mini_get_class (method, token, generic_context);
10981 CHECK_TYPELOAD (klass);
10982 if (generic_class_is_reference_type (cfg, klass))
10983 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
10985 mini_emit_initobj (cfg, *sp, NULL, klass);
10989 case CEE_CONSTRAINED_:
10991 token = read32 (ip + 2);
10992 if (method->wrapper_type != MONO_WRAPPER_NONE)
10993 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
10995 constrained_call = mono_class_get_full (image, token, generic_context);
10996 CHECK_TYPELOAD (constrained_call);
11000 case CEE_INITBLK: {
11001 MonoInst *iargs [3];
11005 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
11006 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
11007 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
11008 /* emit_memset only works when val == 0 */
11009 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
11011 iargs [0] = sp [0];
11012 iargs [1] = sp [1];
11013 iargs [2] = sp [2];
11014 if (ip [1] == CEE_CPBLK) {
11015 MonoMethod *memcpy_method = get_memcpy_method ();
11016 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11018 MonoMethod *memset_method = get_memset_method ();
11019 mono_emit_method_call (cfg, memset_method, iargs, NULL);
11029 ins_flag |= MONO_INST_NOTYPECHECK;
11031 ins_flag |= MONO_INST_NORANGECHECK;
11032 /* we ignore the no-nullcheck for now since we
11033 * really do it explicitly only when doing callvirt->call
11037 case CEE_RETHROW: {
11039 int handler_offset = -1;
11041 for (i = 0; i < header->num_clauses; ++i) {
11042 MonoExceptionClause *clause = &header->clauses [i];
11043 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
11044 handler_offset = clause->handler_offset;
11049 bblock->flags |= BB_EXCEPTION_UNSAFE;
11051 g_assert (handler_offset != -1);
11053 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
11054 MONO_INST_NEW (cfg, ins, OP_RETHROW);
11055 ins->sreg1 = load->dreg;
11056 MONO_ADD_INS (bblock, ins);
11058 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11059 MONO_ADD_INS (bblock, ins);
11062 link_bblock (cfg, bblock, end_bblock);
11063 start_new_bblock = 1;
11071 GSHAREDVT_FAILURE (*ip);
11073 CHECK_STACK_OVF (1);
11075 token = read32 (ip + 2);
11076 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic && !generic_context) {
11077 MonoType *type = mono_type_create_from_typespec (image, token);
11078 val = mono_type_size (type, &ialign);
11080 MonoClass *klass = mono_class_get_full (image, token, generic_context);
11081 CHECK_TYPELOAD (klass);
11082 mono_class_init (klass);
11083 val = mono_type_size (&klass->byval_arg, &ialign);
11085 EMIT_NEW_ICONST (cfg, ins, val);
11090 case CEE_REFANYTYPE: {
11091 MonoInst *src_var, *src;
11093 GSHAREDVT_FAILURE (*ip);
11099 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11101 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11102 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11103 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
11108 case CEE_READONLY_:
11121 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
11131 g_warning ("opcode 0x%02x not handled", *ip);
11135 if (start_new_bblock != 1)
11138 bblock->cil_length = ip - bblock->cil_code;
11139 if (bblock->next_bb) {
11140 /* This could already be set because of inlining, #693905 */
11141 MonoBasicBlock *bb = bblock;
11143 while (bb->next_bb)
11145 bb->next_bb = end_bblock;
11147 bblock->next_bb = end_bblock;
11150 if (cfg->method == method && cfg->domainvar) {
11152 MonoInst *get_domain;
11154 cfg->cbb = init_localsbb;
11156 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
11157 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
11160 get_domain->dreg = alloc_preg (cfg);
11161 MONO_ADD_INS (cfg->cbb, get_domain);
11163 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
11164 MONO_ADD_INS (cfg->cbb, store);
11167 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
11168 if (cfg->compile_aot)
11169 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
11170 mono_get_got_var (cfg);
11173 if (cfg->method == method && cfg->got_var)
11174 mono_emit_load_got_addr (cfg);
11179 cfg->cbb = init_localsbb;
11181 for (i = 0; i < header->num_locals; ++i) {
11182 MonoType *ptype = header->locals [i];
11183 int t = ptype->type;
11184 dreg = cfg->locals [i]->dreg;
11186 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
11187 t = mono_class_enum_basetype (ptype->data.klass)->type;
11188 if (ptype->byref) {
11189 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
11190 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
11191 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
11192 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
11193 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
11194 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
11195 MONO_INST_NEW (cfg, ins, OP_R8CONST);
11196 ins->type = STACK_R8;
11197 ins->inst_p0 = (void*)&r8_0;
11198 ins->dreg = alloc_dreg (cfg, STACK_R8);
11199 MONO_ADD_INS (init_localsbb, ins);
11200 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
11201 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
11202 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
11203 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
11204 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, ptype)) {
11205 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
11207 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
11212 if (cfg->init_ref_vars && cfg->method == method) {
11213 /* Emit initialization for ref vars */
11214 // FIXME: Avoid duplication initialization for IL locals.
11215 for (i = 0; i < cfg->num_varinfo; ++i) {
11216 MonoInst *ins = cfg->varinfo [i];
11218 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
11219 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
11224 MonoBasicBlock *bb;
11227 * Make seq points at backward branch targets interruptable.
11229 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
11230 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
11231 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
11234 /* Add a sequence point for method entry/exit events */
11236 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
11237 MONO_ADD_INS (init_localsbb, ins);
11238 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
11239 MONO_ADD_INS (cfg->bb_exit, ins);
11244 if (cfg->method == method) {
11245 MonoBasicBlock *bb;
11246 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
11247 bb->region = mono_find_block_region (cfg, bb->real_offset);
11249 mono_create_spvar_for_region (cfg, bb->region);
11250 if (cfg->verbose_level > 2)
11251 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
11255 g_slist_free (class_inits);
11256 dont_inline = g_list_remove (dont_inline, method);
11258 if (inline_costs < 0) {
11261 /* Method is too large */
11262 mname = mono_method_full_name (method, TRUE);
11263 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
11264 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
11266 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
11267 mono_basic_block_free (original_bb);
11271 if ((cfg->verbose_level > 2) && (cfg->method == method))
11272 mono_print_code (cfg, "AFTER METHOD-TO-IR");
11274 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
11275 mono_basic_block_free (original_bb);
11276 return inline_costs;
11279 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
11286 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
11290 set_exception_type_from_invalid_il (cfg, method, ip);
11294 g_slist_free (class_inits);
11295 mono_basic_block_free (original_bb);
11296 dont_inline = g_list_remove (dont_inline, method);
11297 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a store-to-memory opcode whose source is a register
 * (OP_STORE*_MEMBASE_REG) to the equivalent opcode whose source is an
 * immediate (OP_STORE*_MEMBASE_IMM), preserving the access width.
 * Any opcode without an immediate form trips the assertion below.
 * NOTE(review): some intervening lines (braces, switch header, default
 * arm) are elided in this excerpt.
 */
11302 store_membase_reg_to_store_membase_imm (int opcode)
11305 case OP_STORE_MEMBASE_REG:
11306 return OP_STORE_MEMBASE_IMM;
11307 case OP_STOREI1_MEMBASE_REG:
11308 return OP_STOREI1_MEMBASE_IMM;
11309 case OP_STOREI2_MEMBASE_REG:
11310 return OP_STOREI2_MEMBASE_IMM;
11311 case OP_STOREI4_MEMBASE_REG:
11312 return OP_STOREI4_MEMBASE_IMM;
11313 case OP_STOREI8_MEMBASE_REG:
11314 return OP_STOREI8_MEMBASE_IMM;
	/* No immediate variant exists for this store opcode */
11316 g_assert_not_reached ();
11322 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 *
 *   Map an opcode taking a register operand to the variant taking an
 * immediate operand (e.g. OP_IADD -> OP_IADD_IMM), used when one source
 * of an instruction is a constant.
 * NOTE(review): the matching 'case' labels for most return statements
 * are elided in this excerpt; the mapping below is inferred from the
 * returned opcode names — confirm against the full source.
 */
11325 mono_op_to_op_imm (int opcode)
	/* 32-bit integer ALU and shift ops */
11329 return OP_IADD_IMM;
11331 return OP_ISUB_IMM;
11333 return OP_IDIV_IMM;
11335 return OP_IDIV_UN_IMM;
11337 return OP_IREM_IMM;
11339 return OP_IREM_UN_IMM;
11341 return OP_IMUL_IMM;
11343 return OP_IAND_IMM;
11347 return OP_IXOR_IMM;
11349 return OP_ISHL_IMM;
11351 return OP_ISHR_IMM;
11353 return OP_ISHR_UN_IMM;
	/* 64-bit integer ALU and shift ops */
11356 return OP_LADD_IMM;
11358 return OP_LSUB_IMM;
11360 return OP_LAND_IMM;
11364 return OP_LXOR_IMM;
11366 return OP_LSHL_IMM;
11368 return OP_LSHR_IMM;
11370 return OP_LSHR_UN_IMM;
	/* Compares */
11373 return OP_COMPARE_IMM;
11375 return OP_ICOMPARE_IMM;
11377 return OP_LCOMPARE_IMM;
	/* Stores: fold a constant source directly into the store */
11379 case OP_STORE_MEMBASE_REG:
11380 return OP_STORE_MEMBASE_IMM;
11381 case OP_STOREI1_MEMBASE_REG:
11382 return OP_STOREI1_MEMBASE_IMM;
11383 case OP_STOREI2_MEMBASE_REG:
11384 return OP_STOREI2_MEMBASE_IMM;
11385 case OP_STOREI4_MEMBASE_REG:
11386 return OP_STOREI4_MEMBASE_IMM;
	/* x86/amd64-specific opcodes with immediate forms */
11388 #if defined(TARGET_X86) || defined (TARGET_AMD64)
11390 return OP_X86_PUSH_IMM;
11391 case OP_X86_COMPARE_MEMBASE_REG:
11392 return OP_X86_COMPARE_MEMBASE_IMM;
11394 #if defined(TARGET_AMD64)
11395 case OP_AMD64_ICOMPARE_MEMBASE_REG:
11396 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
	/* A call through a constant address becomes a direct call */
11398 case OP_VOIDCALL_REG:
11399 return OP_VOIDCALL;
11407 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL indirect-load opcode (CEE_LDIND_*) to the corresponding
 * low-level load-from-base-register opcode (OP_LOAD*_MEMBASE), keeping
 * the element width/signedness. Asserts on any opcode outside the
 * CEE_LDIND_* family.
 * NOTE(review): most 'case CEE_LDIND_*' labels are elided in this
 * excerpt; the pairing is inferred from the returned opcode names.
 */
11414 ldind_to_load_membase (int opcode)
11418 return OP_LOADI1_MEMBASE;
11420 return OP_LOADU1_MEMBASE;
11422 return OP_LOADI2_MEMBASE;
11424 return OP_LOADU2_MEMBASE;
11426 return OP_LOADI4_MEMBASE;
11428 return OP_LOADU4_MEMBASE;
11430 return OP_LOAD_MEMBASE;
	/* Object references load as a full native-word load */
11431 case CEE_LDIND_REF:
11432 return OP_LOAD_MEMBASE;
11434 return OP_LOADI8_MEMBASE;
11436 return OP_LOADR4_MEMBASE;
11438 return OP_LOADR8_MEMBASE;
11440 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL indirect-store opcode (CEE_STIND_*) to the corresponding
 * low-level store-to-base-register opcode (OP_STORE*_MEMBASE_REG),
 * keeping the element width. Asserts on any opcode outside the
 * CEE_STIND_* family.
 * NOTE(review): most 'case CEE_STIND_*' labels are elided in this
 * excerpt; the pairing is inferred from the returned opcode names.
 */
11447 stind_to_store_membase (int opcode)
11451 return OP_STOREI1_MEMBASE_REG;
11453 return OP_STOREI2_MEMBASE_REG;
11455 return OP_STOREI4_MEMBASE_REG;
	/* Object references store as a full native-word store */
11457 case CEE_STIND_REF:
11458 return OP_STORE_MEMBASE_REG;
11460 return OP_STOREI8_MEMBASE_REG;
11462 return OP_STORER4_MEMBASE_REG;
11464 return OP_STORER8_MEMBASE_REG;
11466 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a load-from-base-register opcode (OP_LOAD*_MEMBASE) to the
 * load-from-absolute-address form (OP_LOAD*_MEM). Only enabled on
 * x86/amd64, which have addressing modes for absolute loads; the
 * 8-byte variant additionally requires a 64-bit register size.
 * NOTE(review): the switch header, default arm and the fallback return
 * for other targets are elided in this excerpt.
 */
11473 mono_load_membase_to_load_mem (int opcode)
11475 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
11476 #if defined(TARGET_X86) || defined(TARGET_AMD64)
11478 case OP_LOAD_MEMBASE:
11479 return OP_LOAD_MEM;
11480 case OP_LOADU1_MEMBASE:
11481 return OP_LOADU1_MEM;
11482 case OP_LOADU2_MEMBASE:
11483 return OP_LOADU2_MEM;
11484 case OP_LOADI4_MEMBASE:
11485 return OP_LOADI4_MEM;
11486 case OP_LOADU4_MEMBASE:
11487 return OP_LOADU4_MEM;
11488 #if SIZEOF_REGISTER == 8
11489 case OP_LOADI8_MEMBASE:
11490 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU opcode whose result is immediately stored to memory by
 * STORE_OPCODE, return the x86/amd64 read-modify-write opcode operating
 * directly on the memory destination (e.g. OP_X86_ADD_MEMBASE_REG),
 * letting the two instructions be fused. Only word-sized stores
 * qualify; other store widths bail out early.
 * NOTE(review): the 'case' labels for the ALU opcodes and the closing
 * '#endif'/fallback return are elided in this excerpt; the mapping is
 * inferred from the returned opcode names.
 */
11499 op_to_op_dest_membase (int store_opcode, int opcode)
11501 #if defined(TARGET_X86)
	/* Only 32-bit/native-word stores can be fused on x86 */
11502 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
	/* Register-source ALU ops */
11507 return OP_X86_ADD_MEMBASE_REG;
11509 return OP_X86_SUB_MEMBASE_REG;
11511 return OP_X86_AND_MEMBASE_REG;
11513 return OP_X86_OR_MEMBASE_REG;
11515 return OP_X86_XOR_MEMBASE_REG;
	/* Immediate-source ALU ops */
11518 return OP_X86_ADD_MEMBASE_IMM;
11521 return OP_X86_SUB_MEMBASE_IMM;
11524 return OP_X86_AND_MEMBASE_IMM;
11527 return OP_X86_OR_MEMBASE_IMM;
11530 return OP_X86_XOR_MEMBASE_IMM;
11536 #if defined(TARGET_AMD64)
	/* amd64 additionally accepts 64-bit stores */
11537 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
	/* 32-bit forms reuse the X86_* opcodes */
11542 return OP_X86_ADD_MEMBASE_REG;
11544 return OP_X86_SUB_MEMBASE_REG;
11546 return OP_X86_AND_MEMBASE_REG;
11548 return OP_X86_OR_MEMBASE_REG;
11550 return OP_X86_XOR_MEMBASE_REG;
11552 return OP_X86_ADD_MEMBASE_IMM;
11554 return OP_X86_SUB_MEMBASE_IMM;
11556 return OP_X86_AND_MEMBASE_IMM;
11558 return OP_X86_OR_MEMBASE_IMM;
11560 return OP_X86_XOR_MEMBASE_IMM;
	/* 64-bit forms use the AMD64_* opcodes */
11562 return OP_AMD64_ADD_MEMBASE_REG;
11564 return OP_AMD64_SUB_MEMBASE_REG;
11566 return OP_AMD64_AND_MEMBASE_REG;
11568 return OP_AMD64_OR_MEMBASE_REG;
11570 return OP_AMD64_XOR_MEMBASE_REG;
11573 return OP_AMD64_ADD_MEMBASE_IMM;
11576 return OP_AMD64_SUB_MEMBASE_IMM;
11579 return OP_AMD64_AND_MEMBASE_IMM;
11582 return OP_AMD64_OR_MEMBASE_IMM;
11585 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a compare-result opcode with the byte store that consumes it:
 * on x86/amd64, a SETcc result stored through OP_STOREI1_MEMBASE_REG
 * can be emitted as a single SETcc-to-memory opcode.
 * NOTE(review): the 'case' labels for the condition opcodes and the
 * fallback return are elided in this excerpt; CEQ/CNE pairing is
 * inferred from the SETEQ/SETNE names.
 */
11595 op_to_op_store_membase (int store_opcode, int opcode)
11597 #if defined(TARGET_X86) || defined(TARGET_AMD64)
11600 if (store_opcode == OP_STOREI1_MEMBASE_REG)
11601 return OP_X86_SETEQ_MEMBASE;
11603 if (store_opcode == OP_STOREI1_MEMBASE_REG)
11604 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Given an opcode whose first source comes from a memory load
 * (LOAD_OPCODE), return the x86/amd64 opcode reading that source
 * directly from memory, so the separate load can be eliminated.
 * Returns a fused opcode only for compatible load widths.
 * NOTE(review): several 'case' labels, '#endif's and fallback returns
 * are elided in this excerpt.
 */
11612 op_to_op_src1_membase (int load_opcode, int opcode)
11615 /* FIXME: This has sign extension issues */
	/* Byte compare against an immediate can read the byte directly */
11617 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
11618 return OP_X86_COMPARE_MEMBASE8_IMM;
	/* Otherwise only word-sized loads can be fused on x86 */
11621 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
11626 return OP_X86_PUSH_MEMBASE;
11627 case OP_COMPARE_IMM:
11628 case OP_ICOMPARE_IMM:
11629 return OP_X86_COMPARE_MEMBASE_IMM;
11632 return OP_X86_COMPARE_MEMBASE_REG;
11636 #ifdef TARGET_AMD64
11637 /* FIXME: This has sign extension issues */
11639 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
11640 return OP_X86_COMPARE_MEMBASE8_IMM;
	/* Under ILP32 (x32) pointers are 4 bytes, so 8-byte loads differ
	 * from native-word loads and need separate handling */
11645 #ifdef __mono_ilp32__
11646 if (load_opcode == OP_LOADI8_MEMBASE)
11648 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
11650 return OP_X86_PUSH_MEMBASE;
11652 /* FIXME: This only works for 32 bit immediates
11653 case OP_COMPARE_IMM:
11654 case OP_LCOMPARE_IMM:
11655 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
11656 return OP_AMD64_COMPARE_MEMBASE_IMM;
11658 case OP_ICOMPARE_IMM:
11659 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
11660 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
11664 #ifdef __mono_ilp32__
11665 if (load_opcode == OP_LOAD_MEMBASE)
11666 return OP_AMD64_ICOMPARE_MEMBASE_REG;
11667 if (load_opcode == OP_LOADI8_MEMBASE)
11669 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
11671 return OP_AMD64_COMPARE_MEMBASE_REG;
11674 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
11675 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Counterpart of op_to_op_src1_membase for the SECOND source operand:
 * given an opcode whose second source comes from a memory load
 * (LOAD_OPCODE), return the x86/amd64 reg-op-membase opcode that reads
 * the operand directly from memory. Only compatible load widths fuse.
 * NOTE(review): the ALU 'case' labels, '#endif's and fallback returns
 * are elided in this excerpt.
 */
11684 op_to_op_src2_membase (int load_opcode, int opcode)
	/* x86: only word-sized loads can be fused */
11687 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
11693 return OP_X86_COMPARE_REG_MEMBASE;
11695 return OP_X86_ADD_REG_MEMBASE;
11697 return OP_X86_SUB_REG_MEMBASE;
11699 return OP_X86_AND_REG_MEMBASE;
11701 return OP_X86_OR_REG_MEMBASE;
11703 return OP_X86_XOR_REG_MEMBASE;
11707 #ifdef TARGET_AMD64
	/* Under ILP32 native-word loads are 4 bytes wide */
11708 #ifdef __mono_ilp32__
11709 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
11711 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
	/* 32-bit forms */
11715 return OP_AMD64_ICOMPARE_REG_MEMBASE;
11717 return OP_X86_ADD_REG_MEMBASE;
11719 return OP_X86_SUB_REG_MEMBASE;
11721 return OP_X86_AND_REG_MEMBASE;
11723 return OP_X86_OR_REG_MEMBASE;
11725 return OP_X86_XOR_REG_MEMBASE;
11727 #ifdef __mono_ilp32__
11728 } else if (load_opcode == OP_LOADI8_MEMBASE) {
11730 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
	/* 64-bit forms */
11735 return OP_AMD64_COMPARE_REG_MEMBASE;
11737 return OP_AMD64_ADD_REG_MEMBASE;
11739 return OP_AMD64_SUB_REG_MEMBASE;
11741 return OP_AMD64_AND_REG_MEMBASE;
11743 return OP_AMD64_OR_REG_MEMBASE;
11745 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuses to convert opcodes that the
 * current architecture emulates in software (long shifts on 32-bit
 * registers, and mul/div/rem when MONO_ARCH_EMULATE_* is set), since an
 * emulated opcode has no usable immediate form. Everything else is
 * delegated to mono_op_to_op_imm ().
 * NOTE(review): the 'case' labels guarded by each '#if' and the early
 * returns are elided in this excerpt.
 */
11754 mono_op_to_op_imm_noemul (int opcode)
11757 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
11763 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
11770 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
11775 return mono_op_to_op_imm (opcode);
11779 #ifndef DISABLE_JIT
11782 * mono_handle_global_vregs:
11784 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
11788 mono_handle_global_vregs (MonoCompile *cfg)
11790 gint32 *vreg_to_bb;
11791 MonoBasicBlock *bb;
11794 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
11796 #ifdef MONO_ARCH_SIMD_INTRINSICS
11797 if (cfg->uses_simd_intrinsics)
11798 mono_simd_simplify_indirection (cfg);
11801 /* Find local vregs used in more than one bb */
11802 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
11803 MonoInst *ins = bb->code;
11804 int block_num = bb->block_num;
11806 if (cfg->verbose_level > 2)
11807 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
11810 for (; ins; ins = ins->next) {
11811 const char *spec = INS_INFO (ins->opcode);
11812 int regtype = 0, regindex;
11815 if (G_UNLIKELY (cfg->verbose_level > 2))
11816 mono_print_ins (ins);
11818 g_assert (ins->opcode >= MONO_CEE_LAST);
11820 for (regindex = 0; regindex < 4; regindex ++) {
11823 if (regindex == 0) {
11824 regtype = spec [MONO_INST_DEST];
11825 if (regtype == ' ')
11828 } else if (regindex == 1) {
11829 regtype = spec [MONO_INST_SRC1];
11830 if (regtype == ' ')
11833 } else if (regindex == 2) {
11834 regtype = spec [MONO_INST_SRC2];
11835 if (regtype == ' ')
11838 } else if (regindex == 3) {
11839 regtype = spec [MONO_INST_SRC3];
11840 if (regtype == ' ')
11845 #if SIZEOF_REGISTER == 4
11846 /* In the LLVM case, the long opcodes are not decomposed */
11847 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
11849 * Since some instructions reference the original long vreg,
11850 * and some reference the two component vregs, it is quite hard
11851 * to determine when it needs to be global. So be conservative.
11853 if (!get_vreg_to_inst (cfg, vreg)) {
11854 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
11856 if (cfg->verbose_level > 2)
11857 printf ("LONG VREG R%d made global.\n", vreg);
11861 * Make the component vregs volatile since the optimizations can
11862 * get confused otherwise.
11864 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
11865 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
11869 g_assert (vreg != -1);
11871 prev_bb = vreg_to_bb [vreg];
11872 if (prev_bb == 0) {
11873 /* 0 is a valid block num */
11874 vreg_to_bb [vreg] = block_num + 1;
11875 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
11876 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
11879 if (!get_vreg_to_inst (cfg, vreg)) {
11880 if (G_UNLIKELY (cfg->verbose_level > 2))
11881 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
11885 if (vreg_is_ref (cfg, vreg))
11886 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
11888 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
11891 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
11894 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
11897 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
11900 g_assert_not_reached ();
11904 /* Flag as having been used in more than one bb */
11905 vreg_to_bb [vreg] = -1;
11911 /* If a variable is used in only one bblock, convert it into a local vreg */
11912 for (i = 0; i < cfg->num_varinfo; i++) {
11913 MonoInst *var = cfg->varinfo [i];
11914 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
11916 switch (var->type) {
11922 #if SIZEOF_REGISTER == 8
11925 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
11926 /* Enabling this screws up the fp stack on x86 */
11929 /* Arguments are implicitly global */
11930 /* Putting R4 vars into registers doesn't work currently */
11931 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
11933 * Make that the variable's liveness interval doesn't contain a call, since
11934 * that would cause the lvreg to be spilled, making the whole optimization
11937 /* This is too slow for JIT compilation */
11939 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
11941 int def_index, call_index, ins_index;
11942 gboolean spilled = FALSE;
11947 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
11948 const char *spec = INS_INFO (ins->opcode);
11950 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
11951 def_index = ins_index;
11953 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
11954 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
11955 if (call_index > def_index) {
11961 if (MONO_IS_CALL (ins))
11962 call_index = ins_index;
11972 if (G_UNLIKELY (cfg->verbose_level > 2))
11973 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
11974 var->flags |= MONO_INST_IS_DEAD;
11975 cfg->vreg_to_inst [var->dreg] = NULL;
11982 * Compress the varinfo and vars tables so the liveness computation is faster and
11983 * takes up less space.
11986 for (i = 0; i < cfg->num_varinfo; ++i) {
11987 MonoInst *var = cfg->varinfo [i];
11988 if (pos < i && cfg->locals_start == i)
11989 cfg->locals_start = pos;
11990 if (!(var->flags & MONO_INST_IS_DEAD)) {
11992 cfg->varinfo [pos] = cfg->varinfo [i];
11993 cfg->varinfo [pos]->inst_c0 = pos;
11994 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
11995 cfg->vars [pos].idx = pos;
11996 #if SIZEOF_REGISTER == 4
11997 if (cfg->varinfo [pos]->type == STACK_I8) {
11998 /* Modify the two component vars too */
12001 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
12002 var1->inst_c0 = pos;
12003 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
12004 var1->inst_c0 = pos;
12011 cfg->num_varinfo = pos;
12012 if (cfg->locals_start > cfg->num_varinfo)
12013 cfg->locals_start = cfg->num_varinfo;
12017 * mono_spill_global_vars:
12019 * Generate spill code for variables which are not allocated to registers,
12020 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
12021 * code is generated which could be optimized by the local optimization passes.
mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
	MonoBasicBlock *bb;
	int orig_next_vreg;
	guint32 *vreg_to_lvreg;
	guint32 i, lvregs_len;
	gboolean dest_has_lvreg = FALSE;
	guint32 stacktypes [128];
	MonoInst **live_range_start, **live_range_end;
	MonoBasicBlock **live_range_start_bb, **live_range_end_bb;

	*need_local_opts = FALSE;

	memset (spec2, 0, sizeof (spec2));

	/* FIXME: Move this function to mini.c */
	/* Map ins-spec register-type characters to stack types for alloc_dreg () */
	stacktypes ['i'] = STACK_PTR;
	stacktypes ['l'] = STACK_I8;
	stacktypes ['f'] = STACK_R8;
#ifdef MONO_ARCH_SIMD_INTRINSICS
	stacktypes ['x'] = STACK_VTYPE;

#if SIZEOF_REGISTER == 4
	/* Create MonoInsts for longs */
	for (i = 0; i < cfg->num_varinfo; i++) {
		MonoInst *ins = cfg->varinfo [i];

		if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
			switch (ins->type) {
			if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))

			g_assert (ins->opcode == OP_REGOFFSET);

			/* Low word of the long lives at vreg dreg + 1 ... */
			tree = get_vreg_to_inst (cfg, ins->dreg + 1);
			tree->opcode = OP_REGOFFSET;
			tree->inst_basereg = ins->inst_basereg;
			tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;

			/* ... and the high word at vreg dreg + 2 */
			tree = get_vreg_to_inst (cfg, ins->dreg + 2);
			tree->opcode = OP_REGOFFSET;
			tree->inst_basereg = ins->inst_basereg;
			tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;

	if (cfg->compute_gc_maps) {
		/* registers need liveness info even for non-ref values */
		for (i = 0; i < cfg->num_varinfo; i++) {
			MonoInst *ins = cfg->varinfo [i];

			if (ins->opcode == OP_REGVAR)
				ins->flags |= MONO_INST_GC_TRACK;

	/* FIXME: widening and truncation */

	/*
	 * As an optimization, when a variable allocated to the stack is first loaded into
	 * an lvreg, we will remember the lvreg and use it the next time instead of loading
	 * the variable again.
	 */
	orig_next_vreg = cfg->next_vreg;
	vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
	lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);

	/*
	 * These arrays contain the first and last instructions accessing a given
	 * variable.
	 * Since we emit bblocks in the same order we process them here, and we
	 * don't split live ranges, these will precisely describe the live range of
	 * the variable, i.e. the instruction range where a valid value can be found
	 * in the variables location.
	 * The live range is computed using the liveness info computed by the liveness pass.
	 * We can't use vmv->range, since that is an abstract live range, and we need
	 * one which is instruction precise.
	 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
	 */
	/* FIXME: Only do this if debugging info is requested */
	live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
	live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
	live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
	live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);

	/* Add spill loads/stores */
	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
		if (cfg->verbose_level > 2)
			printf ("\nSPILL BLOCK %d:\n", bb->block_num);

		/* Clear vreg_to_lvreg array */
		for (i = 0; i < lvregs_len; i++)
			vreg_to_lvreg [lvregs [i]] = 0;

		MONO_BB_FOR_EACH_INS (bb, ins) {
			const char *spec = INS_INFO (ins->opcode);
			int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
			gboolean store, no_lvreg;
			int sregs [MONO_MAX_SRC_REGS];

			if (G_UNLIKELY (cfg->verbose_level > 2))
				mono_print_ins (ins);

			if (ins->opcode == OP_NOP)

			/*
			 * We handle LDADDR here as well, since it can only be decomposed
			 * when variable addresses are known.
			 */
			if (ins->opcode == OP_LDADDR) {
				MonoInst *var = ins->inst_p0;

				if (var->opcode == OP_VTARG_ADDR) {
					/* Happens on SPARC/S390 where vtypes are passed by reference */
					MonoInst *vtaddr = var->inst_left;
					if (vtaddr->opcode == OP_REGVAR) {
						ins->opcode = OP_MOVE;
						ins->sreg1 = vtaddr->dreg;
					else if (var->inst_left->opcode == OP_REGOFFSET) {
						ins->opcode = OP_LOAD_MEMBASE;
						ins->inst_basereg = vtaddr->inst_basereg;
						ins->inst_offset = vtaddr->inst_offset;
					g_assert (var->opcode == OP_REGOFFSET);

					/* Compute the address as basereg + offset */
					ins->opcode = OP_ADD_IMM;
					ins->sreg1 = var->inst_basereg;
					ins->inst_imm = var->inst_offset;

				*need_local_opts = TRUE;
				spec = INS_INFO (ins->opcode);

			/* CIL-level opcodes must have been lowered before this pass */
			if (ins->opcode < MONO_CEE_LAST) {
				mono_print_ins (ins);
				g_assert_not_reached ();

			/*
			 * Store opcodes have destbasereg in the dreg, but in reality, it is an
			 * sreg, so swap it with sreg2 and describe it with the spec2 table below.
			 */
			if (MONO_IS_STORE_MEMBASE (ins)) {
				tmp_reg = ins->dreg;
				ins->dreg = ins->sreg2;
				ins->sreg2 = tmp_reg;

				spec2 [MONO_INST_DEST] = ' ';
				spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
				spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
				spec2 [MONO_INST_SRC3] = ' ';
			} else if (MONO_IS_STORE_MEMINDEX (ins))
				g_assert_not_reached ();

			if (G_UNLIKELY (cfg->verbose_level > 2)) {
				printf ("\t %.3s %d", spec, ins->dreg);
				num_sregs = mono_inst_get_src_registers (ins, sregs);
				for (srcindex = 0; srcindex < num_sregs; ++srcindex)
					printf (" %d", sregs [srcindex]);

			/* Process the destination register */
			regtype = spec [MONO_INST_DEST];
			g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));

			if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
				/* The dreg refers to a global variable */
				MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
				MonoInst *store_ins;
				MonoInst *def_ins = ins;
				int dreg = ins->dreg; /* The original vreg */

				store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);

				if (var->opcode == OP_REGVAR) {
					/* The variable lives in a hard register: just rewrite the vreg */
					ins->dreg = var->dreg;
				} else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
					/*
					 * Instead of emitting a load+store, use a _membase opcode.
					 */
					g_assert (var->opcode == OP_REGOFFSET);
					if (ins->opcode == OP_MOVE) {
						ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
						ins->inst_basereg = var->inst_basereg;
						ins->inst_offset = var->inst_offset;
					spec = INS_INFO (ins->opcode);
				g_assert (var->opcode == OP_REGOFFSET);

				prev_dreg = ins->dreg;

				/* Invalidate any previous lvreg for this vreg */
				vreg_to_lvreg [ins->dreg] = 0;

				if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
					/* Under soft-float, R8 values are stored as I8 */
					store_opcode = OP_STOREI8_MEMBASE_REG;

				/* Redirect the dreg to a fresh local vreg and spill it below */
				ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);

				if (regtype == 'l') {
					/* 32 bit: store the two component words separately */
					NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
					mono_bblock_insert_after_ins (bb, ins, store_ins);
					NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
					mono_bblock_insert_after_ins (bb, ins, store_ins);
					def_ins = store_ins;

					g_assert (store_opcode != OP_STOREV_MEMBASE);

					/* Try to fuse the store into the instruction itself */
					/* FIXME: Add more instructions */
					if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
						ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
						ins->inst_imm = ins->inst_c0;
						ins->inst_destbasereg = var->inst_basereg;
						ins->inst_offset = var->inst_offset;
						spec = INS_INFO (ins->opcode);
					} else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
						/* Turn the move into a direct store to the variable */
						ins->opcode = store_opcode;
						ins->inst_destbasereg = var->inst_basereg;
						ins->inst_offset = var->inst_offset;

						tmp_reg = ins->dreg;
						ins->dreg = ins->sreg2;
						ins->sreg2 = tmp_reg;

						spec2 [MONO_INST_DEST] = ' ';
						spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
						spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
						spec2 [MONO_INST_SRC3] = ' ';
					} else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
						// FIXME: The backends expect the base reg to be in inst_basereg
						ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
						ins->inst_basereg = var->inst_basereg;
						ins->inst_offset = var->inst_offset;
						spec = INS_INFO (ins->opcode);
						/* printf ("INS: "); mono_print_ins (ins); */
						/* Create a store instruction */
						NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);

						/* Insert it after the instruction */
						mono_bblock_insert_after_ins (bb, ins, store_ins);

						def_ins = store_ins;

						/*
						 * We can't assign ins->dreg to var->dreg here, since the
						 * sregs could use it. So set a flag, and do it after
						 * the sregs have been processed.
						 */
						if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
							dest_has_lvreg = TRUE;

				/* Record the first def as the start of the live range */
				if (def_ins && !live_range_start [dreg]) {
					live_range_start [dreg] = def_ins;
					live_range_start_bb [dreg] = bb;

				if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
					MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
					tmp->inst_c1 = dreg;
					mono_bblock_insert_after_ins (bb, def_ins, tmp);

			/* Process the source registers */
			num_sregs = mono_inst_get_src_registers (ins, sregs);
			for (srcindex = 0; srcindex < 3; ++srcindex) {
				regtype = spec [MONO_INST_SRC1 + srcindex];
				sreg = sregs [srcindex];

				g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
				if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
					/* The sreg refers to a global variable */
					MonoInst *var = get_vreg_to_inst (cfg, sreg);
					MonoInst *use_ins = ins;
					MonoInst *load_ins;
					guint32 load_opcode;

					if (var->opcode == OP_REGVAR) {
						/* The variable lives in a hard register: just rewrite the sreg */
						sregs [srcindex] = var->dreg;
						//mono_inst_set_src_registers (ins, sregs);
						live_range_end [sreg] = use_ins;
						live_range_end_bb [sreg] = bb;

						if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
							MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
							/* var->dreg is a hreg */
							tmp->inst_c1 = sreg;
							mono_bblock_insert_after_ins (bb, ins, tmp);

					g_assert (var->opcode == OP_REGOFFSET);

					load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);

					g_assert (load_opcode != OP_LOADV_MEMBASE);

					if (vreg_to_lvreg [sreg]) {
						g_assert (vreg_to_lvreg [sreg] != -1);

						/* The variable is already loaded to an lvreg */
						if (G_UNLIKELY (cfg->verbose_level > 2))
							printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
						sregs [srcindex] = vreg_to_lvreg [sreg];
						//mono_inst_set_src_registers (ins, sregs);

					/* Try to fuse the load into the instruction */
					if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
						ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
						sregs [0] = var->inst_basereg;
						//mono_inst_set_src_registers (ins, sregs);
						ins->inst_offset = var->inst_offset;
					} else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
						ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
						sregs [1] = var->inst_basereg;
						//mono_inst_set_src_registers (ins, sregs);
						ins->inst_offset = var->inst_offset;
						if (MONO_IS_REAL_MOVE (ins)) {
							ins->opcode = OP_NOP;

						//printf ("%d ", srcindex); mono_print_ins (ins);

						sreg = alloc_dreg (cfg, stacktypes [regtype]);

						if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
							if (var->dreg == prev_dreg) {
								/*
								 * sreg refers to the value loaded by the load
								 * emitted below, but we need to use ins->dreg
								 * since it refers to the store emitted earlier.
								 */
							g_assert (sreg != -1);
							/* Remember the lvreg so later uses can reuse it */
							vreg_to_lvreg [var->dreg] = sreg;
							g_assert (lvregs_len < 1024);
							lvregs [lvregs_len ++] = var->dreg;

						sregs [srcindex] = sreg;
						//mono_inst_set_src_registers (ins, sregs);

						if (regtype == 'l') {
							/* 32 bit: load the two component words separately */
							NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
							mono_bblock_insert_before_ins (bb, ins, load_ins);
							NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
							mono_bblock_insert_before_ins (bb, ins, load_ins);
							use_ins = load_ins;

#if SIZEOF_REGISTER == 4
							g_assert (load_opcode != OP_LOADI8_MEMBASE);
							NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
							mono_bblock_insert_before_ins (bb, ins, load_ins);
							use_ins = load_ins;

					if (var->dreg < orig_next_vreg) {
						live_range_end [var->dreg] = use_ins;
						live_range_end_bb [var->dreg] = bb;

					if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
						MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
						tmp->inst_c1 = var->dreg;
						mono_bblock_insert_after_ins (bb, ins, tmp);

			mono_inst_set_src_registers (ins, sregs);

			if (dest_has_lvreg) {
				/* Sregs are processed: now it is safe to remember the dreg's lvreg */
				g_assert (ins->dreg != -1);
				vreg_to_lvreg [prev_dreg] = ins->dreg;
				g_assert (lvregs_len < 1024);
				lvregs [lvregs_len ++] = prev_dreg;
				dest_has_lvreg = FALSE;

			/* Undo the earlier dreg/sreg2 swap done for store opcodes */
			tmp_reg = ins->dreg;
			ins->dreg = ins->sreg2;
			ins->sreg2 = tmp_reg;

			if (MONO_IS_CALL (ins)) {
				/* Clear vreg_to_lvreg array */
				for (i = 0; i < lvregs_len; i++)
					vreg_to_lvreg [lvregs [i]] = 0;
			} else if (ins->opcode == OP_NOP) {
				MONO_INST_NULLIFY_SREGS (ins);

			if (cfg->verbose_level > 2)
				mono_print_ins_index (1, ins);

		/* Extend the live range based on the liveness info */
		if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
			for (i = 0; i < cfg->num_varinfo; i ++) {
				MonoMethodVar *vi = MONO_VARINFO (cfg, i);

				if (vreg_is_volatile (cfg, vi->vreg))
					/* The liveness info is incomplete */

				if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
					/* Live from at least the first ins of this bb */
					live_range_start [vi->vreg] = bb->code;
					live_range_start_bb [vi->vreg] = bb;

				if (mono_bitset_test_fast (bb->live_out_set, i)) {
					/* Live at least until the last ins of this bb */
					live_range_end [vi->vreg] = bb->last_ins;
					live_range_end_bb [vi->vreg] = bb;

#ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
	/*
	 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
	 * by storing the current native offset into MonoMethodVar->live_range_start/end.
	 */
	if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
		for (i = 0; i < cfg->num_varinfo; ++i) {
			int vreg = MONO_VARINFO (cfg, i)->vreg;

			if (live_range_start [vreg]) {
				MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
				ins->inst_c1 = vreg;
				mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);

			if (live_range_end [vreg]) {
				MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
				ins->inst_c1 = vreg;
				if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
					mono_add_ins_to_end (live_range_end_bb [vreg], ins);
					mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);

	/* These were allocated with g_new, not from the mempool */
	g_free (live_range_start);
	g_free (live_range_end);
	g_free (live_range_start_bb);
	g_free (live_range_end_bb);
12557 * - use 'iadd' instead of 'int_add'
12558 * - handling ovf opcodes: decompose in method_to_ir.
12559 * - unify iregs/fregs
12560 * -> partly done, the missing parts are:
12561 * - a more complete unification would involve unifying the hregs as well, so
12562 * code wouldn't need if (fp) all over the place. but that would mean the hregs
12563 * would no longer map to the machine hregs, so the code generators would need to
12564 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
12565 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
12566 * fp/non-fp branches speeds it up by about 15%.
12567 * - use sext/zext opcodes instead of shifts
12569 * - get rid of TEMPLOADs if possible and use vregs instead
12570 * - clean up usage of OP_P/OP_ opcodes
12571 * - cleanup usage of DUMMY_USE
12572 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
12574 * - set the stack type and allocate a dreg in the EMIT_NEW macros
12575 * - get rid of all the <foo>2 stuff when the new JIT is ready.
12576 * - make sure handle_stack_args () is called before the branch is emitted
12577 * - when the new IR is done, get rid of all unused stuff
12578 * - COMPARE/BEQ as separate instructions or unify them ?
12579 * - keeping them separate allows specialized compare instructions like
12580 * compare_imm, compare_membase
12581 * - most back ends unify fp compare+branch, fp compare+ceq
12582 * - integrate mono_save_args into inline_method
12583 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
12584 * - handle long shift opts on 32 bit platforms somehow: they require
12585 * 3 sregs (2 for arg1 and 1 for arg2)
12586 * - make byref a 'normal' type.
12587 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
12588 * variable if needed.
12589 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
12590 * like inline_method.
12591 * - remove inlining restrictions
12592 * - fix LNEG and enable cfold of INEG
12593 * - generalize x86 optimizations like ldelema as a peephole optimization
12594 * - add store_mem_imm for amd64
12595 * - optimize the loading of the interruption flag in the managed->native wrappers
12596 * - avoid special handling of OP_NOP in passes
12597 * - move code inserting instructions into one function/macro.
12598 * - try a coalescing phase after liveness analysis
12599 * - add float -> vreg conversion + local optimizations on !x86
12600 * - figure out how to handle decomposed branches during optimizations, ie.
12601 * compare+branch, op_jump_table+op_br etc.
12602 * - promote RuntimeXHandles to vregs
12603 * - vtype cleanups:
12604 * - add a NEW_VARLOADA_VREG macro
12605 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
12606 * accessing vtype fields.
12607 * - get rid of I8CONST on 64 bit platforms
12608 * - dealing with the increase in code size due to branches created during opcode
12610 * - use extended basic blocks
12611 * - all parts of the JIT
12612 * - handle_global_vregs () && local regalloc
12613 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
12614 * - sources of increase in code size:
12617 * - isinst and castclass
12618 * - lvregs not allocated to global registers even if used multiple times
12619 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
12621 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
12622 * - add all micro optimizations from the old JIT
12623 * - put tree optimizations into the deadce pass
12624 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
12625 * specific function.
12626 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
12627 * fcompare + branchCC.
12628 * - create a helper function for allocating a stack slot, taking into account
12629 * MONO_CFG_HAS_SPILLUP.
12631 * - merge the ia64 switch changes.
12632 * - optimize mono_regstate2_alloc_int/float.
12633 * - fix the pessimistic handling of variables accessed in exception handler blocks.
12634 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
12635 * parts of the tree could be separated by other instructions, killing the tree
12636 * arguments, or stores killing loads etc. Also, should we fold loads into other
12637 * instructions if the result of the load is used multiple times ?
12638 * - make the REM_IMM optimization in mini-x86.c arch-independent.
12639 * - LAST MERGE: 108395.
12640 * - when returning vtypes in registers, generate IR and append it to the end of the
12641 * last bb instead of doing it in the epilog.
12642 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
12650 - When to decompose opcodes:
12651 - earlier: this makes some optimizations hard to implement, since the low level IR
 no longer contains the necessary information. But it is easier to do.
12653 - later: harder to implement, enables more optimizations.
12654 - Branches inside bblocks:
12655 - created when decomposing complex opcodes.
12656 - branches to another bblock: harmless, but not tracked by the branch
12657 optimizations, so need to branch to a label at the start of the bblock.
12658 - branches to inside the same bblock: very problematic, trips up the local
 reg allocator. Can be fixed by splitting the current bblock, but that is a
12660 complex operation, since some local vregs can become global vregs etc.
12661 - Local/global vregs:
12662 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
12663 local register allocator.
12664 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
12665 structure, created by mono_create_var (). Assigned to hregs or the stack by
12666 the global register allocator.
12667 - When to do optimizations like alu->alu_imm:
12668 - earlier -> saves work later on since the IR will be smaller/simpler
12669 - later -> can work on more instructions
12670 - Handling of valuetypes:
12671 - When a vtype is pushed on the stack, a new temporary is created, an
12672 instruction computing its address (LDADDR) is emitted and pushed on
12673 the stack. Need to optimize cases when the vtype is used immediately as in
12674 argument passing, stloc etc.
12675 - Instead of the to_end stuff in the old JIT, simply call the function handling
12676 the values on the stack before emitting the last instruction of the bb.
12679 #endif /* DISABLE_JIT */