2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/assembly.h>
38 #include <mono/metadata/attrdefs.h>
39 #include <mono/metadata/loader.h>
40 #include <mono/metadata/tabledefs.h>
41 #include <mono/metadata/class.h>
42 #include <mono/metadata/object.h>
43 #include <mono/metadata/exception.h>
44 #include <mono/metadata/opcodes.h>
45 #include <mono/metadata/mono-endian.h>
46 #include <mono/metadata/tokentype.h>
47 #include <mono/metadata/tabledefs.h>
48 #include <mono/metadata/marshal.h>
49 #include <mono/metadata/debug-helpers.h>
50 #include <mono/metadata/mono-debug.h>
51 #include <mono/metadata/gc-internal.h>
52 #include <mono/metadata/security-manager.h>
53 #include <mono/metadata/threads-types.h>
54 #include <mono/metadata/security-core-clr.h>
55 #include <mono/metadata/monitor.h>
56 #include <mono/metadata/profiler-private.h>
57 #include <mono/metadata/profiler.h>
58 #include <mono/metadata/debug-mono-symfile.h>
59 #include <mono/utils/mono-compiler.h>
60 #include <mono/utils/mono-memory-model.h>
61 #include <mono/metadata/mono-basic-block.h>
68 #include "jit-icalls.h"
70 #include "debugger-agent.h"
72 #define BRANCH_COST 10
73 #define INLINE_LENGTH_LIMIT 20
74 #define INLINE_FAILURE(msg) do { \
75 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE)) { \
76 if (cfg->verbose_level >= 2) \
77 printf ("inline failed: %s\n", msg); \
78 goto inline_failure; \
81 #define CHECK_CFG_EXCEPTION do {\
82 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
85 #define METHOD_ACCESS_FAILURE do { \
86 char *method_fname = mono_method_full_name (method, TRUE); \
87 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
88 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS); \
89 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
90 g_free (method_fname); \
91 g_free (cil_method_fname); \
92 goto exception_exit; \
94 #define FIELD_ACCESS_FAILURE do { \
95 char *method_fname = mono_method_full_name (method, TRUE); \
96 char *field_fname = mono_field_full_name (field); \
97 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS); \
98 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
99 g_free (method_fname); \
100 g_free (field_fname); \
101 goto exception_exit; \
103 #define GENERIC_SHARING_FAILURE(opcode) do { \
104 if (cfg->generic_sharing_context) { \
105 if (cfg->verbose_level > 2) \
106 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
107 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
108 goto exception_exit; \
111 #define GSHAREDVT_FAILURE(opcode) do { \
112 if (cfg->gsharedvt) { \
113 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __FILE__, __LINE__); \
114 if (cfg->verbose_level >= 2) \
115 printf ("%s\n", cfg->exception_message); \
116 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
117 goto exception_exit; \
120 #define OUT_OF_MEMORY_FAILURE do { \
121 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
122 goto exception_exit; \
124 #define DISABLE_AOT(cfg) do { \
125 if ((cfg)->verbose_level >= 2) \
126 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
127 (cfg)->disable_aot = TRUE; \
130 /* Determine whenever 'ins' represents a load of the 'this' argument */
131 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations: helpers mapping CIL ldind/stind opcodes to the
 * corresponding *_MEMBASE load/store opcodes (defined later in this file). */
133 static int ldind_to_load_membase (int opcode);
134 static int stind_to_store_membase (int opcode);
136 int mono_op_to_op_imm (int opcode);
137 int mono_op_to_op_imm_noemul (int opcode);
139 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
141 /* helper methods signatures */
/* Lazily built by mono_create_helper_signatures () below; each holds the
 * icall signature used when emitting calls to the matching trampoline. */
142 static MonoMethodSignature *helper_sig_class_init_trampoline = NULL;
143 static MonoMethodSignature *helper_sig_domain_get = NULL;
144 static MonoMethodSignature *helper_sig_generic_class_init_trampoline = NULL;
145 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm = NULL;
146 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline = NULL;
147 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline = NULL;
148 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm = NULL;
151 * Instruction metadata
159 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
160 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
166 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
171 /* keep in sync with the enum in mini.h */
174 #include "mini-ops.h"
179 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
180 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
182 * This should contain the index of the last sreg + 1. This is not the same
183 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
185 const gint8 ins_sreg_counts[] = {
186 #include "mini-ops.h"
191 #define MONO_INIT_VARINFO(vi,id) do { \
192 (vi)->range.first_use.pos.bid = 0xffff; \
/* Copy the first three entries of REGS into the source registers of INS.
 * REGS must hold at least 3 valid entries. */
198 mono_inst_set_src_registers (MonoInst *ins, int *regs)
200 ins->sreg1 = regs [0];
201 ins->sreg2 = regs [1];
202 ins->sreg3 = regs [2];
/* Allocate a fresh integer vreg in CFG (exported wrapper over alloc_ireg ()). */
206 mono_alloc_ireg (MonoCompile *cfg)
208 return alloc_ireg (cfg);
/* Allocate a fresh floating point vreg in CFG. */
212 mono_alloc_freg (MonoCompile *cfg)
214 return alloc_freg (cfg);
/* Allocate a fresh pointer-sized vreg in CFG. */
218 mono_alloc_preg (MonoCompile *cfg)
220 return alloc_preg (cfg);
/* Allocate a fresh destination vreg whose register class is chosen from
 * the eval-stack type STACK_TYPE. */
224 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
226 return alloc_dreg (cfg, stack_type);
230 * mono_alloc_ireg_ref:
232 * Allocate an IREG, and mark it as holding a GC ref.
/* Exported wrapper over the static alloc_ireg_ref () helper. */
235 mono_alloc_ireg_ref (MonoCompile *cfg)
237 return alloc_ireg_ref (cfg);
241 * mono_alloc_ireg_mp:
243 * Allocate an IREG, and mark it as holding a managed pointer.
/* Exported wrapper over the static alloc_ireg_mp () helper. */
246 mono_alloc_ireg_mp (MonoCompile *cfg)
248 return alloc_ireg_mp (cfg);
252 * mono_alloc_ireg_copy:
254 * Allocate an IREG with the same GC type as VREG.
257 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
/* Propagate the GC tracking kind of VREG (GC ref / managed pointer /
 * plain integer) onto the newly allocated register. */
259 if (vreg_is_ref (cfg, vreg))
260 return alloc_ireg_ref (cfg);
261 else if (vreg_is_mp (cfg, vreg))
262 return alloc_ireg_mp (cfg);
264 return alloc_ireg (cfg);
/*
 * mono_type_to_regmove:
 *
 * Map TYPE to the move opcode used when copying a value of that type
 * between vregs.  NOTE(review): several case labels and the returned
 * opcodes fall on lines not visible in this view; the case structure
 * mirrors type_to_eval_stack_type () below — confirm against the full
 * source before relying on specifics.
 */
268 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
273 type = mini_replace_type (type);
275 switch (type->type) {
278 case MONO_TYPE_BOOLEAN:
290 case MONO_TYPE_FNPTR:
292 case MONO_TYPE_CLASS:
293 case MONO_TYPE_STRING:
294 case MONO_TYPE_OBJECT:
295 case MONO_TYPE_SZARRAY:
296 case MONO_TYPE_ARRAY:
300 #if SIZEOF_REGISTER == 8
309 case MONO_TYPE_VALUETYPE:
/* Enums move like their underlying integral type. */
310 if (type->data.klass->enumtype) {
311 type = mono_class_enum_basetype (type->data.klass);
314 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
317 case MONO_TYPE_TYPEDBYREF:
/* Inflated generics are resolved through their generic container. */
319 case MONO_TYPE_GENERICINST:
320 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables are only valid here under generic sharing. */
324 g_assert (cfg->generic_sharing_context);
325 if (mini_type_var_is_vt (cfg, type))
330 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb:
 *
 * Debug helper: dump a basic block's predecessor/successor edges and
 * its instruction list to stdout, prefixed by MSG.
 */
336 mono_print_bb (MonoBasicBlock *bb, const char *msg)
341 printf ("\n%s %d: [IN: ", msg, bb->block_num);
/* Predecessor edges, each printed with its depth-first number. */
342 for (i = 0; i < bb->in_count; ++i)
343 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
/* Successor edges. */
345 for (i = 0; i < bb->out_count; ++i)
346 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
/* Finally, every instruction in the block. */
348 for (tree = bb->code; tree; tree = tree->next)
349 mono_print_ins_index (-1, tree);
/*
 * mono_create_helper_signatures:
 *
 * Build the icall signatures for the trampoline helpers declared at the
 * top of this file.  The strings are icall signature descriptors of the
 * form "<ret> <arg>...".
 */
353 mono_create_helper_signatures (void)
355 helper_sig_domain_get = mono_create_icall_signature ("ptr");
356 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
357 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
358 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
359 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
360 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
361 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
365 * When using gsharedvt, some instatiations might be verifiable, and some might be not. i.e.
366 * foo<T> (int i) { ldarg.0; box T; }
368 #define UNVERIFIED do { \
369 if (cfg->gsharedvt) { \
370 if (cfg->verbose_level > 2) \
371 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
372 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
373 goto exception_exit; \
375 if (mini_get_debug_options ()->break_on_unverified) \
381 #define LOAD_ERROR do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto load_error; } while (0)
383 #define TYPE_LOAD_ERROR(klass) do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else { cfg->exception_ptr = klass; goto load_error; } } while (0)
385 #define GET_BBLOCK(cfg,tblock,ip) do { \
386 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
388 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
389 NEW_BBLOCK (cfg, (tblock)); \
390 (tblock)->cil_code = (ip); \
391 ADD_BBLOCK (cfg, (tblock)); \
395 #if defined(TARGET_X86) || defined(TARGET_AMD64)
396 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
397 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
398 (dest)->dreg = alloc_ireg_mp ((cfg)); \
399 (dest)->sreg1 = (sr1); \
400 (dest)->sreg2 = (sr2); \
401 (dest)->inst_imm = (imm); \
402 (dest)->backend.shift_amount = (shift); \
403 MONO_ADD_INS ((cfg)->cbb, (dest)); \
407 #if SIZEOF_REGISTER == 8
408 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
409 /* FIXME: Need to add many more cases */ \
410 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
412 int dr = alloc_preg (cfg); \
413 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
414 (ins)->sreg2 = widen->dreg; \
418 #define ADD_WIDEN_OP(ins, arg1, arg2)
421 #define ADD_BINOP(op) do { \
422 MONO_INST_NEW (cfg, ins, (op)); \
424 ins->sreg1 = sp [0]->dreg; \
425 ins->sreg2 = sp [1]->dreg; \
426 type_from_op (ins, sp [0], sp [1]); \
428 /* Have to insert a widening op */ \
429 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
430 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
431 MONO_ADD_INS ((cfg)->cbb, (ins)); \
432 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
435 #define ADD_UNOP(op) do { \
436 MONO_INST_NEW (cfg, ins, (op)); \
438 ins->sreg1 = sp [0]->dreg; \
439 type_from_op (ins, sp [0], NULL); \
441 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
442 MONO_ADD_INS ((cfg)->cbb, (ins)); \
443 *sp++ = mono_decompose_opcode (cfg, ins); \
446 #define ADD_BINCOND(next_block) do { \
449 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
450 cmp->sreg1 = sp [0]->dreg; \
451 cmp->sreg2 = sp [1]->dreg; \
452 type_from_op (cmp, sp [0], sp [1]); \
454 type_from_op (ins, sp [0], sp [1]); \
455 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
456 GET_BBLOCK (cfg, tblock, target); \
457 link_bblock (cfg, bblock, tblock); \
458 ins->inst_true_bb = tblock; \
459 if ((next_block)) { \
460 link_bblock (cfg, bblock, (next_block)); \
461 ins->inst_false_bb = (next_block); \
462 start_new_bblock = 1; \
464 GET_BBLOCK (cfg, tblock, ip); \
465 link_bblock (cfg, bblock, tblock); \
466 ins->inst_false_bb = tblock; \
467 start_new_bblock = 2; \
469 if (sp != stack_start) { \
470 handle_stack_args (cfg, stack_start, sp - stack_start); \
471 CHECK_UNVERIFIABLE (cfg); \
473 MONO_ADD_INS (bblock, cmp); \
474 MONO_ADD_INS (bblock, ins); \
478 * link_bblock: Links two basic blocks
480 * links two basic blocks in the control flow graph, the 'from'
481 * argument is the starting block and the 'to' argument is the block
482 * the control flow ends to after 'from'.
485 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
487 MonoBasicBlock **newa;
/* Verbose tracing of the edge being added; entry/exit blocks have no
 * cil_code, hence the four printf variants. */
491 if (from->cil_code) {
493 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
495 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
498 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
500 printf ("edge from entry to exit\n");
/* If the edge already exists in from->out_bb, nothing to add. */
505 for (i = 0; i < from->out_count; ++i) {
506 if (to == from->out_bb [i]) {
/* Grow the successor array by one; mempool arrays are not resizable,
 * so a fresh copy is allocated and the old entries copied over. */
512 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
513 for (i = 0; i < from->out_count; ++i) {
514 newa [i] = from->out_bb [i];
/* Mirror the same bookkeeping on the predecessor side (to->in_bb). */
522 for (i = 0; i < to->in_count; ++i) {
523 if (from == to->in_bb [i]) {
529 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
530 for (i = 0; i < to->in_count; ++i) {
531 newa [i] = to->in_bb [i];
/* Exported wrapper around the static link_bblock () above. */
540 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
542 link_bblock (cfg, from, to);
546 * mono_find_block_region:
548 * We mark each basic block with a region ID. We use that to avoid BB
549 * optimizations when blocks are in different regions.
552 * A region token that encodes where this region is, and information
553 * about the clause owner for this block.
555 * The region encodes the try/catch/filter clause that owns this block
556 * as well as the type. -1 is a special value that represents a block
557 * that is in none of try/catch/filter.
560 mono_find_block_region (MonoCompile *cfg, int offset)
562 MonoMethodHeader *header = cfg->header;
563 MonoExceptionClause *clause;
/* Scan every EH clause; a returned token has the shape
 * ((clause_index + 1) << 8) | region_kind | clause->flags. */
566 for (i = 0; i < header->num_clauses; ++i) {
567 clause = &header->clauses [i];
/* Filter blocks live between filter_offset and handler_offset. */
568 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
569 (offset < (clause->handler_offset)))
570 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
/* Handler body: finally / fault / catch, by clause kind. */
572 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
573 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
574 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
575 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
576 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
578 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Inside the protected (try) range of the clause. */
581 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
582 return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block:
 *
 * Collect the EH clauses of kind TYPE (e.g. finally) whose protected
 * range contains IP but not TARGET, i.e. the handlers that must run
 * when control transfers from IP to TARGET.  Returns a GList of
 * MonoExceptionClause* accumulated in RES.
 */
589 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
591 MonoMethodHeader *header = cfg->header;
592 MonoExceptionClause *clause;
596 for (i = 0; i < header->num_clauses; ++i) {
597 clause = &header->clauses [i];
598 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
599 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
600 if (clause->flags == type)
601 res = g_list_append (res, clause);
/*
 * mono_create_spvar_for_region:
 *
 * Get or lazily create the variable used to save the stack pointer for
 * the exception REGION; cached in cfg->spvars keyed by region token.
 */
608 mono_create_spvar_for_region (MonoCompile *cfg, int region)
612 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
616 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
617 /* prevent it from being register allocated */
618 var->flags |= MONO_INST_VOLATILE;
620 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up the exception-object variable for the handler at OFFSET, or
 * NULL if none was created yet (see mono_create_exvar_for_offset ()). */
624 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
626 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/*
 * mono_create_exvar_for_offset:
 *
 * Get or lazily create the variable holding the exception object for
 * the handler at OFFSET; cached in cfg->exvars keyed by IL offset.
 */
630 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
634 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
638 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
639 /* prevent it from being register allocated */
640 var->flags |= MONO_INST_VOLATILE;
642 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
648 * Returns the type used in the eval stack when @type is loaded.
649 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
652 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
656 type = mini_replace_type (type);
657 inst->klass = klass = mono_class_from_mono_type (type);
/* Byref arguments are managed pointers on the eval stack. */
659 inst->type = STACK_MP;
664 switch (type->type) {
666 inst->type = STACK_INV;
670 case MONO_TYPE_BOOLEAN:
676 inst->type = STACK_I4;
681 case MONO_TYPE_FNPTR:
682 inst->type = STACK_PTR;
684 case MONO_TYPE_CLASS:
685 case MONO_TYPE_STRING:
686 case MONO_TYPE_OBJECT:
687 case MONO_TYPE_SZARRAY:
688 case MONO_TYPE_ARRAY:
689 inst->type = STACK_OBJ;
693 inst->type = STACK_I8;
697 inst->type = STACK_R8;
699 case MONO_TYPE_VALUETYPE:
/* Enums load as their underlying integral type. */
700 if (type->data.klass->enumtype) {
701 type = mono_class_enum_basetype (type->data.klass);
705 inst->type = STACK_VTYPE;
708 case MONO_TYPE_TYPEDBYREF:
709 inst->klass = mono_defaults.typed_reference_class;
710 inst->type = STACK_VTYPE;
712 case MONO_TYPE_GENERICINST:
713 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables: only valid under generic sharing; gsharedvt types
 * behave as value types, other type variables as references. */
717 g_assert (cfg->generic_sharing_context);
718 if (mini_is_gsharedvt_type (cfg, type)) {
719 g_assert (cfg->gsharedvt);
720 inst->type = STACK_VTYPE;
722 inst->type = STACK_OBJ;
726 g_error ("unknown type 0x%02x in eval stack type", type->type);
731 * The following tables are used to quickly validate the IL code in type_from_op ().
734 bin_num_table [STACK_MAX] [STACK_MAX] = {
735 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
736 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
737 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
738 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
739 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
740 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
741 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
742 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
747 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
750 /* reduce the size of this table */
752 bin_int_table [STACK_MAX] [STACK_MAX] = {
753 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
754 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
755 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
756 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
757 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
758 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
759 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
760 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
764 bin_comp_table [STACK_MAX] [STACK_MAX] = {
765 /* Inv i L p F & O vt */
767 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
768 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
769 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
770 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
771 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
772 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
773 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
776 /* reduce the size of this table */
778 shift_table [STACK_MAX] [STACK_MAX] = {
779 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
780 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
781 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
782 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
783 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
784 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
785 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
786 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
790 * Tables to map from the non-specific opcode to the matching
791 * type-specific opcode.
793 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
795 binops_op_map [STACK_MAX] = {
796 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
799 /* handles from CEE_NEG to CEE_CONV_U8 */
801 unops_op_map [STACK_MAX] = {
802 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
805 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
807 ovfops_op_map [STACK_MAX] = {
808 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
811 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
813 ovf2ops_op_map [STACK_MAX] = {
814 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
817 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
819 ovf3ops_op_map [STACK_MAX] = {
820 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
823 /* handles from CEE_BEQ to CEE_BLT_UN */
825 beqops_op_map [STACK_MAX] = {
826 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
829 /* handles from CEE_CEQ to CEE_CLT_UN */
831 ceqops_op_map [STACK_MAX] = {
832 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
836 * Sets ins->type (the type on the eval stack) according to the
837 * type of the opcode and the arguments to it.
838 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
840 * FIXME: this function sets ins->type unconditionally in some cases, but
841 * it should set it to invalid for some types (a conv.x on an object)
844 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
/* Dispatch on the generic opcode; the *_op_map tables turn a generic
 * opcode into the type-specific one by adding a per-stack-type offset. */
846 switch (ins->opcode) {
853 /* FIXME: check unverifiable args for STACK_MP */
854 ins->type = bin_num_table [src1->type] [src2->type];
855 ins->opcode += binops_op_map [ins->type];
862 ins->type = bin_int_table [src1->type] [src2->type];
863 ins->opcode += binops_op_map [ins->type];
868 ins->type = shift_table [src1->type] [src2->type];
869 ins->opcode += binops_op_map [ins->type];
/* Comparisons: pick long/float/int compare from the operand types;
 * on 64-bit, pointer-sized operands use the long compare. */
874 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
875 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
876 ins->opcode = OP_LCOMPARE;
877 else if (src1->type == STACK_R8)
878 ins->opcode = OP_FCOMPARE;
880 ins->opcode = OP_ICOMPARE;
882 case OP_ICOMPARE_IMM:
/* Immediate compare has a single source, hence src1 used twice. */
883 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
884 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
885 ins->opcode = OP_LCOMPARE_IMM;
897 ins->opcode += beqops_op_map [src1->type];
900 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
901 ins->opcode += ceqops_op_map [src1->type];
/* Unsigned/unordered compares only accept entries marked 1 in the table. */
907 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
908 ins->opcode += ceqops_op_map [src1->type];
912 ins->type = neg_table [src1->type];
913 ins->opcode += unops_op_map [ins->type];
916 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
917 ins->type = src1->type;
919 ins->type = STACK_INV;
920 ins->opcode += unops_op_map [ins->type];
926 ins->type = STACK_I4;
927 ins->opcode += unops_op_map [src1->type];
930 ins->type = STACK_R8;
931 switch (src1->type) {
934 ins->opcode = OP_ICONV_TO_R_UN;
937 ins->opcode = OP_LCONV_TO_R_UN;
/* Overflow-checked conversions to small ints / I4. */
941 case CEE_CONV_OVF_I1:
942 case CEE_CONV_OVF_U1:
943 case CEE_CONV_OVF_I2:
944 case CEE_CONV_OVF_U2:
945 case CEE_CONV_OVF_I4:
946 case CEE_CONV_OVF_U4:
947 ins->type = STACK_I4;
948 ins->opcode += ovf3ops_op_map [src1->type];
950 case CEE_CONV_OVF_I_UN:
951 case CEE_CONV_OVF_U_UN:
952 ins->type = STACK_PTR;
953 ins->opcode += ovf2ops_op_map [src1->type];
955 case CEE_CONV_OVF_I1_UN:
956 case CEE_CONV_OVF_I2_UN:
957 case CEE_CONV_OVF_I4_UN:
958 case CEE_CONV_OVF_U1_UN:
959 case CEE_CONV_OVF_U2_UN:
960 case CEE_CONV_OVF_U4_UN:
961 ins->type = STACK_I4;
962 ins->opcode += ovf2ops_op_map [src1->type];
965 ins->type = STACK_PTR;
966 switch (src1->type) {
968 ins->opcode = OP_ICONV_TO_U;
/* Pointer-sized source: a real conversion is only needed on 64-bit. */
972 #if SIZEOF_VOID_P == 8
973 ins->opcode = OP_LCONV_TO_U;
975 ins->opcode = OP_MOVE;
979 ins->opcode = OP_LCONV_TO_U;
982 ins->opcode = OP_FCONV_TO_U;
988 ins->type = STACK_I8;
989 ins->opcode += unops_op_map [src1->type];
991 case CEE_CONV_OVF_I8:
992 case CEE_CONV_OVF_U8:
993 ins->type = STACK_I8;
994 ins->opcode += ovf3ops_op_map [src1->type];
996 case CEE_CONV_OVF_U8_UN:
997 case CEE_CONV_OVF_I8_UN:
998 ins->type = STACK_I8;
999 ins->opcode += ovf2ops_op_map [src1->type];
1003 ins->type = STACK_R8;
1004 ins->opcode += unops_op_map [src1->type];
1007 ins->type = STACK_R8;
1011 ins->type = STACK_I4;
1012 ins->opcode += ovfops_op_map [src1->type];
1015 case CEE_CONV_OVF_I:
1016 case CEE_CONV_OVF_U:
1017 ins->type = STACK_PTR;
1018 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic; floats cannot participate. */
1021 case CEE_ADD_OVF_UN:
1023 case CEE_MUL_OVF_UN:
1025 case CEE_SUB_OVF_UN:
1026 ins->type = bin_num_table [src1->type] [src2->type];
1027 ins->opcode += ovfops_op_map [src1->type];
1028 if (ins->type == STACK_R8)
1029 ins->type = STACK_INV;
1031 case OP_LOAD_MEMBASE:
1032 ins->type = STACK_PTR;
1034 case OP_LOADI1_MEMBASE:
1035 case OP_LOADU1_MEMBASE:
1036 case OP_LOADI2_MEMBASE:
1037 case OP_LOADU2_MEMBASE:
1038 case OP_LOADI4_MEMBASE:
1039 case OP_LOADU4_MEMBASE:
1040 ins->type = STACK_PTR;
1042 case OP_LOADI8_MEMBASE:
1043 ins->type = STACK_I8;
1045 case OP_LOADR4_MEMBASE:
1046 case OP_LOADR8_MEMBASE:
1047 ins->type = STACK_R8;
1050 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers get a conservative default klass. */
1054 if (ins->type == STACK_MP)
1055 ins->klass = mono_defaults.object_class;
1060 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1066 param_table [STACK_MAX] [STACK_MAX] = {
/*
 * check_values_to_signature:
 *
 * Verification helper: check that the eval-stack types in ARGS are
 * compatible with the parameter types of SIG.  NOTE(review): the return
 * statements are on lines not visible in this view — presumably returns
 * 0 on mismatch and non-zero on success; confirm against the full source.
 */
1071 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1075 switch (args->type) {
1085 for (i = 0; i < sig->param_count; ++i) {
1086 switch (args [i].type) {
/* Managed pointers on the stack require a byref parameter (and vice versa). */
1090 if (!sig->params [i]->byref)
1094 if (sig->params [i]->byref)
1096 switch (sig->params [i]->type) {
1097 case MONO_TYPE_CLASS:
1098 case MONO_TYPE_STRING:
1099 case MONO_TYPE_OBJECT:
1100 case MONO_TYPE_SZARRAY:
1101 case MONO_TYPE_ARRAY:
1108 if (sig->params [i]->byref)
/* R8 stack values must match a floating point parameter. */
1110 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1119 /*if (!param_table [args [i].type] [sig->params [i]->type])
1127 * When we need a pointer to the current domain many times in a method, we
1128 * call mono_domain_get() once and we store the result in a local variable.
1129 * This function returns the variable that represents the MonoDomain*.
1131 inline static MonoInst *
1132 mono_get_domainvar (MonoCompile *cfg)
/* Lazily created on first use; subsequent calls return the cached var. */
1134 if (!cfg->domainvar)
1135 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1136 return cfg->domainvar;
1140 * The got_var contains the address of the Global Offset Table when AOT
1144 mono_get_got_var (MonoCompile *cfg)
/* Only architectures defining MONO_ARCH_NEED_GOT_VAR materialize the GOT
 * address in a variable, and only when compiling AOT code. */
1146 #ifdef MONO_ARCH_NEED_GOT_VAR
1147 if (!cfg->compile_aot)
1149 if (!cfg->got_var) {
1150 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1152 return cfg->got_var;
/* Get or lazily create the variable holding the runtime generic context
 * (rgctx); only meaningful under generic sharing. */
1159 mono_get_vtable_var (MonoCompile *cfg)
1161 g_assert (cfg->generic_sharing_context);
1163 if (!cfg->rgctx_var) {
1164 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1165 /* force the var to be stack allocated */
1166 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1169 return cfg->rgctx_var;
/* Map the eval-stack type of INS back to a MonoType (inverse of
 * type_to_eval_stack_type () above, up to exact class information). */
1173 type_from_stack_type (MonoInst *ins) {
1174 switch (ins->type) {
1175 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1176 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1177 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1178 case STACK_R8: return &mono_defaults.double_class->byval_arg;
/* Managed pointer: use the byref (this_arg) type of the klass. */
1180 return &ins->klass->this_arg;
1181 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1182 case STACK_VTYPE: return &ins->klass->byval_arg;
1184 g_error ("stack type %d to monotype not handled\n", ins->type);
/* Map a MonoType to its eval-stack STACK_* kind.  NOTE(review): the
 * return statements for most cases fall on lines not visible in this
 * view of the file. */
1189 static G_GNUC_UNUSED int
1190 type_to_stack_type (MonoType *t)
1192 t = mono_type_get_underlying_type (t);
1196 case MONO_TYPE_BOOLEAN:
1199 case MONO_TYPE_CHAR:
1206 case MONO_TYPE_FNPTR:
1208 case MONO_TYPE_CLASS:
1209 case MONO_TYPE_STRING:
1210 case MONO_TYPE_OBJECT:
1211 case MONO_TYPE_SZARRAY:
1212 case MONO_TYPE_ARRAY:
1220 case MONO_TYPE_VALUETYPE:
1221 case MONO_TYPE_TYPEDBYREF:
1223 case MONO_TYPE_GENERICINST:
1224 if (mono_type_generic_inst_is_valuetype (t))
1230 g_assert_not_reached ();
/* Map a CEE_LDELEM/CEE_STELEM opcode to the element class it accesses.
 * NOTE(review): most case labels are on lines not visible here; only the
 * REF cases are shown explicitly. */
1237 array_access_to_klass (int opcode)
1241 return mono_defaults.byte_class;
1243 return mono_defaults.uint16_class;
1246 return mono_defaults.int_class;
1249 return mono_defaults.sbyte_class;
1252 return mono_defaults.int16_class;
1255 return mono_defaults.int32_class;
1257 return mono_defaults.uint32_class;
1260 return mono_defaults.int64_class;
1263 return mono_defaults.single_class;
1266 return mono_defaults.double_class;
1267 case CEE_LDELEM_REF:
1268 case CEE_STELEM_REF:
1269 return mono_defaults.object_class;
1271 g_assert_not_reached ();
1277 * We try to share variables when possible
/*
 * mono_compile_get_interface_var:
 *
 * Return a local variable for stack slot SLOT holding a value of INS's
 * stack type, reusing a previously created one (cached in cfg->intvars)
 * when the slot and type match.
 */
1280 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1285 /* inlining can result in deeper stacks */
1286 if (slot >= cfg->header->max_stack)
1287 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Cache key: one entry per (stack type, slot) pair. */
1289 pos = ins->type - 1 + slot * STACK_MAX;
1291 switch (ins->type) {
1298 if ((vnum = cfg->intvars [pos]))
1299 return cfg->varinfo [vnum];
1300 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1301 cfg->intvars [pos] = res->inst_c0;
/* Non-shareable stack types always get a fresh variable. */
1304 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 *
 * Record an image/token pair for KEY in cfg->token_info_hash so the AOT
 * compiler can later reconstruct the metadata reference from it.
 */
1310 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1313 * Don't use this if a generic_context is set, since that means AOT can't
1314 * look up the method using just the image+token.
1315 * table == 0 means this is a reference made from a wrapper.
1317 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1318 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1319 jump_info_token->image = image;
1320 jump_info_token->token = token;
1321 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1326 * This function is called to handle items that are left on the evaluation stack
1327 * at basic block boundaries. What happens is that we save the values to local variables
1328 * and we reload them later when first entering the target basic block (with the
1329 * handle_loaded_temps () function).
1330 * A single joint point will use the same variables (stored in the array bb->out_stack or
1331 * bb->in_stack, if the basic block is before or after the joint point).
1333 * This function needs to be called _before_ emitting the last instruction of
1334 * the bb (i.e. before emitting a branch).
1335 * If the stack merge fails at a join point, cfg->unverifiable is set.
1338 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1341 MonoBasicBlock *bb = cfg->cbb;
1342 MonoBasicBlock *outb;
1343 MonoInst *inst, **locals;
1348 if (cfg->verbose_level > 3)
1349 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* Phase 1: pick the set of variables (bb->out_stack) used to spill the stack,
 * reusing a successor's in_stack when one already exists. */
1350 if (!bb->out_scount) {
1351 bb->out_scount = count;
1352 //printf ("bblock %d has out:", bb->block_num);
1354 for (i = 0; i < bb->out_count; ++i) {
1355 outb = bb->out_bb [i];
1356 /* exception handlers are linked, but they should not be considered for stack args */
1357 if (outb->flags & BB_EXCEPTION_HANDLER)
1359 //printf (" %d", outb->block_num);
1360 if (outb->in_stack) {
/* A successor already has variables assigned: share them */
1362 bb->out_stack = outb->in_stack;
/* No successor had an in_stack: allocate fresh variables for each slot */
1368 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1369 for (i = 0; i < count; ++i) {
1371 * try to reuse temps already allocated for this purpouse, if they occupy the same
1372 * stack slot and if they are of the same type.
1373 * This won't cause conflicts since if 'local' is used to
1374 * store one of the values in the in_stack of a bblock, then
1375 * the same variable will be used for the same outgoing stack
1377 * This doesn't work when inlining methods, since the bblocks
1378 * in the inlined methods do not inherit their in_stack from
1379 * the bblock they are inlined to. See bug #58863 for an
1382 if (cfg->inlined_method)
1383 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1385 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Phase 2: propagate out_stack to every successor that lacks an in_stack;
 * a depth mismatch at a join point makes the method unverifiable. */
1390 for (i = 0; i < bb->out_count; ++i) {
1391 outb = bb->out_bb [i];
1392 /* exception handlers are linked, but they should not be considered for stack args */
1393 if (outb->flags & BB_EXCEPTION_HANDLER)
1395 if (outb->in_scount) {
1396 if (outb->in_scount != bb->out_scount) {
1397 cfg->unverifiable = TRUE;
1400 continue; /* check they are the same locals */
1402 outb->in_scount = count;
1403 outb->in_stack = bb->out_stack;
1406 locals = bb->out_stack;
/* Phase 3: emit the stores of the live stack items into the chosen variables */
1408 for (i = 0; i < count; ++i) {
1409 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1410 inst->cil_code = sp [i]->cil_code;
1411 sp [i] = locals [i];
1412 if (cfg->verbose_level > 3)
1413 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1417 * It is possible that the out bblocks already have in_stack assigned, and
1418 * the in_stacks differ. In this case, we will store to all the different
1425 /* Find a bblock which has a different in_stack */
1427 while (bindex < bb->out_count) {
1428 outb = bb->out_bb [bindex];
1429 /* exception handlers are linked, but they should not be considered for stack args */
1430 if (outb->flags & BB_EXCEPTION_HANDLER) {
1434 if (outb->in_stack != locals) {
/* Successor uses a different variable set: store into that set too */
1435 for (i = 0; i < count; ++i) {
1436 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1437 inst->cil_code = sp [i]->cil_code;
1438 sp [i] = locals [i];
1439 if (cfg->verbose_level > 3)
1440 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1442 locals = outb->in_stack;
1451 /* Emit code which loads interface_offsets [klass->interface_id]
1452 * The array is stored in memory before vtable.
1455 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1457 if (cfg->compile_aot) {
/* AOT: the interface id is only known at load time, so fetch it as an
 * (adjusted, already negated/scaled) constant and index off the vtable. */
1458 int ioffset_reg = alloc_preg (cfg);
1459 int iid_reg = alloc_preg (cfg);
1461 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1462 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1463 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT: interface_id is a compile-time constant, load at a fixed negative offset */
1466 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_interface_bitmap_check:
 * Emit IR that loads into INTF_BIT_REG a nonzero value iff the interface
 * bitmap found at BASE_REG + OFFSET has the bit for KLASS's interface id
 * set. Three strategies: an icall for compressed bitmaps, a runtime
 * shift/mask sequence for AOT (id unknown at compile time), and a direct
 * byte load + constant mask for the JIT.
 */
1471 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1473 int ibitmap_reg = alloc_preg (cfg);
1474 #ifdef COMPRESSED_INTERFACE_BITMAP
/* Compressed bitmap: delegate the bit test to the mono_class_interface_match icall */
1476 MonoInst *res, *ins;
1477 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1478 MONO_ADD_INS (cfg->cbb, ins);
1480 if (cfg->compile_aot)
1481 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1483 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1484 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1485 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1487 int ibitmap_byte_reg = alloc_preg (cfg);
1489 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1491 if (cfg->compile_aot) {
/* AOT: compute byte index (iid >> 3) and bit mask (1 << (iid & 7)) at runtime */
1492 int iid_reg = alloc_preg (cfg);
1493 int shifted_iid_reg = alloc_preg (cfg);
1494 int ibitmap_byte_address_reg = alloc_preg (cfg);
1495 int masked_iid_reg = alloc_preg (cfg);
1496 int iid_one_bit_reg = alloc_preg (cfg);
1497 int iid_bit_reg = alloc_preg (cfg);
1498 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1499 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1500 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1501 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1502 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1503 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1504 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1505 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: interface_id is known, so fold index and mask into immediates */
1507 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1508 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1514 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1515 * stored in "klass_reg" implements the interface "klass".
/* Thin wrapper: bitmap lives at MonoClass::interface_bitmap */
1518 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1520 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1524 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1525 * stored in "vtable_reg" implements the interface "klass".
/* Thin wrapper: bitmap lives at MonoVTable::interface_bitmap */
1528 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1530 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1534 * Emit code which checks whenever the interface id of @klass is smaller than
1535 * than the value given by max_iid_reg.
/* On failure: branch to FALSE_TARGET if given, otherwise throw InvalidCastException */
1538 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1539 MonoBasicBlock *false_target)
1541 if (cfg->compile_aot) {
/* AOT: interface id is a load-time constant, compare register-to-register */
1542 int iid_reg = alloc_preg (cfg);
1543 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1544 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1547 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1549 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1551 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1554 /* Same as above, but obtains max_iid from a vtable */
1556 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1557 MonoBasicBlock *false_target)
1559 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is stored as an unsigned 16-bit field on MonoVTable */
1561 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1562 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1565 /* Same as above, but obtains max_iid from a klass */
1567 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1568 MonoBasicBlock *false_target)
1570 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is stored as an unsigned 16-bit field on MonoClass */
1572 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1573 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 * Emit an "is instance of" check against KLASS using the supertypes table of
 * the class in KLASS_REG: compare supertypes[klass->idepth - 1] against
 * KLASS (from KLASS_INS if given, else a class constant). Branches to
 * TRUE_TARGET on match; FALSE_TARGET is used when the idepth guard fails.
 */
1577 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1579 int idepth_reg = alloc_preg (cfg);
1580 int stypes_reg = alloc_preg (cfg);
1581 int stype = alloc_preg (cfg);
/* Make sure klass->supertypes/idepth are initialized before we read them */
1583 mono_class_setup_supertypes (klass);
1585 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
/* Deep hierarchies: guard that the candidate's idepth is at least klass->idepth */
1586 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1587 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1588 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1590 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1591 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1593 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1594 } else if (cfg->compile_aot) {
1595 int const_reg = alloc_preg (cfg);
1596 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1597 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
/* JIT, no klass_ins: compare against the klass pointer as an immediate */
1599 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1601 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst check with no explicit klass instruction */
1605 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1607 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 * Emit an interface cast check on the vtable in VTABLE_REG: max-iid guard,
 * then interface-bitmap bit test. With a TRUE_TARGET, branches there on
 * success (isinst-style); otherwise throws InvalidCastException on failure
 * (castclass-style).
 */
1611 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1613 int intf_reg = alloc_preg (cfg);
1615 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1616 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1617 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1619 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1621 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1625 * Variant of the above that takes a register to the class, not the vtable.
1628 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1630 int intf_bit_reg = alloc_preg (cfg);
1632 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1633 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1634 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
/* Nonzero bit => implements the interface; with no true_target, failure throws */
1636 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1638 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 * Emit an exact-class equality check: compare KLASS_REG against KLASS
 * (taken from KLASS_INST's dreg when provided, else a class constant) and
 * throw InvalidCastException on mismatch.
 */
1642 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1645 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1646 } else if (cfg->compile_aot) {
/* AOT: materialize the class pointer through a patchable constant */
1647 int const_reg = alloc_preg (cfg);
1648 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1649 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1651 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1653 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact-class check with a compile-time klass only */
1657 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1659 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 * Compare KLASS_REG against KLASS and branch to TARGET using BRANCH_OP
 * (caller picks e.g. OP_PBEQ / OP_PBNE_UN) instead of throwing.
 */
1663 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1665 if (cfg->compile_aot) {
1666 int const_reg = alloc_preg (cfg);
1667 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1668 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1670 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1672 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/* Forward declaration: mini_emit_castclass_inst below recurses through this
 * for arrays-of-arrays element checks. */
1676 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 * Emit a castclass check of the class in KLASS_REG against KLASS, throwing
 * InvalidCastException on failure. Array casts check rank, then recursively
 * check the element (cast_class), with special-cased paths for enum-related
 * element types; non-array casts walk the supertypes table (hedged: some
 * branches of this function are elided in this view).
 */
1679 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1682 int rank_reg = alloc_preg (cfg);
1683 int eclass_reg = alloc_preg (cfg);
/* Array path does not support a runtime klass instruction */
1685 g_assert (!klass_inst);
/* Ranks must match exactly */
1686 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1687 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1688 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1689 //		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1690 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
1691 if (klass->cast_class == mono_defaults.object_class) {
/* object[] target: accept any element whose parent isn't Enum's parent, then
 * require the element to be exactly Enum otherwise */
1692 int parent_reg = alloc_preg (cfg);
1693 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1694 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1695 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1696 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1697 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1698 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1699 } else if (klass->cast_class == mono_defaults.enum_class) {
1700 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1701 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1702 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1704 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1705 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1708 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1709 /* Check that the object is a vector too */
/* SZARRAYs have a NULL bounds pointer; multi-dim arrays do not */
1710 int bounds_reg = alloc_preg (cfg);
1711 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1712 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1713 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: walk the supertypes table, same scheme as isinst above */
1716 int idepth_reg = alloc_preg (cfg);
1717 int stypes_reg = alloc_preg (cfg);
1718 int stype = alloc_preg (cfg);
1720 mono_class_setup_supertypes (klass);
1722 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1723 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1724 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1725 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1727 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1728 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1729 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass with no runtime klass instruction */
1734 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1736 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 * Emit IR that fills SIZE bytes at DESTREG + OFFSET with VAL (only VAL == 0
 * is supported, per the assert). Small aligned sizes become a single
 * immediate store; otherwise a register holding VAL is stored in the widest
 * units alignment allows, falling back to byte stores.
 * (Hedged: some loop/branch lines are elided in this view.)
 */
1740 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1744 g_assert (val == 0);
1749 if ((size <= 4) && (size <= align)) {
/* NOTE(review): the switch/size dispatch lines around these stores are elided */
1752 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1755 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1758 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1760 #if SIZEOF_REGISTER == 8
1762 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General path: put VAL in a register sized to the native word */
1768 val_reg = alloc_preg (cfg);
1770 if (SIZEOF_REGISTER == 8)
1771 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1773 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1776 /* This could be optimized further if neccesary */
/* Unaligned prefix: byte stores until the required alignment is reached */
1778 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1785 #if !NO_UNALIGNED_ACCESS
1786 if (SIZEOF_REGISTER == 8) {
1788 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1793 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* Then 4-, 2- and finally 1-byte stores for the remaining tail */
1801 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1806 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1811 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 * Emit IR that copies SIZE bytes from SRCREG + SOFFSET to DESTREG + DOFFSET
 * as an unrolled sequence of load/store pairs, widest first as ALIGN allows.
 * Fully unrolled, hence the hard cap on SIZE below.
 * (Hedged: the loop/offset-advance lines are elided in this view.)
 */
1818 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1825 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1826 g_assert (size < 10000);
1829 /* This could be optimized further if neccesary */
/* Unaligned prefix: byte copies until alignment is reached */
1831 cur_reg = alloc_preg (cfg);
1832 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1833 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1840 #if !NO_UNALIGNED_ACCESS
1841 if (SIZEOF_REGISTER == 8) {
/* 8-byte copies on 64-bit targets that tolerate unaligned access */
1843 cur_reg = alloc_preg (cfg);
1844 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1845 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* 4-byte copies */
1854 cur_reg = alloc_preg (cfg);
1855 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1856 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
/* 2-byte copies */
1862 cur_reg = alloc_preg (cfg);
1863 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1864 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
/* Trailing bytes */
1870 cur_reg = alloc_preg (cfg);
1871 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1872 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * emit_tls_set:
 * Emit IR that stores SREG1 into the TLS slot identified by TLS_KEY.
 * Under AOT the slot offset is not known at compile time, so it is loaded
 * as a patchable constant and OP_TLS_SET_REG is used; the JIT path bakes
 * the offset into an OP_TLS_SET.
 */
1880 emit_tls_set (MonoCompile *cfg, int sreg1, int tls_key)
1884 if (cfg->compile_aot) {
1885 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1886 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1888 ins->sreg2 = c->dreg;
1889 MONO_ADD_INS (cfg->cbb, ins);
1891 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1893 ins->inst_offset = mini_get_tls_offset (tls_key);
1894 MONO_ADD_INS (cfg->cbb, ins);
1901 * Emit IR to push the current LMF onto the LMF stack.
1904 emit_push_lmf (MonoCompile *cfg)
1907 * Emit IR to push the LMF:
1908 * lmf_addr = <lmf_addr from tls>
1909 * lmf->lmf_addr = lmf_addr
1910 * lmf->prev_lmf = *lmf_addr
1913 int lmf_reg, prev_lmf_reg;
1914 MonoInst *ins, *lmf_ins;
1919 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
/* Fast path: the LMF itself lives in TLS — link our frame's LMF in front of
 * the current one and install it directly via TLS */
1920 /* Load current lmf */
1921 lmf_ins = mono_get_lmf_intrinsic (cfg);
1923 MONO_ADD_INS (cfg->cbb, lmf_ins);
1924 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1925 lmf_reg = ins->dreg;
1926 /* Save previous_lmf */
1927 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
1929 emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
1932 * Store lmf_addr in a variable, so it can be allocated to a global register.
1934 if (!cfg->lmf_addr_var)
1935 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* Resolve lmf_addr: through jit_tls when a TLS intrinsic is available,
 * otherwise by calling mono_get_lmf_addr (elided #ifdef branches here). */
1938 ins = mono_get_jit_tls_intrinsic (cfg);
1940 int jit_tls_dreg = ins->dreg;
1942 MONO_ADD_INS (cfg->cbb, ins);
1943 lmf_reg = alloc_preg (cfg);
1944 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, G_STRUCT_OFFSET (MonoJitTlsData, lmf));
1946 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
1949 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
1951 MONO_ADD_INS (cfg->cbb, lmf_ins);
1953 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
1955 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
1957 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1958 lmf_reg = ins->dreg;
1960 prev_lmf_reg = alloc_preg (cfg);
1961 /* Save previous_lmf */
1962 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
1963 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* Set new LMF: *lmf_addr = our lmf */
1965 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
1972 * Emit IR to pop the current LMF from the LMF stack.
1975 emit_pop_lmf (MonoCompile *cfg)
1977 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
1983 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1984 lmf_reg = ins->dreg;
1986 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
/* Fast path: restore previous_lmf directly into the TLS LMF slot */
1987 /* Load previous_lmf */
1988 prev_lmf_reg = alloc_preg (cfg);
1989 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
1991 emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
1994 * Emit IR to pop the LMF:
1995 * *(lmf->lmf_addr) = lmf->prev_lmf
1997 /* This could be called before emit_push_lmf () */
1998 if (!cfg->lmf_addr_var)
1999 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2000 lmf_addr_reg = cfg->lmf_addr_var->dreg;
2002 prev_lmf_reg = alloc_preg (cfg);
2003 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
2004 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * ret_type_to_call_opcode:
 * Map a return TYPE to the matching call opcode family, selecting the
 * _REG variant for indirect calls (CALLI) and the _MEMBASE variant for
 * virtual calls (VIRT): plain CALL for int/object, LCALL for 64-bit,
 * FCALL for floats, VCALL for value types, VOIDCALL for void.
 * Enums recurse through their base type; generic insts through their
 * container class. (Hedged: many case labels are elided in this view.)
 */
2009 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
2012 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2015 type = mini_get_basic_type_from_generic (gsctx, type);
2016 type = mini_replace_type (type);
2017 switch (type->type) {
2018 case MONO_TYPE_VOID:
2019 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2022 case MONO_TYPE_BOOLEAN:
2025 case MONO_TYPE_CHAR:
2028 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2032 case MONO_TYPE_FNPTR:
2033 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2034 case MONO_TYPE_CLASS:
2035 case MONO_TYPE_STRING:
2036 case MONO_TYPE_OBJECT:
2037 case MONO_TYPE_SZARRAY:
2038 case MONO_TYPE_ARRAY:
2039 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2042 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
2045 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2046 case MONO_TYPE_VALUETYPE:
2047 if (type->data.klass->enumtype) {
/* Enums behave like their underlying integral type */
2048 type = mono_class_enum_basetype (type->data.klass);
2051 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2052 case MONO_TYPE_TYPEDBYREF:
2053 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2054 case MONO_TYPE_GENERICINST:
/* Retry with the generic container's underlying class */
2055 type = &type->data.generic_class->container_class->byval_arg;
2058 case MONO_TYPE_MVAR:
2060 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2062 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
2068 * target_type_is_incompatible:
2069 * @cfg: MonoCompile context
2071 * Check that the item @arg on the evaluation stack can be stored
2072 * in the target type (can be a local, or field, etc).
2073 * The cfg arg can be used to check if we need verification or just
2076 * Returns: non-0 value if arg can't be stored on a target.
2079 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2081 MonoType *simple_type;
2084 target = mini_replace_type (target);
2085 if (target->byref) {
2086 /* FIXME: check that the pointed to types match */
2087 if (arg->type == STACK_MP)
2088 return arg->klass != mono_class_from_mono_type (target);
2089 if (arg->type == STACK_PTR)
/* Not byref: compare against the underlying (enum-stripped) type.
 * NOTE(review): the case labels / return statements between many of the
 * checks below are elided in this view. */
2094 simple_type = mono_type_get_underlying_type (target);
2095 switch (simple_type->type) {
2096 case MONO_TYPE_VOID:
2100 case MONO_TYPE_BOOLEAN:
2103 case MONO_TYPE_CHAR:
2106 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2110 /* STACK_MP is needed when setting pinned locals */
2111 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2116 case MONO_TYPE_FNPTR:
2118 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2119 * in native int. (#688008).
2121 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2124 case MONO_TYPE_CLASS:
2125 case MONO_TYPE_STRING:
2126 case MONO_TYPE_OBJECT:
2127 case MONO_TYPE_SZARRAY:
2128 case MONO_TYPE_ARRAY:
2129 if (arg->type != STACK_OBJ)
2131 /* FIXME: check type compatibility */
2135 if (arg->type != STACK_I8)
2140 if (arg->type != STACK_R8)
2143 case MONO_TYPE_VALUETYPE:
2144 if (arg->type != STACK_VTYPE)
/* Value types also require an exact class match */
2146 klass = mono_class_from_mono_type (simple_type);
2147 if (klass != arg->klass)
2150 case MONO_TYPE_TYPEDBYREF:
2151 if (arg->type != STACK_VTYPE)
2153 klass = mono_class_from_mono_type (simple_type);
2154 if (klass != arg->klass)
2157 case MONO_TYPE_GENERICINST:
2158 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2159 if (arg->type != STACK_VTYPE)
2161 klass = mono_class_from_mono_type (simple_type);
2162 if (klass != arg->klass)
2166 if (arg->type != STACK_OBJ)
2168 /* FIXME: check type compatibility */
2172 case MONO_TYPE_MVAR:
/* Type variables only appear under generic sharing */
2173 g_assert (cfg->generic_sharing_context);
2174 if (mini_type_var_is_vt (cfg, simple_type)) {
2175 if (arg->type != STACK_VTYPE)
2178 if (arg->type != STACK_OBJ)
2183 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2189 * Prepare arguments for passing to a function call.
2190 * Return a non-zero value if the arguments can't be passed to the given
2192 * The type checks are not yet complete and some conversions may need
2193 * casts on 32 or 64 bit architectures.
2195 * FIXME: implement this using target_type_is_incompatible ()
2198 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2200 MonoType *simple_type;
/* 'this' (args[0]) must be an object, managed pointer or native pointer.
 * NOTE(review): many return statements between the checks below are elided
 * in this view. */
2204 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2208 for (i = 0; i < sig->param_count; ++i) {
2209 if (sig->params [i]->byref) {
2210 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2214 simple_type = sig->params [i];
2215 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
2217 switch (simple_type->type) {
2218 case MONO_TYPE_VOID:
2223 case MONO_TYPE_BOOLEAN:
2226 case MONO_TYPE_CHAR:
2229 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2235 case MONO_TYPE_FNPTR:
2236 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2239 case MONO_TYPE_CLASS:
2240 case MONO_TYPE_STRING:
2241 case MONO_TYPE_OBJECT:
2242 case MONO_TYPE_SZARRAY:
2243 case MONO_TYPE_ARRAY:
2244 if (args [i]->type != STACK_OBJ)
2249 if (args [i]->type != STACK_I8)
2254 if (args [i]->type != STACK_R8)
2257 case MONO_TYPE_VALUETYPE:
2258 if (simple_type->data.klass->enumtype) {
/* Enums re-run the check with their underlying integral type */
2259 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2262 if (args [i]->type != STACK_VTYPE)
2265 case MONO_TYPE_TYPEDBYREF:
2266 if (args [i]->type != STACK_VTYPE)
2269 case MONO_TYPE_GENERICINST:
/* Retry with the container class's underlying type */
2270 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2273 case MONO_TYPE_MVAR:
2275 if (args [i]->type != STACK_VTYPE)
2279 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 * Map a *_MEMBASE (virtual) call opcode to its direct-call counterpart.
 * Unknown opcodes are a bug (assert). (Hedged: the return statements
 * between the case labels are elided in this view.)
 */
2287 callvirt_to_call (int opcode)
2290 case OP_CALL_MEMBASE:
2292 case OP_VOIDCALL_MEMBASE:
2294 case OP_FCALL_MEMBASE:
2296 case OP_VCALL_MEMBASE:
2298 case OP_LCALL_MEMBASE:
2301 g_assert_not_reached ();
2307 #ifdef MONO_ARCH_HAVE_IMT
2308 /* Either METHOD or IMT_ARG needs to be set */
/*
 * emit_imt_argument:
 * Emit IR that loads the IMT argument (IMT_ARG's value, or METHOD as a
 * constant) into a register and attaches it to CALL. LLVM always passes it
 * explicitly; otherwise it goes into MONO_ARCH_IMT_REG when the target has
 * one, or falls back to mono_arch_emit_imt_argument ().
 */
2310 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2314 if (COMPILE_LLVM (cfg)) {
2315 method_reg = alloc_preg (cfg);
2318 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2319 } else if (cfg->compile_aot) {
2320 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2323 MONO_INST_NEW (cfg, ins, OP_PCONST);
2324 ins->inst_p0 = method;
2325 ins->dreg = method_reg;
2326 MONO_ADD_INS (cfg->cbb, ins);
2330 call->imt_arg_reg = method_reg;
2332 #ifdef MONO_ARCH_IMT_REG
2333 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2335 /* Need this to keep the IMT arg alive */
2336 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
/* Non-LLVM path: same value selection, then hand it to the dedicated IMT reg */
2341 #ifdef MONO_ARCH_IMT_REG
2342 method_reg = alloc_preg (cfg);
2345 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2346 } else if (cfg->compile_aot) {
2347 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2350 MONO_INST_NEW (cfg, ins, OP_PCONST);
2351 ins->inst_p0 = method;
2352 ins->dreg = method_reg;
2353 MONO_ADD_INS (cfg->cbb, ins);
2356 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* No IMT register: delegate to the backend */
2358 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/*
 * mono_patch_info_new:
 * Allocate a MonoJumpInfo from MP describing a patch of TYPE at IP whose
 * target is TARGET. (Hedged: the ip/type field assignments are elided in
 * this view.)
 */
2363 static MonoJumpInfo *
2364 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2366 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2370 ji->data.target = target;
/* Returns klass's generic-context usage, but only when generic sharing is
 * active for this compile (the non-sharing return is elided in this view). */
2376 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2378 if (cfg->generic_sharing_context)
2379 return mono_class_check_context_used (klass);
/* Same as mini_class_check_context_used, but for a method */
2385 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2387 if (cfg->generic_sharing_context)
2388 return mono_method_check_context_used (method);
2394 * check_method_sharing:
2396 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
/* Results are returned through the optional out parameters: pass a vtable for
 * potentially-shared static/valuetype methods on generic classes, pass an
 * mrgctx for shareable generic methods (method_inst present). */
2399 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2401 gboolean pass_vtable = FALSE;
2402 gboolean pass_mrgctx = FALSE;
2404 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2405 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2406 gboolean sharable = FALSE;
2408 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2411 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2412 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
2413 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2415 sharable = sharing_enabled && context_sharable;
2419 * Pass vtable iff target method might
2420 * be shared, which means that sharing
2421 * is enabled for its class and its
2422 * context is sharable (and it's not a
2425 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* Generic method (has a method_inst): may need an mrgctx instead */
2429 if (mini_method_get_context (cmethod) &&
2430 mini_method_get_context (cmethod)->method_inst) {
2431 g_assert (!pass_vtable);
2433 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2436 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2437 MonoGenericContext *context = mini_method_get_context (cmethod);
2438 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2440 if (sharing_enabled && context_sharable)
2442 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, mono_method_signature (cmethod)))
2447 if (out_pass_vtable)
2448 *out_pass_vtable = pass_vtable;
2449 if (out_pass_mrgctx)
2450 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 * Build a MonoCallInst for a call with signature SIG and arguments ARGS.
 * CALLI selects indirect calls, VIRTUAL membase calls, TAIL emits
 * OP_TAILCALL; RGCTX records the rgctx register and UNBOX_TRAMPOLINE is
 * propagated to the backend. Valuetype returns either reuse cfg->vret_addr
 * or get a temp whose address is passed via OP_OUTARG_VTRETADDR; soft-float
 * targets pre-convert R4 arguments via an icall. Returns the call
 * instruction after backend argument lowering.
 */
2453 inline static MonoCallInst *
2454 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2455 MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
2459 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2464 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2466 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2469 call->signature = sig;
2470 call->rgctx_reg = rgctx;
2471 sig_ret = mini_replace_type (sig->ret);
2473 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
2476 if (mini_type_is_vtype (cfg, sig_ret)) {
/* First vtype-return path: reuse the method's vret_addr
 * (the guarding condition for this branch is elided in this view) */
2477 call->vret_var = cfg->vret_addr;
2478 //g_assert_not_reached ();
2480 } else if (mini_type_is_vtype (cfg, sig_ret)) {
/* Otherwise allocate a temp to receive the vtype return value */
2481 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2484 temp->backend.is_pinvoke = sig->pinvoke;
2487 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2488 * address of return value to increase optimization opportunities.
2489 * Before vtype decomposition, the dreg of the call ins itself represents the
2490 * fact the call modifies the return value. After decomposition, the call will
2491 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2492 * will be transformed into an LDADDR.
2494 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2495 loada->dreg = alloc_preg (cfg);
2496 loada->inst_p0 = temp;
2497 /* We reference the call too since call->dreg could change during optimization */
2498 loada->inst_p1 = call;
2499 MONO_ADD_INS (cfg->cbb, loada);
2501 call->inst.dreg = temp->dreg;
2503 call->vret_var = loada;
2504 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2505 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2507 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2508 if (COMPILE_SOFT_FLOAT (cfg)) {
2510 * If the call has a float argument, we would need to do an r8->r4 conversion using
2511 * an icall, but that cannot be done during the call sequence since it would clobber
2512 * the call registers + the stack. So we do it before emitting the call.
2514 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2516 MonoInst *in = call->args [i];
2518 if (i >= sig->hasthis)
2519 t = sig->params [i - sig->hasthis];
/* Implicit 'this': treat as native int */
2521 t = &mono_defaults.int_class->byval_arg;
2522 t = mono_type_get_underlying_type (t);
2524 if (!t->byref && t->type == MONO_TYPE_R4) {
2525 MonoInst *iargs [1];
2529 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2531 /* The result will be in an int vreg */
2532 call->args [i] = conv;
2538 call->need_unbox_trampoline = unbox_trampoline;
2541 if (COMPILE_LLVM (cfg))
2542 mono_llvm_emit_call (cfg, call);
2544 mono_arch_emit_call (cfg, call);
2546 mono_arch_emit_call (cfg, call);
/* Track outgoing-argument stack needs and mark the method as making calls */
2549 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2550 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *
 *   Attach the rgctx argument held in RGCTX_REG to CALL.  When the target
 * architecture has a dedicated rgctx register, pass it there; the elided
 * #else branch presumably just records the register number.
 */
2556 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2558 #ifdef MONO_ARCH_RGCTX_REG
2559 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2560 cfg->uses_rgctx_reg = TRUE;
2561 call->rgctx_reg = TRUE;
2563 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 *
 *   Emit an indirect call through ADDR with signature SIG.  IMT_ARG and
 * RGCTX_ARG are optional; when RGCTX_ARG is given it is copied into a fresh
 * vreg before the args are emitted (guarding 'if (rgctx_arg)' lines elided).
 */
2570 inline static MonoInst*
2571 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2577 rgctx_reg = mono_alloc_preg (cfg);
2578 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2581 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
/* The indirect target address goes in sreg1. */
2583 call->inst.sreg1 = addr->dreg;
2586 emit_imt_argument (cfg, call, NULL, imt_arg);
2588 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2591 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2593 return (MonoInst*)call;
2597 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2600 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2602 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *
 *   Emit a (possibly virtual, possibly remoting-wrapped) call to METHOD with
 * arguments ARGS.  THIS being non-NULL marks the call as virtual; IMT_ARG and
 * RGCTX_ARG are optional extra arguments.  Returns the call instruction.
 *
 * NOTE(review): lines are elided throughout this sample (missing braces,
 * declarations and #else/#endif lines); comments below describe only what is
 * visible.
 */
2605 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2606 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2608 #ifndef DISABLE_REMOTING
2609 gboolean might_be_remote = FALSE;
2611 gboolean virtual = this != NULL;
2612 gboolean enable_for_aot = TRUE;
2616 gboolean need_unbox_trampoline;
2619 sig = mono_method_signature (method);
/* Save the rgctx argument in a fresh vreg (guard for rgctx_arg elided). */
2622 rgctx_reg = mono_alloc_preg (cfg);
2623 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
/* String .ctors actually return the string, so patch the signature. */
2626 if (method->string_ctor) {
2627 /* Create the real signature */
2628 /* FIXME: Cache these */
2629 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2630 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2635 context_used = mini_method_check_context_used (cfg, method);
2637 #ifndef DISABLE_REMOTING
/* A non-virtual call on a MarshalByRef (or object) 'this' whose identity
 * cannot be proven local may need a remoting check wrapper. */
2638 might_be_remote = this && sig->hasthis &&
2639 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2640 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
2642 if (might_be_remote && context_used) {
2645 g_assert (cfg->generic_sharing_context);
/* Under generic sharing, fetch the remoting-invoke wrapper address from the
 * rgctx and call indirectly. */
2647 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2649 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2653 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2655 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2657 #ifndef DISABLE_REMOTING
2658 if (might_be_remote)
2659 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2662 call->method = method;
2663 call->inst.flags |= MONO_INST_HAS_METHOD;
2664 call->inst.inst_left = this;
2665 call->tail_call = tail;
/* --- virtual-call path (enclosing 'if (virtual)' elided) --- */
2668 int vtable_reg, slot_reg, this_reg;
2671 this_reg = this->dreg;
/* Fast path for MulticastDelegate.Invoke: call through delegate->invoke_impl. */
2673 if (ARCH_HAVE_DELEGATE_TRAMPOLINES && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2674 MonoInst *dummy_use;
2676 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2678 /* Make a call to delegate->invoke_impl */
2679 call->inst.inst_basereg = this_reg;
2680 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2681 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2683 /* We must emit a dummy use here because the delegate trampoline will
2684 replace the 'this' argument with the delegate target making this activation
2685 no longer a root for the delegate.
2686 This is an issue for delegates that target collectible code such as dynamic
2687 methods of GC'able assemblies.
2689 For a test case look into #667921.
2691 FIXME: a dummy use is not the best way to do it as the local register allocator
2692 will put it on a caller save register and spill it around the call.
2693 Ideally, we would either put it on a callee save register or only do the store part.
2695 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2697 return (MonoInst*)call;
/* Devirtualize when the method is effectively non-virtual (or final and not
 * a remoting wrapper) and not a context-used MarshalByRef call. */
2700 if ((!cfg->compile_aot || enable_for_aot) &&
2701 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2702 (MONO_METHOD_IS_FINAL (method) &&
2703 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2704 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2706 * the method is not virtual, we just need to ensure this is not null
2707 * and then we can call the method directly.
2709 #ifndef DISABLE_REMOTING
2710 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2712 * The check above ensures method is not gshared, this is needed since
2713 * gshared methods can't have wrappers.
2715 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2719 if (!method->string_ctor)
2720 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2722 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2723 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2725 * the method is virtual, but we can statically dispatch since either
2726 * its class or the method itself are sealed.
2727 * But first we need to ensure it's not a null reference.
2729 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2731 call->inst.opcode = callvirt_to_call (call->inst.opcode);
/* --- true virtual dispatch: load the vtable and pick a slot --- */
2733 vtable_reg = alloc_preg (cfg);
2734 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2735 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2737 #ifdef MONO_ARCH_HAVE_IMT
/* Interface call through the IMT: slots live below the vtable pointer. */
2739 guint32 imt_slot = mono_method_get_imt_slot (method);
2740 emit_imt_argument (cfg, call, call->method, imt_arg);
2741 slot_reg = vtable_reg;
2742 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
/* Non-IMT fallback: resolve the interface slot via the vtable. */
2745 if (slot_reg == -1) {
2746 slot_reg = alloc_preg (cfg);
2747 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2748 offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
/* Regular virtual call: index into the vtable's method array. */
2751 slot_reg = vtable_reg;
2752 offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2753 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2754 #ifdef MONO_ARCH_HAVE_IMT
/* Generic virtual methods still need the IMT argument. */
2756 g_assert (mono_method_signature (method)->generic_param_count);
2757 emit_imt_argument (cfg, call, call->method, imt_arg);
2762 call->inst.sreg1 = slot_reg;
2763 call->inst.inst_offset = offset;
2764 call->virtual = TRUE;
2768 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2771 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2773 return (MonoInst*)call;
/* Convenience wrapper: non-tail call to METHOD with its own signature and no
 * imt/rgctx arguments. */
2777 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2779 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this, NULL, NULL);
/*
 * mono_emit_native_call:
 *   Emit a direct call to the native address FUNC with signature SIG.
 * (The lines storing FUNC into the call ins are elided from this sample.)
 */
2783 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2790 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2793 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2795 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *   Look up the registered JIT icall for FUNC and emit a native call to its
 * wrapper using the icall's registered signature.
 */
2799 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2801 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2805 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2809 * mono_emit_abs_call:
2811 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2813 inline static MonoInst*
2814 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2815 MonoMethodSignature *sig, MonoInst **args)
2817 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2821 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Lazily create the table mapping patch-info "addresses" back to patch infos. */
2824 if (cfg->abs_patches == NULL)
2825 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2826 g_hash_table_insert (cfg->abs_patches, ji, ji);
2827 ins = mono_emit_native_call (cfg, ji, sig, args);
/* Mark the fptr as a patch so later phases resolve it instead of calling it. */
2828 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *
 *   Widen the result of a pinvoke (or LLVM) call with a sub-register-sized
 * integer return type, since native code may leave the upper bits of the
 * return register uninitialized.  Returns the (possibly replaced) result ins.
 */
2833 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2835 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2836 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2840 * Native code might return non register sized integers
2841 * without initializing the upper bits.
/* Map the 1/2-byte load opcodes to the matching sign/zero extension. */
2843 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2844 case OP_LOADI1_MEMBASE:
2845 widen_op = OP_ICONV_TO_I1;
2847 case OP_LOADU1_MEMBASE:
2848 widen_op = OP_ICONV_TO_U1;
2850 case OP_LOADI2_MEMBASE:
2851 widen_op = OP_ICONV_TO_I2;
2853 case OP_LOADU2_MEMBASE:
2854 widen_op = OP_ICONV_TO_U2;
2860 if (widen_op != -1) {
2861 int dreg = alloc_preg (cfg);
2864 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2865 widen->type = ins->type;
/*
 * get_memcpy_method:
 *   Return (and cache) the managed String.memcpy(3-arg) helper from corlib.
 * Aborts with g_error if the method is missing (old corlib).
 */
2875 get_memcpy_method (void)
2877 static MonoMethod *memcpy_method = NULL;
2878 if (!memcpy_method) {
2879 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2881 g_error ("Old corlib found. Install a new one");
2883 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 *   Recursively walk the instance fields of KLASS and set a bit in *WB_BITMAP
 * for every pointer-sized slot (at OFFSET + field offset) that holds a
 * reference and therefore needs a GC write barrier.  Static fields are
 * skipped; embedded valuetypes with references are recursed into.
 */
2887 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
2889 MonoClassField *field;
2890 gpointer iter = NULL;
2892 while ((field = mono_class_get_fields (klass, &iter))) {
2895 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* Valuetype field offsets include the MonoObject header; strip it. */
2897 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2898 if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
/* Reference slots must be pointer-aligned for the bitmap to be valid. */
2899 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2900 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2902 MonoClass *field_class = mono_class_from_mono_type (field->type);
2903 if (field_class->has_references)
2904 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *
 *   Emit a GC write barrier for the store of VALUE through PTR.  Prefers, in
 * order: an arch-specific card-table wbarrier opcode, inline card-table
 * marking, then a call to the generic managed write-barrier method.
 * No-op when the compile doesn't generate write barriers.
 */
2910 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
2912 int card_table_shift_bits;
2913 gpointer card_table_mask;
2915 MonoInst *dummy_use;
2916 int nursery_shift_bits;
2917 size_t nursery_size;
2918 gboolean has_card_table_wb = FALSE;
2920 if (!cfg->gen_write_barriers)
2923 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
2925 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
2927 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
2928 has_card_table_wb = TRUE;
/* Fast path: dedicated arch opcode (not under AOT or LLVM). */
2931 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
2934 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2935 wbarrier->sreg1 = ptr->dreg;
2936 wbarrier->sreg2 = value->dreg;
2937 MONO_ADD_INS (cfg->cbb, wbarrier);
2938 } else if (card_table) {
/* Inline card marking: card = card_table + (ptr >> shift); *card = 1. */
2939 int offset_reg = alloc_preg (cfg);
2940 int card_reg = alloc_preg (cfg);
2943 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
2944 if (card_table_mask)
2945 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
2947 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
2948 * IMM's larger than 32bits.
2950 if (cfg->compile_aot) {
2951 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
2953 MONO_INST_NEW (cfg, ins, OP_PCONST);
2954 ins->inst_p0 = card_table;
2955 ins->dreg = card_reg;
2956 MONO_ADD_INS (cfg->cbb, ins);
2959 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
2960 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* Slow path: call the GC's managed write-barrier method. */
2962 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2963 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep VALUE alive across the barrier for the register allocator. */
2966 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Try to emit an inline, write-barrier-aware copy of a valuetype of KLASS
 * (SIZE bytes, alignment ALIGN) from iargs[1] to iargs[0].  Falls back to the
 * mono_gc_wbarrier_value_copy_bitmap icall for large copies, and emits a
 * write barrier after each pointer-sized store of a reference slot.
 * (The gboolean return statements are elided from this sample.)
 */
2970 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
2972 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
2973 unsigned need_wb = 0;
2978 /*types with references can't have alignment smaller than sizeof(void*) */
2979 if (align < SIZEOF_VOID_P)
2982 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
2983 if (size > 32 * SIZEOF_VOID_P)
2986 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
2988 /* We don't unroll more than 5 stores to avoid code bloat. */
2989 if (size > 5 * SIZEOF_VOID_P) {
2990 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
2991 size += (SIZEOF_VOID_P - 1);
2992 size &= ~(SIZEOF_VOID_P - 1);
2994 EMIT_NEW_ICONST (cfg, iargs [2], size);
2995 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
2996 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3000 destreg = iargs [0]->dreg;
3001 srcreg = iargs [1]->dreg;
3004 dest_ptr_reg = alloc_preg (cfg);
3005 tmp_reg = alloc_preg (cfg);
/* Walk destination with a separate cursor so iargs[0] can feed the barrier. */
3008 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
/* Unrolled pointer-sized copy; barrier only for slots marked in need_wb. */
3010 while (size >= SIZEOF_VOID_P) {
3011 MonoInst *load_inst;
3012 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3013 load_inst->dreg = tmp_reg;
3014 load_inst->inst_basereg = srcreg;
3015 load_inst->inst_offset = offset;
3016 MONO_ADD_INS (cfg->cbb, load_inst);
3018 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
3021 emit_write_barrier (cfg, iargs [0], load_inst);
3023 offset += SIZEOF_VOID_P;
3024 size -= SIZEOF_VOID_P;
3027 /*tmp += sizeof (void*)*/
3028 if (size >= SIZEOF_VOID_P) {
3029 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3030 MONO_ADD_INS (cfg->cbb, iargs [0]);
3034 /* Those cannot be references since size < sizeof (void*) */
/* Copy the sub-pointer-sized tail in 4-, 2- and 1-byte chunks. */
3036 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3037 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3043 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3044 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3050 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3051 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3060 * Emit code to copy a valuetype of type @klass whose address is stored in
3061 * @src->dreg to memory whose address is stored at @dest->dreg.
3064 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3066 MonoInst *iargs [4];
3067 int context_used, n;
3069 MonoMethod *memcpy_method;
3070 MonoInst *size_ins = NULL;
3071 MonoInst *memcpy_ins = NULL;
3075 * This check breaks with spilled vars... need to handle it during verification anyway.
3076 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* gsharedvt: size and memcpy helper are only known at runtime via the rgctx. */
3079 if (mini_is_gsharedvt_klass (cfg, klass)) {
3081 context_used = mini_class_check_context_used (cfg, klass);
3082 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3083 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3087 n = mono_class_native_size (klass, &align);
3089 n = mono_class_value_size (klass, &align);
3091 /* if native is true there should be no references in the struct */
3092 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3093 /* Avoid barriers when storing to the stack */
3094 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3095 (dest->opcode == OP_LDADDR))) {
3101 context_used = mini_class_check_context_used (cfg, klass);
3103 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3104 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3106 } else if (context_used) {
3107 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3109 if (cfg->compile_aot) {
3110 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
3112 EMIT_NEW_PCONST (cfg, iargs [2], klass);
3113 mono_class_compute_gc_descriptor (klass);
/* Barrier-aware copy icalls: gsharedvt variant takes a runtime size. */
3118 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3120 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No references (or native copy): small copies are inlined, ... */
3125 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
3126 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3127 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
/* ... larger ones call the (gsharedvt or managed) memcpy helper. */
3132 iargs [2] = size_ins;
3134 EMIT_NEW_ICONST (cfg, iargs [2], n);
3136 memcpy_method = get_memcpy_method ();
3138 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3140 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *   Return (and cache) the managed String.memset(3-arg) helper from corlib.
 * Aborts with g_error if the method is missing (old corlib).
 */
3145 get_memset_method (void)
3147 static MonoMethod *memset_method = NULL;
3148 if (!memset_method) {
3149 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3151 g_error ("Old corlib found. Install a new one");
3153 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code to zero-initialize a valuetype of KLASS at the address in
 * DEST->dreg.  For gsharedvt types the size and a bzero helper are fetched
 * from the rgctx; otherwise small types are memset inline and large ones
 * call the managed memset helper.
 */
3157 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3159 MonoInst *iargs [3];
3160 int n, context_used;
3162 MonoMethod *memset_method;
3163 MonoInst *size_ins = NULL;
3164 MonoInst *bzero_ins = NULL;
3165 static MonoMethod *bzero_method;
3167 /* FIXME: Optimize this for the case when dest is an LDADDR */
3169 mono_class_init (klass);
3170 if (mini_is_gsharedvt_klass (cfg, klass)) {
3171 context_used = mini_class_check_context_used (cfg, klass);
3172 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3173 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
/* Lazily resolve the corlib bzero helper; indirect call through the
 * rgctx-provided address with the runtime size. */
3175 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3176 g_assert (bzero_method);
3178 iargs [1] = size_ins;
3179 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3183 n = mono_class_value_size (klass, &align);
/* Small types: inline memset is cheaper than a call. */
3185 if (n <= sizeof (gpointer) * 5) {
3186 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3189 memset_method = get_memset_method ();
3191 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3192 EMIT_NEW_ICONST (cfg, iargs [2], n);
3193 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR to load the runtime generic context for METHOD.  Depending on
 * CONTEXT_USED and the method's shape, this is the mrgctx variable, the
 * vtable variable (possibly dereferenced from the mrgctx), or the vtable
 * loaded from 'this'.  (Return statements are elided from this sample.)
 */
3198 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3200 MonoInst *this = NULL;
3202 g_assert (cfg->generic_sharing_context);
/* Instance methods that don't use method-level context can reach the
 * context via 'this'. */
3204 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3205 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3206 !method->klass->valuetype)
3207 EMIT_NEW_ARGLOAD (cfg, this, 0);
3209 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3210 MonoInst *mrgctx_loc, *mrgctx_var;
3213 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
/* Method context used: load the method rgctx from its variable. */
3215 mrgctx_loc = mono_get_vtable_var (cfg);
3216 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3219 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3220 MonoInst *vtable_loc, *vtable_var;
/* Static/valuetype methods: no 'this', use the vtable variable. */
3224 vtable_loc = mono_get_vtable_var (cfg);
3225 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3227 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3228 MonoInst *mrgctx_var = vtable_var;
/* The variable actually holds an mrgctx; load its class_vtable field. */
3231 vtable_reg = alloc_preg (cfg);
3232 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3233 vtable_var->type = STACK_PTR;
/* Fallback: load the vtable from 'this'. */
3241 vtable_reg = alloc_preg (cfg);
3242 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *   Allocate (from MP) an rgctx-entry patch descriptor wrapping a MonoJumpInfo
 * of PATCH_TYPE/PATCH_DATA, to be resolved as INFO_TYPE for METHOD.
 * IN_MRGCTX selects whether the lookup goes through the method rgctx.
 */
3247 static MonoJumpInfoRgctxEntry *
3248 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3250 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3251 res->method = method;
3252 res->in_mrgctx = in_mrgctx;
3253 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3254 res->data->type = patch_type;
3255 res->data->data.target = patch_data;
3256 res->info_type = info_type;
/* Emit a call to the rgctx lazy-fetch trampoline to resolve ENTRY using the
 * rgctx value in RGCTX. */
3261 static inline MonoInst*
3262 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3264 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/* Emit IR to load the RGCTX_TYPE info for KLASS from the runtime generic
 * context of the current method. */
3268 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3269 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3271 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3272 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3274 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit IR to load the RGCTX_TYPE info for signature SIG from the runtime
 * generic context of the current method. */
3278 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3279 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3281 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3282 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3284 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit IR to load the RGCTX_TYPE info for a gsharedvt call described by
 * SIG + CMETHOD from the runtime generic context of the current method. */
3288 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3289 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3291 MonoJumpInfoGSharedVtCall *call_info;
3292 MonoJumpInfoRgctxEntry *entry;
3295 call_info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3296 call_info->sig = sig;
3297 call_info->method = cmethod;
3299 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3300 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3302 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit IR to load the gsharedvt per-method INFO for CMETHOD from the runtime
 * generic context of the current method. */
3307 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3308 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3310 MonoJumpInfoRgctxEntry *entry;
3313 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3314 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3316 return emit_rgctx_fetch (cfg, rgctx, entry);
3320 * emit_get_rgctx_method:
3322 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3323 * normal constants, else emit a load from the rgctx.
3326 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3327 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3329 if (!context_used) {
/* No shared context: the info is a compile-time constant. */
3332 switch (rgctx_type) {
3333 case MONO_RGCTX_INFO_METHOD:
3334 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3336 case MONO_RGCTX_INFO_METHOD_RGCTX:
3337 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3340 g_assert_not_reached ();
3343 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3344 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3346 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit IR to load the RGCTX_TYPE info for FIELD from the runtime generic
 * context of the current method. */
3351 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3352 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3354 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3355 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3357 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 *   Return the index of the entry (RGCTX_TYPE, DATA) in the current method's
 * gsharedvt info template, adding a new entry (growing the array as needed)
 * if it is not already present.  MONO_RGCTX_INFO_LOCAL_OFFSET entries are
 * never deduplicated.  (Return statements are elided from this sample.)
 */
3361 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3363 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3364 MonoRuntimeGenericContextInfoTemplate *template;
/* Look for an existing matching entry. */
3369 for (i = 0; i < info->num_entries; ++i) {
3370 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3372 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* Grow the entries array (doubling, starting at 16). */
3376 if (info->num_entries == info->count_entries) {
3377 MonoRuntimeGenericContextInfoTemplate *new_entries;
3378 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3380 new_entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3382 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3383 info->entries = new_entries;
3384 info->count_entries = new_count_entries;
3387 idx = info->num_entries;
3388 template = &info->entries [idx];
3389 template->info_type = rgctx_type;
3390 template->data = data;
3392 info->num_entries ++;
3398 * emit_get_gsharedvt_info:
3400 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3403 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3408 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3409 /* Load info->entries [idx] */
3410 dreg = alloc_preg (cfg);
3411 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/* Convenience wrapper: fetch gsharedvt info keyed by KLASS's byval type. */
3417 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3419 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3423 * On return the caller must check @klass for load errors.
/* emit_generic_class_init: emit a call to the generic class-init trampoline
 * for KLASS, passing the vtable (from the rgctx when context is used, a
 * constant otherwise). */
3426 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
3428 MonoInst *vtable_arg;
3432 context_used = mini_class_check_context_used (cfg, klass);
3435 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3436 klass, MONO_RGCTX_INFO_VTABLE);
3438 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3442 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* LLVM uses a distinct trampoline signature. */
3445 if (COMPILE_LLVM (cfg))
3446 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3448 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3449 #ifdef MONO_ARCH_VTABLE_REG
/* Pass the vtable in the dedicated register when the arch has one. */
3450 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3451 cfg->uses_vtable_reg = TRUE;
/* Emit a debugger sequence point at IL offset (ip - code) when sequence
 * points are enabled and we are compiling METHOD itself (not an inlinee). */
3458 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3462 if (cfg->gen_seq_points && cfg->method == method) {
3463 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3465 ins->flags |= MONO_INST_NONEMPTY_STACK;
3466 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *
 *   When --debug=casts is active, record the source class (from the object's
 * vtable) and the target KLASS into the JIT TLS area so a failing cast can
 * report both types.  With NULL_CHECK, null objects skip the recording.
 * On return *OUT_BBLOCK (if given) is set to the current bblock.
 */
3471 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check, MonoBasicBlock **out_bblock)
3473 if (mini_get_debug_options ()->better_cast_details) {
3474 int to_klass_reg = alloc_preg (cfg);
3475 int vtable_reg = alloc_preg (cfg);
3476 int klass_reg = alloc_preg (cfg);
3477 MonoBasicBlock *is_null_bb = NULL;
3481 NEW_BBLOCK (cfg, is_null_bb);
3483 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3484 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3487 tls_get = mono_get_jit_tls_intrinsic (cfg);
/* NOTE(review): the message below ends with "\n." -- the trailing period
 * after the newline looks like a typo in the original (runtime string,
 * left untouched here). */
3489 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3493 MONO_ADD_INS (cfg->cbb, tls_get);
3494 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3495 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3497 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3498 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3499 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3502 MONO_START_BB (cfg, is_null_bb);
3504 *out_bblock = cfg->cbb;
3510 reset_cast_details (MonoCompile *cfg)
3512 /* Reset the variables holding the cast details */
3513 if (mini_get_debug_options ()->better_cast_details) {
3514 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3516 MONO_ADD_INS (cfg->cbb, tls_get);
3517 /* It is enough to reset the from field */
3518 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3523 * On return the caller must check @array_class for load errors
/* mini_emit_check_array_type: emit a runtime check that OBJ's exact type is
 * ARRAY_CLASS, throwing ArrayTypeMismatchException otherwise.  The comparison
 * strategy depends on the sharing mode: class compare under MONO_OPT_SHARED,
 * rgctx vtable under generic sharing, vtable constant otherwise. */
3526 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3528 int vtable_reg = alloc_preg (cfg);
3531 context_used = mini_class_check_context_used (cfg, array_class);
3533 save_cast_details (cfg, array_class, obj->dreg, FALSE, NULL);
/* Faulting load doubles as the null check. */
3535 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3537 if (cfg->opt & MONO_OPT_SHARED) {
3538 int class_reg = alloc_preg (cfg);
3539 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3540 if (cfg->compile_aot) {
3541 int klass_reg = alloc_preg (cfg);
3542 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3543 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3545 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3547 } else if (context_used) {
3548 MonoInst *vtable_ins;
3550 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3551 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3553 if (cfg->compile_aot) {
3557 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3559 vt_reg = alloc_preg (cfg);
3560 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3561 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3564 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3566 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3570 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3572 reset_cast_details (cfg);
/*
 * handle_unbox_nullable:
 *
 *   Emit a call to Nullable<T>.Unbox () for @val. With context_used != 0 the
 * callee address is fetched through the RGCTX and invoked indirectly
 * (shared generic code); otherwise a direct call is emitted, passing the
 * vtable as the extra argument when method sharing requires it.
 * Returns the call instruction whose dreg holds the unboxed value.
 */
3576 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3577 * generic code is generated.
3580 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3582 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3585 MonoInst *rgctx, *addr;
3587 /* FIXME: What if the class is shared? We might not
3588 have to get the address of the method from the
/* Shared path: resolve the concrete Unbox () entry point at run time via the RGCTX */
3590 addr = emit_get_rgctx_method (cfg, context_used, method,
3591 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3593 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3595 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3597 gboolean pass_vtable, pass_mrgctx;
3598 MonoInst *rgctx_arg = NULL;
3600 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
/* Nullable<T>.Unbox is never expected to need a method RGCTX argument */
3601 g_assert (!pass_mrgctx);
3604 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3607 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3610 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit IR to unbox the object on top of the stack (sp [0]) as value type
 * @klass: check that the object is a boxed instance of the right class
 * (InvalidCastException otherwise), then compute the address of the
 * unboxed payload, which starts right after the MonoObject header.
 * With context_used != 0 the element class is compared against a class
 * fetched through the RGCTX instead of a compile-time constant.
 */
3615 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3619 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3620 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3621 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3622 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3624 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check */
3625 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3626 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3628 /* FIXME: generics */
3629 g_assert (klass->rank == 0);
/* A boxed value type always has rank 0; arrays can never unbox to @klass */
3632 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3633 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3635 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Compare element classes so enums and their underlying type unbox interchangeably */
3636 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
3639 MonoInst *element_class;
3641 /* This assertion is from the unboxcast insn */
3642 g_assert (klass->rank == 0);
/* Shared generics: the expected element class comes from the RGCTX */
3644 element_class = emit_get_rgctx_klass (cfg, context_used,
3645 klass->element_class, MONO_RGCTX_INFO_KLASS);
3647 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3648 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3650 save_cast_details (cfg, klass->element_class, obj_reg, FALSE, NULL);
3651 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3652 reset_cast_details (cfg);
/* Address of the unboxed value: skip the object header */
3655 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3656 MONO_ADD_INS (cfg->cbb, add);
3657 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 *   Emit an unbox of @obj for a gsharedvt (generic-shared-by-value-type)
 * class @klass, where it is not known at JIT time whether the type
 * argument is a reference type, a value type, or a Nullable<T>.
 * A runtime three-way dispatch is emitted on the CLASS_BOX_TYPE info
 * (1 == reference type, 2 == nullable, otherwise plain value type);
 * all arms leave the address of the value in addr_reg, and the result
 * is loaded from there in the merged end block. *out_cbb receives the
 * basic block that is current after the merge.
 */
3664 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj, MonoBasicBlock **out_cbb)
3666 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3667 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3671 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
3677 args [1] = klass_inst;
/* Performs the castclass part of the unbox (throws on type mismatch) */
3680 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
3682 NEW_BBLOCK (cfg, is_ref_bb);
3683 NEW_BBLOCK (cfg, is_nullable_bb);
3684 NEW_BBLOCK (cfg, end_bb);
3685 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
/* box-type 1 => the type argument is a reference type */
3686 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3687 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
/* box-type 2 => the type argument is a Nullable<T> */
3689 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3690 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3692 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3693 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Fall-through arm: plain value type — address is payload after the header */
3697 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3698 MONO_ADD_INS (cfg->cbb, addr);
3700 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3703 MONO_START_BB (cfg, is_ref_bb);
3705 /* Save the ref to a temporary */
3706 dreg = alloc_ireg (cfg);
3707 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
/* All arms must leave the address in the same vreg for the merged load below */
3708 addr->dreg = addr_reg;
3709 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3710 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3713 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable arm: call Nullable<T>.Unbox through a signature built by hand,
 * since the concrete instantiation cannot be constructed at JIT time */
3716 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3717 MonoInst *unbox_call;
3718 MonoMethodSignature *unbox_sig;
3721 var = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
3723 unbox_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3724 unbox_sig->ret = &klass->byval_arg;
3725 unbox_sig->param_count = 1;
3726 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
3727 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3729 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3730 addr->dreg = addr_reg;
3733 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3736 MONO_START_BB (cfg, end_bb);
/* Load the unboxed value from whichever address the taken arm produced */
3739 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
3741 *out_cbb = cfg->cbb;
/*
 * handle_alloc:
 *
 *   Emit IR allocating a new object of @klass (@for_box set when the
 * allocation is for a box operation, which lets the GC pick a
 * specialized allocator). Returns the instruction producing the
 * object reference, or NULL with the cfg exception set on error.
 * Allocation strategy, in order of preference:
 * - managed (inlined GC) allocator when available;
 * - mono_object_new with an explicit domain under MONO_OPT_SHARED;
 * - index-based corlib helper for out-of-line AOT throw paths;
 * - otherwise a vtable-specific allocation function.
 */
3747 * Returns NULL and set the cfg exception on error.
3750 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3752 MonoInst *iargs [2];
3758 MonoInst *iargs [2];
3760 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
/* Shared generic path: fetch the klass/vtable through the RGCTX */
3762 if (cfg->opt & MONO_OPT_SHARED)
3763 rgctx_info = MONO_RGCTX_INFO_KLASS;
3765 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3766 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3768 if (cfg->opt & MONO_OPT_SHARED) {
/* mono_object_new () needs the domain passed explicitly */
3769 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3771 alloc_ftn = mono_object_new;
3774 alloc_ftn = mono_object_new_specific;
3777 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED))
3778 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3780 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-generic-sharing path below */
3783 if (cfg->opt & MONO_OPT_SHARED) {
3784 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3785 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3787 alloc_ftn = mono_object_new;
3788 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3789 /* This happens often in argument checking code, eg. throw new FooException... */
3790 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3791 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3792 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3794 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3795 MonoMethod *managed_alloc = NULL;
/* vtable creation failed: report a TypeLoadException through the cfg */
3799 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3800 cfg->exception_ptr = klass;
3804 #ifndef MONO_CROSS_COMPILE
3805 managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
3808 if (managed_alloc) {
3809 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3810 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
/* pass_lw: the allocation function wants the instance size (in words) as first arg */
3812 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
3814 guint32 lw = vtable->klass->instance_size;
/* Round instance size up to whole pointer words */
3815 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3816 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3817 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3820 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3824 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_box:
 *
 *   Emit IR boxing value @val of type @klass. Returns the instruction
 * producing the boxed object reference, or NULL with the cfg exception
 * set on error. Three major cases:
 * - Nullable<T>: call Nullable<T>.Box (directly, or indirectly via the
 *   RGCTX when context_used != 0);
 * - gsharedvt classes: a runtime three-way dispatch on CLASS_BOX_TYPE
 *   (value type / reference type / nullable), mirroring
 *   handle_unbox_gsharedvt ();
 * - plain value types: allocate and store the value after the header.
 * *out_cbb receives the basic block current after any emitted merge.
 */
3828 * Returns NULL and set the cfg exception on error.
3831 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoBasicBlock **out_cbb)
3833 MonoInst *alloc, *ins;
3835 *out_cbb = cfg->cbb;
3837 if (mono_class_is_nullable (klass)) {
3838 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3841 /* FIXME: What if the class is shared? We might not
3842 have to get the method address from the RGCTX. */
3843 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3844 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3845 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3847 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3849 gboolean pass_vtable, pass_mrgctx;
3850 MonoInst *rgctx_arg = NULL;
3852 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
/* Nullable<T>.Box is never expected to need a method RGCTX argument */
3853 g_assert (!pass_mrgctx);
3856 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3859 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3862 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
3866 if (mini_is_gsharedvt_klass (cfg, klass)) {
3867 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3868 MonoInst *res, *is_ref, *src_var, *addr;
3871 dreg = alloc_ireg (cfg);
3873 NEW_BBLOCK (cfg, is_ref_bb);
3874 NEW_BBLOCK (cfg, is_nullable_bb);
3875 NEW_BBLOCK (cfg, end_bb);
3876 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
/* box-type 1 => reference type: boxing is the identity operation */
3877 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3878 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
/* box-type 2 => Nullable<T>: call Nullable<T>.Box through a hand-built sig */
3880 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3881 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Fall-through arm: plain value type — allocate and copy the payload */
3884 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3887 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3888 ins->opcode = OP_STOREV_MEMBASE;
/* All arms must put the result into the same vreg (dreg) before the merge */
3890 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
3891 res->type = STACK_OBJ;
3893 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3896 MONO_START_BB (cfg, is_ref_bb);
3897 addr_reg = alloc_ireg (cfg);
3899 /* val is a vtype, so has to load the value manually */
3900 src_var = get_vreg_to_inst (cfg, val->dreg);
3902 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
3903 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
3904 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
3905 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3908 MONO_START_BB (cfg, is_nullable_bb);
3911 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
3912 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
3914 MonoMethodSignature *box_sig;
3917 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
3918 * construct that method at JIT time, so have to do things by hand.
3920 box_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3921 box_sig->ret = &mono_defaults.object_class->byval_arg;
3922 box_sig->param_count = 1;
3923 box_sig->params [0] = &klass->byval_arg;
3924 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
3925 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
3926 res->type = STACK_OBJ;
3930 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3932 MONO_START_BB (cfg, end_bb);
3934 *out_cbb = cfg->cbb;
/* Simple (non-shared, non-nullable) case: alloc + store of the value */
3938 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3942 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return whether @klass is a generic instantiation (or, under generic
 * sharing, an open generic type) that has at least one covariant or
 * contravariant type parameter instantiated with a reference type.
 * Such classes need the slow, variance-aware cast paths in
 * handle_castclass ()/handle_isinst ().
 */
3949 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
3952 MonoGenericContainer *container;
3953 MonoGenericInst *ginst;
3955 if (klass->generic_class) {
/* Closed instantiation: inspect the actual type arguments */
3956 container = klass->generic_class->container_class->generic_container;
3957 ginst = klass->generic_class->context.class_inst;
3958 } else if (klass->generic_container && context_used) {
/* Open type in shared code: inspect the generic parameters themselves */
3959 container = klass->generic_container;
3960 ginst = container->context.class_inst;
3965 for (i = 0; i < container->type_argc; ++i) {
/* Only variant (in/out) parameters can make casts behave covariantly */
3967 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
3969 type = ginst->type_argv [i];
3970 if (mini_type_is_reference (cfg, type))
/*
 * is_complex_isinst:
 *   TRUE when @klass needs the slow cache-based isinst/castclass helper
 * instead of inline checks: interfaces, arrays, nullables, MBR classes,
 * sealed classes and open type variables. NOTE: the leading "TRUE ||"
 * deliberately forces the slow path for every class until the FIXME
 * below is resolved — the rest of the expression is currently dead.
 */
3976 // FIXME: This doesn't work yet (class libs tests fail?)
3977 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/*
 * handle_castclass:
 *
 *   Emit IR for the CIL 'castclass' operation: check that @src is null or
 * an instance of @klass, throwing InvalidCastException otherwise.
 * Complex classes (see is_complex_isinst, currently all of them) go
 * through the cache-backed managed castclass wrapper; otherwise inline
 * vtable/klass comparisons are emitted. Returns NULL and sets the cfg
 * exception on error.
 */
3980 * Returns NULL and set the cfg exception on error.
3983 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3985 MonoBasicBlock *is_null_bb;
3986 int obj_reg = src->dreg;
3987 int vtable_reg = alloc_preg (cfg);
3988 MonoInst *klass_inst = NULL;
/* Slow path: managed helper with a per-callsite cache (handles variance too) */
3993 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
3994 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
3995 MonoInst *cache_ins;
3997 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4002 /* klass - it's the second element of the cache entry*/
4003 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4006 args [2] = cache_ins;
4008 return mono_emit_method_call (cfg, mono_castclass, args, NULL);
/* Shared generics: the expected class is fetched through the RGCTX */
4011 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4014 NEW_BBLOCK (cfg, is_null_bb);
/* null always passes castclass */
4016 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4017 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
4019 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4021 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4022 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4023 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
4025 int klass_reg = alloc_preg (cfg);
4027 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array class: exact pointer comparison is sufficient */
4029 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4030 /* the remoting code is broken, access the class for now */
4031 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4032 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4034 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4035 cfg->exception_ptr = klass;
4038 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4040 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4041 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4043 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* General case: walk the class hierarchy */
4045 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4046 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4050 MONO_START_BB (cfg, is_null_bb);
4052 reset_cast_details (cfg);
/*
 * handle_isinst:
 *
 *   Emit IR for the CIL 'isinst' operation: produce @src unchanged when it
 * is null or an instance of @klass, NULL otherwise (no exception thrown).
 * Complex classes (see is_complex_isinst, currently all of them) go
 * through the cache-backed managed isinst wrapper; otherwise inline
 * checks are emitted, dispatching on interface / array / nullable /
 * sealed / general class shapes. Returns NULL and sets the cfg
 * exception on error.
 */
4058 * Returns NULL and set the cfg exception on error.
4061 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4064 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
4065 int obj_reg = src->dreg;
4066 int vtable_reg = alloc_preg (cfg);
4067 int res_reg = alloc_ireg_ref (cfg);
4068 MonoInst *klass_inst = NULL;
/* Slow path: managed helper with a per-callsite cache (handles variance too) */
4073 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4074 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4075 MonoInst *cache_ins;
4077 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4082 /* klass - it's the second element of the cache entry*/
4083 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4086 args [2] = cache_ins;
4088 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
4091 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4094 NEW_BBLOCK (cfg, is_null_bb);
4095 NEW_BBLOCK (cfg, false_bb);
4096 NEW_BBLOCK (cfg, end_bb);
4098 /* Do the assignment at the beginning, so the other assignment can be if converted */
4099 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4100 ins->type = STACK_OBJ;
/* null input: result is null — jump straight to the pass-through block */
4103 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4104 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4106 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4108 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4109 g_assert (!context_used);
4110 /* the is_null_bb target simply copies the input register to the output */
4111 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4113 int klass_reg = alloc_preg (cfg);
/* Array case: match rank first, then the element (cast) class */
4116 int rank_reg = alloc_preg (cfg);
4117 int eclass_reg = alloc_preg (cfg);
4119 g_assert (!context_used);
4120 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4121 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4122 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4123 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4124 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Special-case element types whose assignability rules involve enums */
4125 if (klass->cast_class == mono_defaults.object_class) {
4126 int parent_reg = alloc_preg (cfg);
4127 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
4128 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4129 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4130 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4131 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
4132 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4133 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4134 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4135 } else if (klass->cast_class == mono_defaults.enum_class) {
4136 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4137 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4138 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4139 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4141 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4142 /* Check that the object is a vector too */
4143 int bounds_reg = alloc_preg (cfg);
4144 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
4145 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4146 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4149 /* the is_null_bb target simply copies the input register to the output */
4150 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4152 } else if (mono_class_is_nullable (klass)) {
4153 g_assert (!context_used);
4154 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4155 /* the is_null_bb target simply copies the input register to the output */
4156 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed class: an exact pointer comparison decides the result */
4158 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4159 g_assert (!context_used);
4160 /* the remoting code is broken, access the class for now */
4161 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4162 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4164 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4165 cfg->exception_ptr = klass;
4168 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4170 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4171 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4173 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4174 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
/* General class: walk the hierarchy (klass_inst used under generic sharing) */
4176 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4177 /* the is_null_bb target simply copies the input register to the output */
4178 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* Failure: result register becomes NULL */
4183 MONO_START_BB (cfg, false_bb);
4185 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4186 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4188 MONO_START_BB (cfg, is_null_bb);
4190 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the remoting-aware isinst check (see the result encoding
 * in the comment below): unlike handle_isinst () this returns a small
 * integer code rather than the object, with code 2 flagging transparent
 * proxies whose remote type cannot be decided at JIT time. When
 * remoting support is compiled out, reaching a proxy path aborts.
 */
4196 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4198 /* This opcode takes as input an object reference and a class, and returns:
4199 0) if the object is an instance of the class,
4200 1) if the object is not instance of the class,
4201 2) if the object is a proxy whose type cannot be determined */
4204 #ifndef DISABLE_REMOTING
4205 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4207 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4209 int obj_reg = src->dreg;
4210 int dreg = alloc_ireg (cfg);
4212 #ifndef DISABLE_REMOTING
4213 int klass_reg = alloc_preg (cfg);
4216 NEW_BBLOCK (cfg, true_bb);
4217 NEW_BBLOCK (cfg, false_bb);
4218 NEW_BBLOCK (cfg, end_bb);
4219 #ifndef DISABLE_REMOTING
4220 NEW_BBLOCK (cfg, false2_bb);
4221 NEW_BBLOCK (cfg, no_proxy_bb);
/* null is "not an instance" (result 1) */
4224 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4225 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
4227 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4228 #ifndef DISABLE_REMOTING
4229 NEW_BBLOCK (cfg, interface_fail_bb);
4232 tmp_reg = alloc_preg (cfg);
4233 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4234 #ifndef DISABLE_REMOTING
/* Interface test first; on failure, check whether the object is a proxy */
4235 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4236 MONO_START_BB (cfg, interface_fail_bb);
4237 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4239 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
/* Proxy without custom type info: its type cannot be decided here (result 2) */
4241 tmp_reg = alloc_preg (cfg);
4242 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4243 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4244 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4246 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
4249 #ifndef DISABLE_REMOTING
4250 tmp_reg = alloc_preg (cfg);
4251 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4252 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4254 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Proxy: test against the remote class recorded in the proxy */
4255 tmp_reg = alloc_preg (cfg);
4256 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4257 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4259 tmp_reg = alloc_preg (cfg);
4260 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4261 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4262 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4264 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4265 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4267 MONO_START_BB (cfg, no_proxy_bb);
4269 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4271 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
/* Result blocks: 1 = not an instance, 2 = undecidable proxy, 0 = instance */
4275 MONO_START_BB (cfg, false_bb);
4277 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4278 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4280 #ifndef DISABLE_REMOTING
4281 MONO_START_BB (cfg, false2_bb);
4283 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4284 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4287 MONO_START_BB (cfg, true_bb);
4289 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4291 MONO_START_BB (cfg, end_bb);
/* Dummy ICONST gives the caller a MonoInst carrying the result vreg */
4294 MONO_INST_NEW (cfg, ins, OP_ICONST);
4296 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the remoting-aware castclass check (result encoding in the
 * comment below): returns 0 for a successful cast, 1 for a transparent
 * proxy whose type cannot be decided at JIT time, and throws
 * InvalidCastException otherwise. When remoting support is compiled
 * out, reaching a proxy path aborts.
 */
4302 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4304 /* This opcode takes as input an object reference and a class, and returns:
4305 0) if the object is an instance of the class,
4306 1) if the object is a proxy whose type cannot be determined
4307 an InvalidCastException exception is thrown otherwhise*/
4310 #ifndef DISABLE_REMOTING
4311 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4313 MonoBasicBlock *ok_result_bb;
4315 int obj_reg = src->dreg;
4316 int dreg = alloc_ireg (cfg);
4317 int tmp_reg = alloc_preg (cfg);
4319 #ifndef DISABLE_REMOTING
4320 int klass_reg = alloc_preg (cfg);
4321 NEW_BBLOCK (cfg, end_bb);
4324 NEW_BBLOCK (cfg, ok_result_bb);
/* null always casts successfully (result 0) */
4326 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4327 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
4329 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4331 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4332 #ifndef DISABLE_REMOTING
4333 NEW_BBLOCK (cfg, interface_fail_bb);
4335 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Interface test first; on failure, only a proxy may still be acceptable */
4336 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4337 MONO_START_BB (cfg, interface_fail_bb);
4338 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Non-proxy that failed the interface test: throws in mini_emit_class_check */
4340 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
4342 tmp_reg = alloc_preg (cfg);
4343 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4344 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4345 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Proxy with custom type info: undecidable here (result 1) */
4347 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4348 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4350 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4351 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4352 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
4355 #ifndef DISABLE_REMOTING
4356 NEW_BBLOCK (cfg, no_proxy_bb);
4358 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4359 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4360 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Proxy: test against the remote class recorded in the proxy */
4362 tmp_reg = alloc_preg (cfg);
4363 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4364 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4366 tmp_reg = alloc_preg (cfg);
4367 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4368 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4369 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4371 NEW_BBLOCK (cfg, fail_1_bb);
4373 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
4375 MONO_START_BB (cfg, fail_1_bb);
4377 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4378 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4380 MONO_START_BB (cfg, no_proxy_bb);
/* Ordinary object: throwing castclass check */
4382 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4384 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
4388 MONO_START_BB (cfg, ok_result_bb);
4390 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4392 #ifndef DISABLE_REMOTING
4393 MONO_START_BB (cfg, end_bb);
/* Dummy ICONST gives the caller a MonoInst carrying the result vreg */
4397 MONO_INST_NEW (cfg, ins, OP_ICONST);
4399 ins->type = STACK_I4;
/*
 * handle_delegate_ctor:
 *
 *   Emit IR creating a delegate of class @klass bound to @method with
 * receiver @target, inlining what mono_delegate_ctor () would do at run
 * time: allocate the delegate, fill in its target/method fields (with
 * write barriers when the GC requires them), publish a per-domain slot
 * where the compiled code pointer will be stored once @method is
 * JITted, and install the delegate invoke trampoline. Returns NULL and
 * sets the cfg exception on error.
 */
4405 * Returns NULL and set the cfg exception on error.
4407 static G_GNUC_UNUSED MonoInst*
4408 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
4412 gpointer *trampoline;
4413 MonoInst *obj, *method_ins, *tramp_ins;
4417 obj = handle_alloc (cfg, klass, FALSE, 0);
4421 /* Inline the contents of mono_delegate_ctor */
4423 /* Set target field */
4424 /* Optimize away setting of NULL target */
4425 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
4426 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
4427 if (cfg->gen_write_barriers) {
4428 dreg = alloc_preg (cfg);
4429 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target));
4430 emit_write_barrier (cfg, ptr, target);
4434 /* Set method field */
4435 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4436 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4437 if (cfg->gen_write_barriers) {
4438 dreg = alloc_preg (cfg);
4439 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method));
4440 emit_write_barrier (cfg, ptr, method_ins);
4443 * To avoid looking up the compiled code belonging to the target method
4444 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4445 * store it, and we fill it after the method has been compiled.
4447 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4448 MonoInst *code_slot_ins;
4451 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* One code slot per (domain, method); the hash is guarded by the domain lock */
4453 domain = mono_domain_get ();
4454 mono_domain_lock (domain);
4455 if (!domain_jit_info (domain)->method_code_hash)
4456 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4457 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4459 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
4460 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4462 mono_domain_unlock (domain);
4464 if (cfg->compile_aot)
4465 EMIT_NEW_AOTCONST (cfg, code_slot_ins, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
4467 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
4469 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
4472 /* Set invoke_impl field */
4473 if (cfg->compile_aot) {
4474 MonoClassMethodPair *del_tramp;
/* AOT cannot embed a trampoline address; emit a patchable (klass, method) pair */
4476 del_tramp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoClassMethodPair));
4477 del_tramp->klass = klass;
4478 del_tramp->method = context_used ? NULL : method;
4479 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
4481 trampoline = mono_create_delegate_trampoline_with_method (cfg->domain, klass, context_used ? NULL : method);
4482 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4484 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
4486 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit the allocation of a multi-dimensional array of rank @rank with the
 * dimension arguments in @sp, by calling the vararg mono_array_new_va ()
 * icall through its wrapper. Marks the method as having varargs and
 * disables LLVM, which cannot compile the vararg call.
 */
4492 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4494 MonoJitICallInfo *info;
4496 /* Need to register the icall so it gets an icall wrapper */
4497 info = mono_get_array_new_va_icall (rank);
4499 cfg->flags |= MONO_CFG_HAS_VARARGS;
4501 /* mono_array_new_va () needs a vararg calling convention */
4502 cfg->disable_llvm = TRUE;
4504 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4505 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Emit the OP_LOAD_GOTADDR instruction that materializes the GOT address
 * into the got_var, once per method. The load is inserted at the very
 * start of the entry basic block so every later use sees it, and a
 * dummy use is appended to the exit block to keep the variable alive
 * for the whole method (see the comment below).
 */
4509 mono_emit_load_got_addr (MonoCompile *cfg)
4511 MonoInst *getaddr, *dummy_use;
/* Nothing to do without a got_var, or if it was already materialized */
4513 if (!cfg->got_var || cfg->got_var_allocated)
4516 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
4517 getaddr->cil_code = cfg->header->code;
4518 getaddr->dreg = cfg->got_var->dreg;
4520 /* Add it to the start of the first bblock */
4521 if (cfg->bb_entry->code) {
/* Prepend: link in front of the existing instruction list */
4522 getaddr->next = cfg->bb_entry->code;
4523 cfg->bb_entry->code = getaddr;
4526 MONO_ADD_INS (cfg->bb_entry, getaddr);
4528 cfg->got_var_allocated = TRUE;
4531 * Add a dummy use to keep the got_var alive, since real uses might
4532 * only be generated by the back ends.
4533 * Add it to end_bblock, so the variable's lifetime covers the whole
4535 * It would be better to make the usage of the got var explicit in all
4536 * cases when the backend needs it (i.e. calls, throw etc.), so this
4537 * wouldn't be needed.
4539 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
4540 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Cached inlining size limit (code_size threshold), initialized lazily from
 * the MONO_INLINELIMIT environment variable or INLINE_LENGTH_LIMIT. */
4543 static int inline_limit;
4544 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *   Decide whether METHOD may be inlined into the method being compiled in
 *   CFG.  Rejects generic-sharing contexts, deep inline nesting, methods
 *   flagged noinline/synchronized, MarshalByRef classes, oversized bodies
 *   (unless AggressiveInlining), classes whose cctor would have to run,
 *   methods with declarative security, and (on soft-float targets) methods
 *   with R4 parameters or return values.
 *   NOTE(review): return statements and several braces were dropped by
 *   extraction; the surviving lines show only the conditions tested.
 */
4547 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
4549 MonoMethodHeaderSummary header;
4551 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4552 MonoMethodSignature *sig = mono_method_signature (method);
4556 if (cfg->generic_sharing_context)
4559 if (cfg->inline_depth > 10)
4562 #ifdef MONO_ARCH_HAVE_LMF_OPS
/* With LMF ops, small icall/pinvoke methods can be inlined despite being
 * runtime-implemented, as long as they do not return a struct. */
4563 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
4564 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
4565 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
4570 if (!mono_method_get_header_summary (method, &header))
4573 /*runtime, icall and pinvoke are checked by summary call*/
4574 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
4575 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
4576 (mono_class_is_marshalbyref (method->klass)) ||
4580 /* also consider num_locals? */
4581 /* Do the size check early to avoid creating vtables */
4582 if (!inline_limit_inited) {
4583 if (g_getenv ("MONO_INLINELIMIT"))
4584 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"))
4586 inline_limit = INLINE_LENGTH_LIMIT;
4587 inline_limit_inited = TRUE;
4589 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
4593 * if we can initialize the class of the method right away, we do,
4594 * otherwise we don't allow inlining if the class needs initialization,
4595 * since it would mean inserting a call to mono_runtime_class_init()
4596 * inside the inlined code
4598 if (!(cfg->opt & MONO_OPT_SHARED)) {
4599 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
4600 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
4601 vtable = mono_class_vtable (cfg->domain, method->klass);
4604 if (!cfg->compile_aot)
4605 mono_runtime_class_init (vtable);
4606 } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
4607 if (cfg->run_cctors && method->klass->has_cctor) {
4608 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
4609 if (!method->klass->runtime_info)
4610 /* No vtable created yet */
4612 vtable = mono_class_vtable (cfg->domain, method->klass);
4615 /* This makes so that inline cannot trigger */
4616 /* .cctors: too many apps depend on them */
4617 /* running with a specific order... */
4618 if (! vtable->initialized)
4620 mono_runtime_class_init (vtable);
4622 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
4623 if (!method->klass->runtime_info)
4624 /* No vtable created yet */
4626 vtable = mono_class_vtable (cfg->domain, method->klass);
4629 if (!vtable->initialized)
4634 * If we're compiling for shared code
4635 * the cctor will need to be run at aot method load time, for example,
4636 * or at the end of the compilation of the inlining method.
4638 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
4643 * CAS - do not inline methods with declarative security
4644 * Note: this has to be before any possible return TRUE;
4646 if (mono_security_method_has_declsec (method))
4649 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4650 if (mono_arch_is_soft_float ()) {
/* Soft float: R4 arguments/returns cannot be handled inline. */
4652 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4654 for (i = 0; i < sig->param_count; ++i)
4655 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *   Return whether a static-field access in METHOD on KLASS must be
 *   preceded by running the class constructor.  JIT mode can skip it once
 *   VTABLE is already initialized; BeforeFieldInit classes and accesses
 *   from within the same class's non-static methods also avoid the check.
 *   NOTE(review): return-value lines were dropped by extraction.
 */
4664 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
4666 if (!cfg->compile_aot) {
4668 if (vtable->initialized)
4672 if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
4673 if (cfg->method == method)
4677 if (!mono_class_needs_cctor_run (klass, method))
4680 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
4681 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *   Emit IR computing the address of element INDEX of the one-dimensional
 *   array ARR whose elements are of class KLASS, with an optional bounds
 *   check (BCHECK).  Uses an x86/amd64 LEA fast path for power-of-two
 *   element sizes, and a runtime element size from the rgctx for
 *   gsharedvt variable-size classes.
 */
4688 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4692 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
4695 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
4698 mono_class_init (klass);
4699 size = mono_class_array_element_size (klass);
4702 mult_reg = alloc_preg (cfg);
4703 array_reg = arr->dreg;
4704 index_reg = index->dreg;
4706 #if SIZEOF_REGISTER == 8
4707 /* The array reg is 64 bits but the index reg is only 32 */
4708 if (COMPILE_LLVM (cfg)) {
/* LLVM handles the sign extension itself. */
4710 index2_reg = index_reg;
4712 index2_reg = alloc_preg (cfg);
4713 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32-bit targets: narrow an I8 index to I4 first. */
4716 if (index->type == STACK_I8) {
4717 index2_reg = alloc_preg (cfg);
4718 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4720 index2_reg = index_reg;
4725 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
4727 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Fast path: LEA can fold a 1/2/4/8 scale and the vector offset. */
4728 if (size == 1 || size == 2 || size == 4 || size == 8) {
4729 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4731 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
4732 ins->klass = mono_class_get_element_class (klass);
4733 ins->type = STACK_MP;
/* Generic path: addr = arr + index * size + offsetof (vector). */
4739 add_reg = alloc_ireg_mp (cfg);
4742 MonoInst *rgctx_ins;
4745 g_assert (cfg->generic_sharing_context);
4746 context_used = mini_class_check_context_used (cfg, klass);
4747 g_assert (context_used);
/* gsharedvt: element size is only known at run time, fetch it from the
 * rgctx and multiply dynamically. */
4748 rgctx_ins = emit_get_gsharedvt_info (cfg, &klass->byval_arg, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
4749 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
4751 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
4753 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4754 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4755 ins->klass = mono_class_get_element_class (klass);
4756 ins->type = STACK_MP;
4757 MONO_ADD_INS (cfg->cbb, ins);
4762 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *   Emit IR computing the element address of a rank-2 (two-dimensional)
 *   array: range-check both indexes against the MonoArrayBounds pair,
 *   subtract the lower bounds, then compute
 *   addr = arr + ((idx1 * len2) + idx2) * size + offsetof (vector).
 *   Only compiled on targets with native mul/div (depends on OP_LMUL).
 */
4764 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4766 int bounds_reg = alloc_preg (cfg);
4767 int add_reg = alloc_ireg_mp (cfg);
4768 int mult_reg = alloc_preg (cfg);
4769 int mult2_reg = alloc_preg (cfg);
4770 int low1_reg = alloc_preg (cfg);
4771 int low2_reg = alloc_preg (cfg);
4772 int high1_reg = alloc_preg (cfg);
4773 int high2_reg = alloc_preg (cfg);
4774 int realidx1_reg = alloc_preg (cfg);
4775 int realidx2_reg = alloc_preg (cfg);
4776 int sum_reg = alloc_preg (cfg);
4777 int index1, index2, tmpreg;
4781 mono_class_init (klass);
4782 size = mono_class_array_element_size (klass);
4784 index1 = index_ins1->dreg;
4785 index2 = index_ins2->dreg;
4787 #if SIZEOF_REGISTER == 8
4788 /* The array reg is 64 bits but the index reg is only 32 */
4789 if (COMPILE_LLVM (cfg)) {
/* LLVM performs the widening itself; sign-extend manually otherwise. */
4792 tmpreg = alloc_preg (cfg);
4793 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
4795 tmpreg = alloc_preg (cfg);
4796 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
4800 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
4804 /* range checking */
4805 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4806 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* First dimension: realidx1 = index1 - lower_bound; throw if it is not
 * strictly below the dimension length (unsigned compare). */
4808 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4809 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4810 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4811 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4812 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4813 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4814 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Second dimension: same check against bounds[1]. */
4816 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4817 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4818 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4819 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4820 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
4821 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4822 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Row-major address computation. */
4824 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4825 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4826 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4827 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4828 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4830 ins->type = STACK_MP;
4832 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *   Emit the element-address computation for an array accessor method
 *   CMETHOD (Get/Set/Address).  Rank is derived from the signature
 *   (stores carry one extra value argument).  Rank 1 and — with
 *   intrinsics enabled — rank 2 get inline fast paths; other ranks call
 *   the marshalled Address wrapper.
 */
4839 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4843 MonoMethod *addr_method;
4846 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4849 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
4851 #ifndef MONO_ARCH_EMULATE_MUL_DIV
4852 /* emit_ldelema_2 depends on OP_LMUL */
4853 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
4854 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* General case: call the generated Address() wrapper for this rank/size. */
4858 element_size = mono_class_array_element_size (cmethod->klass->element_class);
4859 addr_method = mono_marshal_get_array_address (rank, element_size);
4860 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint/Debugger.Break () request. */
4865 static MonoBreakPolicy
4866 always_insert_breakpoint (MonoMethod *method)
4868 return MONO_BREAK_POLICY_ALWAYS;
/* Current policy callback; replaced via mono_set_break_policy (). */
4871 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4874 * mono_set_break_policy:
4875 * policy_callback: the new callback function
4877 * Allow embedders to decide wherther to actually obey breakpoint instructions
4878 * (both break IL instructions and Debugger.Break () method calls), for example
4879 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4880 * untrusted or semi-trusted code.
4882 * @policy_callback will be called every time a break point instruction needs to
4883 * be inserted with the method argument being the method that calls Debugger.Break()
4884 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4885 * if it wants the breakpoint to not be effective in the given method.
4886 * #MONO_BREAK_POLICY_ALWAYS is the default.
4889 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
4891 if (policy_callback)
4892 break_policy_func = policy_callback;
/* NULL resets to the default always-break policy. */
4894 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint: (sic — long-standing identifier typo, callers
 * use this spelling so it cannot be renamed here)
 *   Consult the installed break policy for METHOD and return whether a
 *   breakpoint should actually be emitted.  MONO_BREAK_POLICY_ON_DBG is
 *   no longer supported and falls through with a warning.
 *   NOTE(review): the per-case return statements were dropped by extraction.
 */
4898 should_insert_brekpoint (MonoMethod *method) {
4899 switch (break_policy_func (method)) {
4900 case MONO_BREAK_POLICY_ALWAYS:
4902 case MONO_BREAK_POLICY_NEVER:
4904 case MONO_BREAK_POLICY_ON_DBG:
4905 g_warning ("mdb no longer supported");
4908 g_warning ("Incorrect value returned from break policy callback");
4913 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *   Inline the Array Get/SetGenericValueImpl icalls as a direct
 *   load/store through the element address.  args[0] = array,
 *   args[1] = index, args[2] = value pointer; IS_SET selects the
 *   store direction.  A write barrier is emitted when storing
 *   reference-typed elements.
 */
4915 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4917 MonoInst *addr, *store, *load;
4918 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4920 /* the bounds check is already done by the callers */
4921 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* is_set: copy *args[2] into the array slot. */
4923 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4924 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
4925 if (mini_type_is_reference (cfg, fsig->params [2]))
4926 emit_write_barrier (cfg, addr, load);
/* get: copy the array slot into *args[2]. */
4928 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4929 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* Whether KLASS is a reference type (helper over mini_type_is_reference). */
4936 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4938 return mini_type_is_reference (cfg, &klass->byval_arg);
/*
 * emit_array_store:
 *   Emit IR for storing sp[2] into array sp[0] at index sp[1].  For
 *   reference-element stores of a possibly-non-null value (with safety
 *   checks on) this goes through the virtual stelemref helper, which
 *   performs the array covariance check.  Otherwise the address is
 *   computed inline, with a constant-index fast path and a write
 *   barrier for reference elements.
 */
4942 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
4944 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
4945 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
/* Covariant reference store: delegate to the stelemref marshal helper. */
4946 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
4947 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
4948 MonoInst *iargs [3];
4951 mono_class_setup_vtable (obj_array);
4952 g_assert (helper->slot);
4954 if (sp [0]->type != STACK_OBJ)
4956 if (sp [2]->type != STACK_OBJ)
4963 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
4967 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
/* gsharedvt element size is dynamic: store through a computed address
 * using a variable-size store opcode. */
4970 // FIXME-VT: OP_ICONST optimization
4971 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
4972 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
4973 ins->opcode = OP_STOREV_MEMBASE;
4974 } else if (sp [1]->opcode == OP_ICONST) {
/* Constant index: fold the offset into the store instruction. */
4975 int array_reg = sp [0]->dreg;
4976 int index_reg = sp [1]->dreg;
4977 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
4980 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
4981 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
4983 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
4984 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
4985 if (generic_class_is_reference_type (cfg, klass))
4986 emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 *   Inline the Array.UnsafeStore/UnsafeLoad intrinsics: an element store
 *   (via emit_array_store without safety checks) or an element load
 *   through the computed address, with no bounds check.
 */
4993 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
/* Element class comes from the value parameter for stores, from the
 * return type for loads. */
4998 eklass = mono_class_from_mono_type (fsig->params [2]);
5000 eklass = mono_class_from_mono_type (fsig->ret);
5004 return emit_array_store (cfg, eklass, args, FALSE);
5006 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5007 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
/*
 * mini_emit_inst_for_ctor:
 *   Intrinsic expansion hook for constructor calls: try SIMD intrinsics
 *   first (when MONO_OPT_SIMD is enabled), then native-types intrinsics.
 */
5013 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5015 #ifdef MONO_ARCH_SIMD_INTRINSICS
5016 MonoInst *ins = NULL;
5018 if (cfg->opt & MONO_OPT_SIMD) {
5019 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5025 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/* Append an OP_MEMORY_BARRIER of the given KIND to the current bblock. */
5029 emit_memory_barrier (MonoCompile *cfg, int kind)
5031 MonoInst *ins = NULL;
5032 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5033 MONO_ADD_INS (cfg->cbb, ins);
5034 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *   Intrinsic expansions that only the LLVM backend supports: Math.Sin/
 *   Cos/Sqrt/Abs(double) mapped to float opcodes, and (with MONO_OPT_CMOV)
 *   integer Math.Min/Max mapped to min/max opcodes.
 *   NOTE(review): the lines assigning the specific opcodes (e.g. OP_SIN,
 *   OP_IMIN) were dropped by extraction; only the dispatch remains.
 */
5040 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5042 MonoInst *ins = NULL;
5045 /* The LLVM backend supports these intrinsics */
5046 if (cmethod->klass == mono_defaults.math_class) {
5047 if (strcmp (cmethod->name, "Sin") == 0) {
5049 } else if (strcmp (cmethod->name, "Cos") == 0) {
5051 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
5053 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
/* Unary float intrinsic: one freg in, one freg out. */
5058 MONO_INST_NEW (cfg, ins, opcode);
5059 ins->type = STACK_R8;
5060 ins->dreg = mono_alloc_freg (cfg);
5061 ins->sreg1 = args [0]->dreg;
5062 MONO_ADD_INS (cfg->cbb, ins);
5066 if (cfg->opt & MONO_OPT_CMOV) {
5067 if (strcmp (cmethod->name, "Min") == 0) {
5068 if (fsig->params [0]->type == MONO_TYPE_I4)
5070 if (fsig->params [0]->type == MONO_TYPE_U4)
5071 opcode = OP_IMIN_UN;
5072 else if (fsig->params [0]->type == MONO_TYPE_I8)
5074 else if (fsig->params [0]->type == MONO_TYPE_U8)
5075 opcode = OP_LMIN_UN;
5076 } else if (strcmp (cmethod->name, "Max") == 0) {
5077 if (fsig->params [0]->type == MONO_TYPE_I4)
5079 if (fsig->params [0]->type == MONO_TYPE_U4)
5080 opcode = OP_IMAX_UN;
5081 else if (fsig->params [0]->type == MONO_TYPE_I8)
5083 else if (fsig->params [0]->type == MONO_TYPE_U8)
5084 opcode = OP_LMAX_UN;
/* Binary integer min/max: result width follows the parameter type. */
5089 MONO_INST_NEW (cfg, ins, opcode);
5090 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5091 ins->dreg = mono_alloc_ireg (cfg);
5092 ins->sreg1 = args [0]->dreg;
5093 ins->sreg2 = args [1]->dreg;
5094 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *   Intrinsics that are safe under generic sharing: only the
 *   Array.UnsafeStore/UnsafeLoad pair is handled here.
 */
5102 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5104 if (cmethod->klass == mono_defaults.array_class) {
5105 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5106 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5107 if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5108 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
/*
 * mini_emit_inst_for_method:
 *   Central intrinsic dispatcher: recognize well-known BCL methods
 *   (String, Object, Array, RuntimeHelpers, Thread, Monitor, Interlocked,
 *   Debugger, Environment, Math, ObjC Selector) and emit specialized IR
 *   instead of a call.  Falls through to SIMD, native-types, LLVM and
 *   arch-specific intrinsic hooks; returns NULL when no intrinsic applies.
 *   NOTE(review): extraction dropped scattered lines (returns, else
 *   branches, some opcode assignments); comments below describe only what
 *   the surviving lines show.
 */
5115 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5117 MonoInst *ins = NULL;
/* Lazily-resolved System.Runtime.CompilerServices.RuntimeHelpers class. */
5119 static MonoClass *runtime_helpers_class = NULL;
5120 if (! runtime_helpers_class)
5121 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
5122 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* ---- System.String intrinsics ---- */
5124 if (cmethod->klass == mono_defaults.string_class) {
5125 if (strcmp (cmethod->name, "get_Chars") == 0) {
/* str[i]: bounds check against MonoString.length, then a 16-bit load
 * from the chars array (LEA fast path on x86/amd64). */
5126 int dreg = alloc_ireg (cfg);
5127 int index_reg = alloc_preg (cfg);
5128 int mult_reg = alloc_preg (cfg);
5129 int add_reg = alloc_preg (cfg);
5131 #if SIZEOF_REGISTER == 8
5132 /* The array reg is 64 bits but the index reg is only 32 */
5133 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5135 index_reg = args [1]->dreg;
5137 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5139 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5140 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
5141 add_reg = ins->dreg;
5142 /* Avoid a warning */
5144 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5147 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5148 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5149 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5150 add_reg, G_STRUCT_OFFSET (MonoString, chars));
5152 type_from_op (ins, NULL, NULL);
5154 } else if (strcmp (cmethod->name, "get_Length") == 0) {
/* str.Length: dedicated OP_STRLEN, decomposed later. */
5155 int dreg = alloc_ireg (cfg);
5156 /* Decompose later to allow more optimizations */
5157 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5158 ins->type = STACK_I4;
5159 ins->flags |= MONO_INST_FAULT;
5160 cfg->cbb->has_array_access = TRUE;
5161 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5164 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
/* Internal char store; no bounds check needed (done by corlib). */
5165 int mult_reg = alloc_preg (cfg);
5166 int add_reg = alloc_preg (cfg);
5168 /* The corlib functions check for oob already. */
5169 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
5170 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5171 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
5172 return cfg->cbb->last_ins;
/* ---- System.Object intrinsics ---- */
5175 } else if (cmethod->klass == mono_defaults.object_class) {
5177 if (strcmp (cmethod->name, "GetType") == 0) {
/* obj.GetType(): load vtable, then its cached MonoType. */
5178 int dreg = alloc_ireg_ref (cfg);
5179 int vt_reg = alloc_preg (cfg);
5180 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
5181 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
5182 type_from_op (ins, NULL, NULL);
5185 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
5186 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
/* Address-based hash (non-moving GC only): (addr >> 3) * 2654435761. */
5187 int dreg = alloc_ireg (cfg);
5188 int t1 = alloc_ireg (cfg);
5190 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5191 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5192 ins->type = STACK_I4;
5196 } else if (strcmp (cmethod->name, ".ctor") == 0) {
/* Object..ctor() does nothing: emit a NOP. */
5197 MONO_INST_NEW (cfg, ins, OP_NOP);
5198 MONO_ADD_INS (cfg->cbb, ins);
/* ---- System.Array intrinsics ---- */
5202 } else if (cmethod->klass == mono_defaults.array_class) {
5203 if (!cfg->gsharedvt && strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
5204 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
5206 #ifndef MONO_BIG_ARRAYS
5208 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5211 if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
/* GetLength(0)/GetLowerBound(0) with a constant 0 dimension: branch on
 * whether bounds == NULL (szarray) and read the appropriate field. */
5212 int dreg = alloc_ireg (cfg);
5213 int bounds_reg = alloc_ireg_mp (cfg);
5214 MonoBasicBlock *end_bb, *szarray_bb;
5215 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5217 NEW_BBLOCK (cfg, end_bb);
5218 NEW_BBLOCK (cfg, szarray_bb);
5220 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5221 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
5222 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5223 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5224 /* Non-szarray case */
5226 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5227 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
5229 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5230 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5231 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5232 MONO_START_BB (cfg, szarray_bb);
/* szarray: length is max_length; lower bound is always 0. */
5235 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5236 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
5238 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5239 MONO_START_BB (cfg, end_bb);
5241 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5242 ins->type = STACK_I4;
/* Remaining Array intrinsics are all getters. */
5248 if (cmethod->name [0] != 'g')
5251 if (strcmp (cmethod->name, "get_Rank") == 0) {
/* arr.Rank: read the rank byte from the vtable. */
5252 int dreg = alloc_ireg (cfg);
5253 int vtable_reg = alloc_preg (cfg);
5254 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5255 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
5256 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5257 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
5258 type_from_op (ins, NULL, NULL);
5261 } else if (strcmp (cmethod->name, "get_Length") == 0) {
/* arr.Length: read max_length with a fault-on-null load. */
5262 int dreg = alloc_ireg (cfg);
5264 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5265 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
5266 type_from_op (ins, NULL, NULL);
/* ---- RuntimeHelpers ---- */
5271 } else if (cmethod->klass == runtime_helpers_class) {
5273 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
5274 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
/* ---- System.Threading.Thread ---- */
5278 } else if (cmethod->klass == mono_defaults.thread_class) {
5279 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
5280 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5281 MONO_ADD_INS (cfg->cbb, ins);
5283 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
5284 return emit_memory_barrier (cfg, FullBarrier);
/* ---- System.Threading.Monitor (fast paths) ---- */
5286 } else if (cmethod->klass == mono_defaults.monitor_class) {
5288 /* FIXME this should be integrated to the check below once we support the trampoline version */
5289 #if defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
5290 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) {
5291 MonoMethod *fast_method = NULL;
5293 /* Avoid infinite recursion */
5294 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN && !strcmp (cfg->method->name, "FastMonitorEnterV4"))
5297 fast_method = mono_monitor_get_fast_path (cmethod);
5301 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
5305 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
/* Trampoline-based Enter/Exit: pass the object in a fixed register,
 * except under LLVM where the normal calling convention is used. */
5306 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
5309 if (COMPILE_LLVM (cfg)) {
5311 * Pass the argument normally, the LLVM backend will handle the
5312 * calling convention problems.
5314 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5316 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
5317 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5318 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5319 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5322 return (MonoInst*)call;
5323 } else if (strcmp (cmethod->name, "Exit") == 0) {
5326 if (COMPILE_LLVM (cfg)) {
5327 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5329 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
5330 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5331 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5332 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5335 return (MonoInst*)call;
5337 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
5339 MonoMethod *fast_method = NULL;
5341 /* Avoid infinite recursion */
5342 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
5343 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
5344 strcmp (cfg->method->name, "FastMonitorExit") == 0))
5347 if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) ||
5348 strcmp (cmethod->name, "Exit") == 0)
5349 fast_method = mono_monitor_get_fast_path (cmethod);
5353 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
/* ---- System.Threading.Interlocked ---- */
5356 } else if (cmethod->klass->image == mono_defaults.corlib &&
5357 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5358 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
5361 #if SIZEOF_REGISTER == 8
5362 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
5363 /* 64 bit reads are already atomic */
5364 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
5365 ins->dreg = mono_alloc_preg (cfg);
5366 ins->inst_basereg = args [0]->dreg;
5367 ins->inst_offset = 0;
5368 MONO_ADD_INS (cfg->cbb, ins);
5372 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
5373 if (strcmp (cmethod->name, "Increment") == 0) {
/* Increment = atomic add of constant +1. */
5374 MonoInst *ins_iconst;
5377 if (fsig->params [0]->type == MONO_TYPE_I4) {
5378 opcode = OP_ATOMIC_ADD_NEW_I4;
5379 cfg->has_atomic_add_new_i4 = TRUE;
5381 #if SIZEOF_REGISTER == 8
5382 else if (fsig->params [0]->type == MONO_TYPE_I8)
5383 opcode = OP_ATOMIC_ADD_NEW_I8;
5386 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5387 ins_iconst->inst_c0 = 1;
5388 ins_iconst->dreg = mono_alloc_ireg (cfg);
5389 MONO_ADD_INS (cfg->cbb, ins_iconst);
5391 MONO_INST_NEW (cfg, ins, opcode);
5392 ins->dreg = mono_alloc_ireg (cfg);
5393 ins->inst_basereg = args [0]->dreg;
5394 ins->inst_offset = 0;
5395 ins->sreg2 = ins_iconst->dreg;
5396 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5397 MONO_ADD_INS (cfg->cbb, ins);
5399 } else if (strcmp (cmethod->name, "Decrement") == 0) {
/* Decrement = atomic add of constant -1. */
5400 MonoInst *ins_iconst;
5403 if (fsig->params [0]->type == MONO_TYPE_I4) {
5404 opcode = OP_ATOMIC_ADD_NEW_I4;
5405 cfg->has_atomic_add_new_i4 = TRUE;
5407 #if SIZEOF_REGISTER == 8
5408 else if (fsig->params [0]->type == MONO_TYPE_I8)
5409 opcode = OP_ATOMIC_ADD_NEW_I8;
5412 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5413 ins_iconst->inst_c0 = -1;
5414 ins_iconst->dreg = mono_alloc_ireg (cfg);
5415 MONO_ADD_INS (cfg->cbb, ins_iconst);
5417 MONO_INST_NEW (cfg, ins, opcode);
5418 ins->dreg = mono_alloc_ireg (cfg);
5419 ins->inst_basereg = args [0]->dreg;
5420 ins->inst_offset = 0;
5421 ins->sreg2 = ins_iconst->dreg;
5422 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5423 MONO_ADD_INS (cfg->cbb, ins);
5425 } else if (strcmp (cmethod->name, "Add") == 0) {
/* Add: atomic add of the caller-supplied operand. */
5428 if (fsig->params [0]->type == MONO_TYPE_I4) {
5429 opcode = OP_ATOMIC_ADD_NEW_I4;
5430 cfg->has_atomic_add_new_i4 = TRUE;
5432 #if SIZEOF_REGISTER == 8
5433 else if (fsig->params [0]->type == MONO_TYPE_I8)
5434 opcode = OP_ATOMIC_ADD_NEW_I8;
5438 MONO_INST_NEW (cfg, ins, opcode);
5439 ins->dreg = mono_alloc_ireg (cfg);
5440 ins->inst_basereg = args [0]->dreg;
5441 ins->inst_offset = 0;
5442 ins->sreg2 = args [1]->dreg;
5443 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5444 MONO_ADD_INS (cfg->cbb, ins);
5447 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
5449 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
5450 if (strcmp (cmethod->name, "Exchange") == 0) {
/* Exchange: pick I4 or I8 opcode by operand width/pointer size. */
5452 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
5454 if (fsig->params [0]->type == MONO_TYPE_I4) {
5455 opcode = OP_ATOMIC_EXCHANGE_I4;
5456 cfg->has_atomic_exchange_i4 = TRUE;
5458 #if SIZEOF_REGISTER == 8
5459 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
5460 (fsig->params [0]->type == MONO_TYPE_I))
5461 opcode = OP_ATOMIC_EXCHANGE_I8;
5463 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I)) {
5464 opcode = OP_ATOMIC_EXCHANGE_I4;
5465 cfg->has_atomic_exchange_i4 = TRUE;
5471 MONO_INST_NEW (cfg, ins, opcode);
5472 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
5473 ins->inst_basereg = args [0]->dreg;
5474 ins->inst_offset = 0;
5475 ins->sreg2 = args [1]->dreg;
5476 MONO_ADD_INS (cfg->cbb, ins);
5478 switch (fsig->params [0]->type) {
5480 ins->type = STACK_I4;
5484 ins->type = STACK_I8;
5486 case MONO_TYPE_OBJECT:
5487 ins->type = STACK_OBJ;
5490 g_assert_not_reached ();
/* Reference exchange published a new pointer: needs a write barrier. */
5493 if (cfg->gen_write_barriers && is_ref)
5494 emit_write_barrier (cfg, args [0], args [1]);
5496 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
5498 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
5499 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
/* CompareExchange: CAS sized by the comparand type (4 or 8 bytes). */
5501 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
5502 if (fsig->params [1]->type == MONO_TYPE_I4)
5504 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
5505 size = sizeof (gpointer);
5506 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
5509 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
5510 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5511 ins->sreg1 = args [0]->dreg;
5512 ins->sreg2 = args [1]->dreg;
5513 ins->sreg3 = args [2]->dreg;
5514 ins->type = STACK_I4;
5515 MONO_ADD_INS (cfg->cbb, ins);
5516 } else if (size == 8) {
5517 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
5518 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5519 ins->sreg1 = args [0]->dreg;
5520 ins->sreg2 = args [1]->dreg;
5521 ins->sreg3 = args [2]->dreg;
5522 ins->type = STACK_I8;
5523 MONO_ADD_INS (cfg->cbb, ins);
5525 /* g_assert_not_reached (); */
5527 if (cfg->gen_write_barriers && is_ref)
5528 emit_write_barrier (cfg, args [0], args [1]);
5530 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
5532 if (strcmp (cmethod->name, "MemoryBarrier") == 0)
5533 ins = emit_memory_barrier (cfg, FullBarrier);
/* ---- Other corlib classes: Debugger.Break, Environment ---- */
5537 } else if (cmethod->klass->image == mono_defaults.corlib) {
5538 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
5539 && strcmp (cmethod->klass->name, "Debugger") == 0) {
5540 if (should_insert_brekpoint (cfg->method)) {
5541 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
5543 MONO_INST_NEW (cfg, ins, OP_NOP);
5544 MONO_ADD_INS (cfg->cbb, ins);
/* Environment.get_IsRunningOnWindows: compile-time constant. */
5548 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
5549 && strcmp (cmethod->klass->name, "Environment") == 0) {
5551 EMIT_NEW_ICONST (cfg, ins, 1);
5553 EMIT_NEW_ICONST (cfg, ins, 0);
5557 } else if (cmethod->klass == mono_defaults.math_class) {
5559 * There is general branches code for Min/Max, but it does not work for
5561 * http://everything2.com/?node_id=1051618
/* ---- AOT-only ObjC Selector.GetHandle with a literal string ---- */
5563 } else if ((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") || !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) && !strcmp (cmethod->klass->name, "Selector") && !strcmp (cmethod->name, "GetHandle") && cfg->compile_aot && (args [0]->opcode == OP_GOT_ENTRY || args[0]->opcode == OP_AOTCONST)) {
5564 #ifdef MONO_ARCH_HAVE_OBJC_GET_SELECTOR
5566 MonoJumpInfoToken *ji;
5569 cfg->disable_llvm = TRUE;
/* Recover the ldstr token from the AOT const / GOT entry argument. */
5571 if (args [0]->opcode == OP_GOT_ENTRY) {
5572 pi = args [0]->inst_p1;
5573 g_assert (pi->opcode == OP_PATCH_INFO);
5574 g_assert ((int)pi->inst_p1 == MONO_PATCH_INFO_LDSTR);
5577 g_assert ((int)args [0]->inst_p1 == MONO_PATCH_INFO_LDSTR);
5578 ji = args [0]->inst_p0;
5581 NULLIFY_INS (args [0]);
5584 s = mono_ldstr (cfg->domain, ji->image, mono_metadata_token_index (ji->token));
5585 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
5586 ins->dreg = mono_alloc_ireg (cfg);
5588 ins->inst_p0 = mono_string_to_utf8 (s);
5589 MONO_ADD_INS (cfg->cbb, ins);
/* ---- Fallback intrinsic hooks ---- */
5594 #ifdef MONO_ARCH_SIMD_INTRINSICS
5595 if (cfg->opt & MONO_OPT_SIMD) {
5596 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5602 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
5606 if (COMPILE_LLVM (cfg)) {
5607 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
5612 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
5616 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *   Redirect calls to selected runtime methods to JIT-emitted equivalents.
 *   The visible case replaces String.InternalAllocateStr with a direct call
 *   to the GC's managed allocator, but only when allocation profiling
 *   (MONO_PROFILE_ALLOCATIONS) and MONO_OPT_SHARED are both off.
 *   NOTE(review): several lines are elided in this excerpt (e.g. the path
 *   taken when no managed allocator is available, and the fall-through
 *   return) — confirm against the full file.
 */
5619 inline static MonoInst*
5620 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
5621 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
5623 if (method->klass == mono_defaults.string_class) {
5624 /* managed string allocation support */
5625 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
5626 MonoInst *iargs [2];
5627 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
5628 MonoMethod *managed_alloc = NULL;
/* The vtable lookup cannot fail here: System.String is always loadable. */
5630 g_assert (vtable); /* Should not fail since it is System.String */
5631 #ifndef MONO_CROSS_COMPILE
5632 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE);
/* Call the managed allocator with (vtable, length) as arguments. */
5636 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
5637 iargs [1] = args [0];
5638 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *   Before inlining a call, store each evaluation-stack value in SP into a
 *   freshly created local variable so the inlined body can address its
 *   arguments through cfg->args [] like a normal method. For instance
 *   signatures the implicit 'this' (i == 0) gets its type from the stack
 *   slot rather than from sig->params.
 */
5645 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
5647 MonoInst *store, *temp;
5650 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
5651 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
5654 * FIXME: We should use *args++ = sp [0], but that would mean the arg
5655 * would be different than the MonoInst's used to represent arguments, and
5656 * the ldelema implementation can't deal with that.
5657 * Solution: When ldelema is used on an inline argument, create a var for
5658 * it, emit ldelema on that var, and emit the saving code below in
5659 * inline_method () if needed.
5661 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
5662 cfg->args [i] = temp;
5663 /* This uses cfg->args [i] which is set by the preceding line */
5664 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
5665 store->cil_code = sp [0]->cil_code;
5670 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
5671 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
5673 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *   Debugging aid: restrict inlining to callees whose full name starts with
 *   the prefix given in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT environment
 *   variable. The limit is read once and cached in a function-local static.
 *   Returns TRUE when the callee matches (inlining allowed).
 */
5675 check_inline_called_method_name_limit (MonoMethod *called_method)
5678 static const char *limit = NULL;
5680 if (limit == NULL) {
5681 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
5683 if (limit_string != NULL)
5684 limit = limit_string;
/* An empty limit string means "no restriction" — that branch is elided in this excerpt. */
5689 if (limit [0] != '\0') {
5690 char *called_method_name = mono_method_full_name (called_method, TRUE);
5692 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
5693 g_free (called_method_name);
5695 //return (strncmp_result <= 0);
5696 return (strncmp_result == 0);
5703 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *   Counterpart of check_inline_called_method_name_limit for the CALLER:
 *   only allow inlining inside methods whose full name starts with the
 *   prefix in MONO_INLINE_CALLER_METHOD_NAME_LIMIT. The limit is read once
 *   and cached in a function-local static. Returns TRUE when the caller
 *   matches (inlining allowed).
 */
5705 check_inline_caller_method_name_limit (MonoMethod *caller_method)
5708 static const char *limit = NULL;
5710 if (limit == NULL) {
5711 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
5712 if (limit_string != NULL) {
5713 limit = limit_string;
/* An empty limit string means "no restriction" — that branch is elided in this excerpt. */
5719 if (limit [0] != '\0') {
5720 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
5722 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
5723 g_free (caller_method_name);
5725 //return (strncmp_result <= 0);
5726 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *   Emit IR that initializes vreg DREG of type RTYPE to its zero value:
 *   NULL for reference/pointer types, integer 0 for BOOLEAN..U4, 0LL for
 *   I8/U8, 0.0 for R4/R8 (loaded from the shared static r8_0 constant),
 *   and VZERO for value types — including generic instances and type
 *   variables that are known to be valuetypes. The final branch falls back
 *   to a NULL pointer constant.
 */
5734 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
5736 static double r8_0 = 0.0;
5740 rtype = mini_replace_type (rtype);
5744 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
5745 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
5746 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5747 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
5748 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
5749 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
/* Floating point zero is materialized from a static double; R4 is handled as R8 here. */
5750 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5751 ins->type = STACK_R8;
5752 ins->inst_p0 = (void*)&r8_0;
5754 MONO_ADD_INS (cfg->cbb, ins);
5755 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
5756 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
5757 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
5758 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
5759 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
5761 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *   Like emit_init_rvar, but emits OP_DUMMY_* initializations that keep the
 *   IR well-formed without generating real zeroing code. The type dispatch
 *   mirrors emit_init_rvar exactly; any type not matched falls back to a
 *   real initialization via emit_init_rvar.
 */
5766 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
5770 rtype = mini_replace_type (rtype);
5774 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
5775 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
5776 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
5777 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
5778 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
5779 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
5780 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
5781 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
5782 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
5783 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
5784 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
5785 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
5787 emit_init_rvar (cfg, dreg, rtype);
5791 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
/*
 * emit_init_local:
 *   Initialize IL local LOCAL of type TYPE. Under soft-float a fresh vreg is
 *   initialized and then stored into the local; otherwise the local's dreg is
 *   initialized directly — for real (init) or with dummy opcodes (!init),
 *   the latter only keeping the IR valid (see emit_dummy_init_rvar).
 */
5793 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
5795 MonoInst *var = cfg->locals [local];
5796 if (COMPILE_SOFT_FLOAT (cfg)) {
5798 int reg = alloc_dreg (cfg, var->type);
5799 emit_init_rvar (cfg, reg, type);
5800 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
5803 emit_init_rvar (cfg, var->dreg, type);
5805 emit_dummy_init_rvar (cfg, var->dreg, type);
/*
 * inline_method:
 *   Try to inline CMETHOD at the current IL position. Saves the parts of
 *   the MonoCompile state that mono_method_to_ir () clobbers, recursively
 *   compiles the callee's body into fresh start/end bblocks (sbblock /
 *   ebblock), restores the state, and — if the callee's cost was acceptable
 *   (costs < 60) or inline_always is set — links and merges the new blocks
 *   into the caller's CFG. Returns a cost/success indication and uses RVAR
 *   as the callee's return-value local when the signature is non-void.
 *   NOTE(review): many lines are elided in this excerpt (opening brace,
 *   several returns and else-branches) — comments below describe only the
 *   visible code.
 */
5810 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
5811 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_always)
5813 MonoInst *ins, *rvar = NULL;
5814 MonoMethodHeader *cheader;
5815 MonoBasicBlock *ebblock, *sbblock;
5817 MonoMethod *prev_inlined_method;
5818 MonoInst **prev_locals, **prev_args;
5819 MonoType **prev_arg_types;
5820 guint prev_real_offset;
5821 GHashTable *prev_cbb_hash;
5822 MonoBasicBlock **prev_cil_offset_to_bb;
5823 MonoBasicBlock *prev_cbb;
5824 unsigned char* prev_cil_start;
5825 guint32 prev_cil_offset_to_bb_len;
5826 MonoMethod *prev_current_method;
5827 MonoGenericContext *prev_generic_context;
5828 gboolean ret_var_set, prev_ret_var_set, virtual = FALSE;
5830 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional debug-only name filters; see check_inline_*_method_name_limit. */
5832 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
5833 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
5836 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
5837 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
5841 if (cfg->verbose_level > 2)
5842 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
5844 if (!cmethod->inline_info) {
5845 cfg->stat_inlineable_methods++;
5846 cmethod->inline_info = 1;
5849 /* allocate local variables */
5850 cheader = mono_method_get_header (cmethod);
5852 if (cheader == NULL || mono_loader_get_last_error ()) {
5853 MonoLoaderError *error = mono_loader_get_last_error ();
5856 mono_metadata_free_mh (cheader);
5857 if (inline_always && error)
5858 mono_cfg_set_exception (cfg, error->exception_type);
5860 mono_loader_clear_error ();
5864 /* Must verify before creating locals as it can cause the JIT to assert. */
5865 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
5866 mono_metadata_free_mh (cheader);
5870 /* allocate space to store the return value */
5871 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
5872 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
5875 prev_locals = cfg->locals;
5876 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
5877 for (i = 0; i < cheader->num_locals; ++i)
5878 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
5880 /* allocate start and end blocks */
5881 /* This is needed so if the inline is aborted, we can clean up */
5882 NEW_BBLOCK (cfg, sbblock);
5883 sbblock->real_offset = real_offset;
5885 NEW_BBLOCK (cfg, ebblock);
5886 ebblock->block_num = cfg->num_bblocks++;
5887 ebblock->real_offset = real_offset;
/* Save the per-method JIT state that the recursive mono_method_to_ir () call overwrites. */
5889 prev_args = cfg->args;
5890 prev_arg_types = cfg->arg_types;
5891 prev_inlined_method = cfg->inlined_method;
5892 cfg->inlined_method = cmethod;
5893 cfg->ret_var_set = FALSE;
5894 cfg->inline_depth ++;
5895 prev_real_offset = cfg->real_offset;
5896 prev_cbb_hash = cfg->cbb_hash;
5897 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
5898 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
5899 prev_cil_start = cfg->cil_start;
5900 prev_cbb = cfg->cbb;
5901 prev_current_method = cfg->current_method;
5902 prev_generic_context = cfg->generic_context;
5903 prev_ret_var_set = cfg->ret_var_set;
/* A callvirt on a non-static method keeps its implicit null check when inlined. */
5905 if (*ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
5908 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, virtual);
5910 ret_var_set = cfg->ret_var_set;
/* Restore the caller's JIT state saved above. */
5912 cfg->inlined_method = prev_inlined_method;
5913 cfg->real_offset = prev_real_offset;
5914 cfg->cbb_hash = prev_cbb_hash;
5915 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
5916 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
5917 cfg->cil_start = prev_cil_start;
5918 cfg->locals = prev_locals;
5919 cfg->args = prev_args;
5920 cfg->arg_types = prev_arg_types;
5921 cfg->current_method = prev_current_method;
5922 cfg->generic_context = prev_generic_context;
5923 cfg->ret_var_set = prev_ret_var_set;
5924 cfg->inline_depth --;
/* Accept the inline when the callee body was cheap enough, or unconditionally for inline_always. */
5926 if ((costs >= 0 && costs < 60) || inline_always) {
5927 if (cfg->verbose_level > 2)
5928 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
5930 cfg->stat_inlined_methods++;
5932 /* always add some code to avoid block split failures */
5933 MONO_INST_NEW (cfg, ins, OP_NOP);
5934 MONO_ADD_INS (prev_cbb, ins);
5936 prev_cbb->next_bb = sbblock;
5937 link_bblock (cfg, prev_cbb, sbblock);
5940 * Get rid of the begin and end bblocks if possible to aid local
5943 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
5945 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
5946 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
5948 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
5949 MonoBasicBlock *prev = ebblock->in_bb [0];
5950 mono_merge_basic_blocks (cfg, prev, ebblock);
5952 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
5953 mono_merge_basic_blocks (cfg, prev_cbb, prev);
5954 cfg->cbb = prev_cbb;
5958 * It's possible that the rvar is set in some prev bblock, but not in others.
/* Give rvar a dummy value on predecessors that end in OP_NOT_REACHED so it is defined on every path. */
5964 for (i = 0; i < ebblock->in_count; ++i) {
5965 bb = ebblock->in_bb [i];
5967 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
5970 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
5980 * If the inlined method contains only a throw, then the ret var is not
5981 * set, so set it to a dummy value.
5984 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
5986 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
5989 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Inline rejected: undo any recorded exception and discard the new bblocks. */
5992 if (cfg->verbose_level > 2)
5993 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
5994 cfg->exception_type = MONO_EXCEPTION_NONE;
5995 mono_loader_clear_error ();
5997 /* This gets rid of the newly added bblocks */
5998 cfg->cbb = prev_cbb;
6000 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6005 * Some of these comments may well be out-of-date.
6006 * Design decisions: we do a single pass over the IL code (and we do bblock
6007 * splitting/merging in the few cases when it's required: a back jump to an IL
6008 * address that was not already seen as bblock starting point).
6009 * Code is validated as we go (full verification is still better left to metadata/verify.c).
6010 * Complex operations are decomposed in simpler ones right away. We need to let the
6011 * arch-specific code peek and poke inside this process somehow (except when the
6012 * optimizations can take advantage of the full semantic info of coarse opcodes).
6013 * All the opcodes of the form opcode.s are 'normalized' to opcode.
6014 * MonoInst->opcode initially is the IL opcode or some simplification of that
6015 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
6016 * opcode with value bigger than OP_LAST.
6017 * At this point the IR can be handed over to an interpreter, a dumb code generator
6018 * or to the optimizing code generator that will translate it to SSA form.
6020 * Profiling directed optimizations.
6021 * We may compile by default with few or no optimizations and instrument the code
6022 * or the user may indicate what methods to optimize the most either in a config file
6023 * or through repeated runs where the compiler applies offline the optimizations to
6024 * each method and then decides if it was worth it.
6027 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
6028 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
6029 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
6030 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
6031 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
6032 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
6033 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
6034 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; LOAD_ERROR;}
6036 /* offset from br.s -> br like opcodes */
6037 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *   Return TRUE when IL address IP belongs to basic block BB, i.e. when no
 *   other bblock has been registered as starting at that CIL offset in
 *   cfg->cil_offset_to_bb.
 */
6040 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
6042 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
6044 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *   First pass over the IL in [START, END): decode each opcode with
 *   mono_opcode_value () and register a basic block (via GET_BBLOCK) at
 *   every branch target and at the instruction following each branch, so
 *   that the main translation loop finds all block boundaries. Blocks whose
 *   last instruction is a throw are marked out_of_line.
 *   NOTE(review): many case bodies (ip advancement per operand size) are
 *   elided in this excerpt.
 */
6048 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
6050 unsigned char *ip = start;
6051 unsigned char *target;
6054 MonoBasicBlock *bblock;
6055 const MonoOpcode *opcode;
6058 cli_addr = ip - start;
6059 i = mono_opcode_value ((const guint8 **)&ip, end);
6062 opcode = &mono_opcodes [i];
/* Advance over the operand and register bblocks depending on the operand kind. */
6063 switch (opcode->argument) {
6064 case MonoInlineNone:
6067 case MonoInlineString:
6068 case MonoInlineType:
6069 case MonoInlineField:
6070 case MonoInlineMethod:
6073 case MonoShortInlineR:
6080 case MonoShortInlineVar:
6081 case MonoShortInlineI:
6084 case MonoShortInlineBrTarget:
/* 1-byte signed branch displacement, relative to the end of the 2-byte instruction. */
6085 target = start + cli_addr + 2 + (signed char)ip [1];
6086 GET_BBLOCK (cfg, bblock, target);
6089 GET_BBLOCK (cfg, bblock, ip);
6091 case MonoInlineBrTarget:
/* 4-byte signed branch displacement, relative to the end of the 5-byte instruction. */
6092 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
6093 GET_BBLOCK (cfg, bblock, target);
6096 GET_BBLOCK (cfg, bblock, ip);
6098 case MonoInlineSwitch: {
6099 guint32 n = read32 (ip + 1);
6102 cli_addr += 5 + 4 * n;
6103 target = start + cli_addr;
6104 GET_BBLOCK (cfg, bblock, target);
/* Each of the n switch entries is a displacement relative to the end of the instruction. */
6106 for (j = 0; j < n; ++j) {
6107 target = start + cli_addr + (gint32)read32 (ip);
6108 GET_BBLOCK (cfg, bblock, target);
6118 g_assert_not_reached ();
6121 if (i == CEE_THROW) {
6122 unsigned char *bb_start = ip - 1;
6124 /* Find the start of the bblock containing the throw */
6126 while ((bb_start >= start) && !bblock) {
6127 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
6131 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *   Resolve TOKEN to a MonoMethod. For wrapper methods the token is looked
 *   up in the wrapper's data table and inflated with CONTEXT; otherwise the
 *   metadata lookup goes through mono_get_method_full. "Open" constructed
 *   types are permitted here (contrast mini_get_method below).
 */
6141 static inline MonoMethod *
6142 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6146 if (m->wrapper_type != MONO_WRAPPER_NONE) {
6147 method = mono_method_get_wrapper_data (m, token);
6149 method = mono_class_inflate_generic_method (method, context);
6151 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *   Like mini_get_method_allow_open, but when not compiling with generic
 *   sharing, a method on an open constructed type is rejected (the handling
 *   after the check is elided in this excerpt).
 */
6157 static inline MonoMethod *
6158 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6160 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
6162 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *   Resolve TOKEN to a MonoClass, going through the wrapper data table (and
 *   inflating with CONTEXT) for wrapper methods, or through the image
 *   metadata otherwise. The class is initialized before being returned.
 */
6168 static inline MonoClass*
6169 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
6173 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6174 klass = mono_method_get_wrapper_data (method, token);
6176 klass = mono_class_inflate_generic_class (klass, context);
6178 klass = mono_class_get_full (method->klass->image, token, context);
6181 mono_class_init (klass);
/*
 * mini_get_signature:
 *   Resolve TOKEN to a MonoMethodSignature. Wrapper methods fetch it from
 *   the wrapper data table and inflate it with CONTEXT (asserting the
 *   inflation succeeded); ordinary methods parse it from the image metadata.
 */
6185 static inline MonoMethodSignature*
6186 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
6188 MonoMethodSignature *fsig;
6190 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6193 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6195 fsig = mono_inflate_generic_signature (fsig, context, &error);
6197 g_assert (mono_error_ok (&error));
6200 fsig = mono_metadata_parse_signature (method->klass->image, token);
6206 * Returns TRUE if the JIT should abort inlining because "callee"
6207 * is influenced by security attributes.
/*
 * check_linkdemand:
 *   Evaluate CAS link demands on CALLEE when called from CALLER. For the
 *   ECMA link demand, code is emitted that throws a SecurityException at
 *   the call site; other failures record MONO_EXCEPTION_SECURITY_LINKDEMAND
 *   on the cfg (without hiding an earlier exception). Returns TRUE when the
 *   JIT should abort inlining because "callee" is influenced by security
 *   attributes.
 */
6210 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
6214 if ((cfg->method != caller) && mono_security_method_has_declsec (callee)) {
6218 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
6219 if (result == MONO_JIT_SECURITY_OK)
6222 if (result == MONO_JIT_LINKDEMAND_ECMA) {
6223 /* Generate code to throw a SecurityException before the actual call/link */
6224 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6227 NEW_ICONST (cfg, args [0], 4);
6228 NEW_METHODCONST (cfg, args [1], caller);
6229 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
6230 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
6231 /* don't hide previous results */
6232 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
6233 cfg->exception_data = result;
/*
 * throw_exception:
 *   Return the SecurityManager.ThrowException helper method, resolving and
 *   caching it in a function-local static on first use.
 */
6241 throw_exception (void)
6243 static MonoMethod *method = NULL;
6246 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6247 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *   Emit a call to SecurityManager.ThrowException (EX) at the current IL
 *   position, passing the exception object as a pointer constant.
 */
6254 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
6256 MonoMethod *thrower = throw_exception ();
6259 EMIT_NEW_PCONST (cfg, args [0], ex);
6260 mono_emit_method_call (cfg, thrower, args, NULL);
6264 * Return the original method if a wrapper is specified. We can only access
6265 * the custom attributes from the original method.
/*
 * get_original_method:
 *   Map a wrapper back to the method it wraps, since custom attributes are
 *   only accessible on the original method. Non-wrappers are returned
 *   unchanged; NATIVE_TO_MANAGED wrappers are treated specially (native
 *   code, which is like Critical, can call any managed method).
 */
6268 get_original_method (MonoMethod *method)
6270 if (method->wrapper_type == MONO_WRAPPER_NONE)
6273 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
6274 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
6277 /* in other cases we need to find the original method */
6278 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *   CoreCLR security check for field access: ask the security layer whether
 *   CALLER (unwrapped via get_original_method) may access FIELD, and emit a
 *   throw of the returned exception when it may not.
 */
6282 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
6283 MonoBasicBlock *bblock, unsigned char *ip)
6285 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6286 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
6288 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *   CoreCLR security check for method calls: ask the security layer whether
 *   CALLER (unwrapped via get_original_method) may call CALLEE, and emit a
 *   throw of the returned exception when it may not.
 */
6292 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
6293 MonoBasicBlock *bblock, unsigned char *ip)
6295 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6296 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
6298 emit_throw_exception (cfg, ex);
6302 * Check that the IL instructions at ip are the array initialization
6303 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *   Pattern-match the IL sequence dup / ldtoken <field> / call
 *   RuntimeHelpers::InitializeArray following a newarr, and on success
 *   return a pointer to the static initializer data (or, under AOT, the RVA
 *   wrapped with GUINT_TO_POINTER for a load-time lookup), with the byte
 *   size in *out_size and the field token in *out_field_token.
 *   NOTE(review): per-element-size cases and the bail-out returns are
 *   elided in this excerpt.
 */
6306 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
6309 * newarr[System.Int32]
6311 * ldtoken field valuetype ...
6312 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
6314 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
6315 guint32 token = read32 (ip + 7);
6316 guint32 field_token = read32 (ip + 2);
6317 guint32 field_index = field_token & 0xffffff;
6319 const char *data_ptr;
6321 MonoMethod *cmethod;
6322 MonoClass *dummy_class;
6323 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
6329 *out_field_token = field_token;
6331 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Only the exact corlib RuntimeHelpers.InitializeArray call qualifies. */
6334 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
6336 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
6337 case MONO_TYPE_BOOLEAN:
6341 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
6342 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
6343 case MONO_TYPE_CHAR:
6360 if (size > mono_type_size (field->type, &dummy_align))
6363 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
6364 if (!method->klass->image->dynamic) {
6365 field_index = read32 (ip + 2) & 0xffffff;
6366 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
6367 data_ptr = mono_image_rva_map (method->klass->image, rva);
6368 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
6369 /* for aot code we do the lookup on load */
6370 if (aot && data_ptr)
6371 return GUINT_TO_POINTER (rva);
6373 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
6375 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *   Record an InvalidProgramException on the cfg with a message naming the
 *   method and disassembling the offending instruction at IP (or noting an
 *   empty method body). The header is queued on cfg->headers_to_free so it
 *   is released with the compile.
 */
6383 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
6385 char *method_fname = mono_method_full_name (method, TRUE);
6387 MonoMethodHeader *header = mono_method_get_header (method);
6389 if (header->code_size == 0)
6390 method_code = g_strdup ("method body is empty.");
6392 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
6393 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
6394 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
6395 g_free (method_fname);
6396 g_free (method_code);
6397 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *   Record a pre-built exception object on the cfg (OBJECT_SUPPLIED) and
 *   register cfg->exception_ptr as a GC root so the object stays alive
 *   until the compile is finished.
 */
6401 set_exception_object (MonoCompile *cfg, MonoException *exception)
6403 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
6404 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
6405 cfg->exception_ptr = exception;
/*
 * emit_stloc_ir:
 *   Emit a store into IL local N from the stack value *SP. When the store
 *   would be a plain reg-reg move and *SP is the immediately preceding
 *   ICONST/I8CONST instruction, the move is elided by retargeting that
 *   constant's dreg directly at the local's register.
 */
6409 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
6412 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
6413 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
6414 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
6415 /* Optimize reg-reg moves away */
6417 * Can't optimize other opcodes, since sp[0] might point to
6418 * the last ins of a decomposed opcode.
6420 sp [0]->dreg = (cfg)->locals [n]->dreg;
6422 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
6427 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *   Try to eliminate a ldloca by recognizing the common
 *   "ldloca <n>; initobj <type>" sequence (when the initobj is in the same
 *   bblock) and emitting a direct local initialization instead. Returns the
 *   advanced IL pointer on success; the failure path is elided in this
 *   excerpt.
 */
6430 static inline unsigned char *
6431 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
6441 local = read16 (ip + 2);
6445 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
6446 /* From the INITOBJ case */
6447 token = read32 (ip + 2);
6448 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
6449 CHECK_TYPELOAD (klass);
6450 type = mini_replace_type (&klass->byval_arg);
6451 emit_init_local (cfg, local, type, TRUE);
/*
 * is_exception_class:
 *   Walk CLASS's inheritance chain and return whether it derives from (or
 *   is) System.Exception.
 */
6459 is_exception_class (MonoClass *class)
6462 if (class == mono_defaults.exception_class)
6464 class = class->parent;
6470 * is_jit_optimizer_disabled:
6472 * Determine whether M's assembly has a DebuggableAttribute with the
6473 * IsJITOptimizerDisabled flag set.
/*
 * is_jit_optimizer_disabled:
 *   Determine whether M's assembly carries a DebuggableAttribute whose
 *   IsJITOptimizerDisabled flag is set. The result is cached per-assembly
 *   (jit_optimizer_disabled / _inited, published with a memory barrier),
 *   and the DebuggableAttribute class is cached in a static.
 */
6476 is_jit_optimizer_disabled (MonoMethod *m)
6478 MonoAssembly *ass = m->klass->image->assembly;
6479 MonoCustomAttrInfo* attrs;
6480 static MonoClass *klass;
6482 gboolean val = FALSE;
6485 if (ass->jit_optimizer_disabled_inited)
6486 return ass->jit_optimizer_disabled;
6489 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
/* No DebuggableAttribute class at all: cache "not disabled" and return. */
6492 ass->jit_optimizer_disabled = FALSE;
6493 mono_memory_barrier ();
6494 ass->jit_optimizer_disabled_inited = TRUE;
6498 attrs = mono_custom_attrs_from_assembly (ass);
6500 for (i = 0; i < attrs->num_attrs; ++i) {
6501 MonoCustomAttrEntry *attr = &attrs->attrs [i];
6504 MonoMethodSignature *sig;
6506 if (!attr->ctor || attr->ctor->klass != klass)
6508 /* Decode the attribute. See reflection.c */
6509 len = attr->data_size;
6510 p = (const char*)attr->data;
/* Custom attribute blobs start with the 0x0001 prolog. */
6511 g_assert (read16 (p) == 0x0001);
6514 // FIXME: Support named parameters
6515 sig = mono_method_signature (attr->ctor);
6516 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
6518 /* Two boolean arguments */
6522 mono_custom_attrs_free (attrs);
6525 ass->jit_optimizer_disabled = val;
6526 mono_memory_barrier ();
6527 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *   Decide whether a tail. call from METHOD to CMETHOD can actually be
 *   compiled as a tail call. Architecture support is consulted first
 *   (mono_arch_tail_call_supported or a signature-equality fallback), then
 *   a series of vetoes: byref/pointer/fnptr arguments or a valuetype 'this'
 *   (may point into the caller's stack frame), pinvoke callees, callers
 *   that save an LMF, most wrapper callees, and any opcode other than
 *   plain CEE_CALL.
 */
6533 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
6535 gboolean supported_tail_call;
6538 #ifdef MONO_ARCH_HAVE_OP_TAIL_CALL
6539 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
6541 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6544 for (i = 0; i < fsig->param_count; ++i) {
6545 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
6546 /* These can point to the current method's stack */
6547 supported_tail_call = FALSE;
6549 if (fsig->hasthis && cmethod->klass->valuetype)
6550 /* this might point to the current method's stack */
6551 supported_tail_call = FALSE;
6552 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
6553 supported_tail_call = FALSE;
6554 if (cfg->method->save_lmf)
6555 supported_tail_call = FALSE;
6556 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
6557 supported_tail_call = FALSE;
6558 if (call_opcode != CEE_CALL)
6559 supported_tail_call = FALSE;
6561 /* Debugging support */
6563 if (supported_tail_call) {
6564 if (!mono_debug_count ())
6565 supported_tail_call = FALSE;
6569 return supported_tail_call;
6572 /* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
6573 * it to the thread local value based on the tls_offset field. Every other kind of access to
6574 * the field causes an assert.
/*
 * is_magic_tls_access:
 *   Return whether FIELD is the corlib ThreadLocal`1.tlsdata field, whose
 *   ldflda the JIT intercepts and redirects to the thread-local value
 *   (see the comment above and create_magic_tls_access below).
 */
6577 is_magic_tls_access (MonoClassField *field)
6579 if (strcmp (field->name, "tlsdata"))
6581 if (strcmp (field->parent->name, "ThreadLocal`1"))
6583 return field->parent->image == mono_defaults.corlib;
6586 /* emits the code needed to access a managed tls var (like ThreadStatic)
6587 * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
6588 * pointer for the current thread.
6589 * Returns the MonoInst* representing the address of the tls var.
/*
 * emit_managed_static_data_access:
 *   Emit the inlined address computation for a managed TLS variable (like
 *   ThreadStatic) given the tls offset in OFFSET_REG and the current
 *   MonoInternalThread in THREAD_INS:
 *     idx = (offset >> 24) - 1;
 *     addr = (char*) thread->static_data [idx] + (offset & 0xffffff);
 *   Returns the MonoInst* holding the address.
 */
6592 emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
6595 int static_data_reg, array_reg, dreg;
6596 int offset2_reg, idx_reg;
6597 // inlined access to the tls data
6598 // idx = (offset >> 24) - 1;
6599 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
6600 static_data_reg = alloc_ireg (cfg);
6601 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
6602 idx_reg = alloc_ireg (cfg);
6603 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
6604 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
/* Scale the index by pointer size (shift by 3 on 64-bit, 2 on 32-bit). */
6605 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
6606 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
6607 array_reg = alloc_ireg (cfg);
6608 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
6609 offset2_reg = alloc_ireg (cfg);
6610 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
6611 dreg = alloc_ireg (cfg);
6612 EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
6617 * redirect access to the tlsdata field to the tls var given by the tls_offset field.
6618 * this address is cached per-method in cached_tls_addr.
/*
 * create_magic_tls_access:
 *   Redirect access to the ThreadLocal`1.tlsdata field to the actual TLS
 *   slot given by its tls_offset field. The computed address is cached
 *   per-method in *CACHED_TLS_ADDR (a temp local) so repeated accesses load
 *   the cached value instead of recomputing it. The current thread is
 *   obtained via the thread intrinsic when available, otherwise through a
 *   call to CurrentInternalThread_internal.
 */
6621 create_magic_tls_access (MonoCompile *cfg, MonoClassField *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
6623 MonoInst *load, *addr, *temp, *store, *thread_ins;
6624 MonoClassField *offset_field;
6626 if (*cached_tls_addr) {
6627 EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
6630 thread_ins = mono_get_thread_intrinsic (cfg);
6631 offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");
6633 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
6635 MONO_ADD_INS (cfg->cbb, thread_ins);
/* No thread intrinsic on this target: fall back to a managed call. */
6637 MonoMethod *thread_method;
6638 thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
6639 thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
6641 addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
6642 addr->klass = mono_class_from_mono_type (tls_field->type);
6643 addr->type = STACK_MP;
6644 *cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
6645 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);
6647 EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
6652 * mono_method_to_ir:
6654 * Translate the .net IL into linear IR.
6657 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
6658 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
6659 guint inline_offset, gboolean is_virtual_call)
6662 MonoInst *ins, **sp, **stack_start;
6663 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
6664 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
6665 MonoMethod *cmethod, *method_definition;
6666 MonoInst **arg_array;
6667 MonoMethodHeader *header;
6669 guint32 token, ins_flag;
6671 MonoClass *constrained_call = NULL;
6672 unsigned char *ip, *end, *target, *err_pos;
6673 MonoMethodSignature *sig;
6674 MonoGenericContext *generic_context = NULL;
6675 MonoGenericContainer *generic_container = NULL;
6676 MonoType **param_types;
6677 int i, n, start_new_bblock, dreg;
6678 int num_calls = 0, inline_costs = 0;
6679 int breakpoint_id = 0;
6681 MonoBoolean security, pinvoke;
6682 MonoSecurityManager* secman = NULL;
6683 MonoDeclSecurityActions actions;
6684 GSList *class_inits = NULL;
6685 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
6687 gboolean init_locals, seq_points, skip_dead_blocks;
6688 gboolean disable_inline, sym_seq_points = FALSE;
6689 MonoInst *cached_tls_addr = NULL;
6690 MonoDebugMethodInfo *minfo;
6691 MonoBitSet *seq_point_locs = NULL;
6692 MonoBitSet *seq_point_set_locs = NULL;
6694 disable_inline = is_jit_optimizer_disabled (method);
6696 /* serialization and xdomain stuff may need access to private fields and methods */
6697 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
6698 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
6699 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
6700 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
6701 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
6702 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
6704 dont_verify |= mono_security_smcs_hack_enabled ();
6706 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
6707 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
6708 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
6709 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
6710 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
6712 image = method->klass->image;
6713 header = mono_method_get_header (method);
6715 MonoLoaderError *error;
6717 if ((error = mono_loader_get_last_error ())) {
6718 mono_cfg_set_exception (cfg, error->exception_type);
6720 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
6721 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
6723 goto exception_exit;
6725 generic_container = mono_method_get_generic_container (method);
6726 sig = mono_method_signature (method);
6727 num_args = sig->hasthis + sig->param_count;
6728 ip = (unsigned char*)header->code;
6729 cfg->cil_start = ip;
6730 end = ip + header->code_size;
6731 cfg->stat_cil_code_size += header->code_size;
6733 seq_points = cfg->gen_seq_points && cfg->method == method;
6734 #ifdef PLATFORM_ANDROID
6735 seq_points &= cfg->method->wrapper_type == MONO_WRAPPER_NONE;
6738 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
6739 /* We could hit a seq point before attaching to the JIT (#8338) */
6743 if (cfg->gen_seq_points && cfg->method == method) {
6744 minfo = mono_debug_lookup_method (method);
6746 int i, n_il_offsets;
6750 mono_debug_symfile_get_line_numbers_full (minfo, NULL, NULL, &n_il_offsets, &il_offsets, &line_numbers, NULL, NULL);
6751 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6752 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6753 sym_seq_points = TRUE;
6754 for (i = 0; i < n_il_offsets; ++i) {
6755 if (il_offsets [i] < header->code_size)
6756 mono_bitset_set_fast (seq_point_locs, il_offsets [i]);
6758 g_free (il_offsets);
6759 g_free (line_numbers);
6764 * Methods without init_locals set could cause asserts in various passes
6765 * (#497220). To work around this, we emit dummy initialization opcodes
6766 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
6767 * on some platforms.
6769 if ((cfg->opt & MONO_OPT_UNSAFE) && ARCH_HAVE_DUMMY_INIT)
6770 init_locals = header->init_locals;
6774 method_definition = method;
6775 while (method_definition->is_inflated) {
6776 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
6777 method_definition = imethod->declaring;
6780 /* SkipVerification is not allowed if core-clr is enabled */
6781 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
6783 dont_verify_stloc = TRUE;
6786 if (sig->is_inflated)
6787 generic_context = mono_method_get_context (method);
6788 else if (generic_container)
6789 generic_context = &generic_container->context;
6790 cfg->generic_context = generic_context;
6792 if (!cfg->generic_sharing_context)
6793 g_assert (!sig->has_type_parameters);
6795 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
6796 g_assert (method->is_inflated);
6797 g_assert (mono_method_get_context (method)->method_inst);
6799 if (method->is_inflated && mono_method_get_context (method)->method_inst)
6800 g_assert (sig->generic_param_count);
6802 if (cfg->method == method) {
6803 cfg->real_offset = 0;
6805 cfg->real_offset = inline_offset;
6808 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
6809 cfg->cil_offset_to_bb_len = header->code_size;
6811 cfg->current_method = method;
6813 if (cfg->verbose_level > 2)
6814 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
6816 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
6818 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
6819 for (n = 0; n < sig->param_count; ++n)
6820 param_types [n + sig->hasthis] = sig->params [n];
6821 cfg->arg_types = param_types;
6823 dont_inline = g_list_prepend (dont_inline, method);
6824 if (cfg->method == method) {
6826 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
6827 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
6830 NEW_BBLOCK (cfg, start_bblock);
6831 cfg->bb_entry = start_bblock;
6832 start_bblock->cil_code = NULL;
6833 start_bblock->cil_length = 0;
6834 #if defined(__native_client_codegen__)
6835 MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
6836 ins->dreg = alloc_dreg (cfg, STACK_I4);
6837 MONO_ADD_INS (start_bblock, ins);
6841 NEW_BBLOCK (cfg, end_bblock);
6842 cfg->bb_exit = end_bblock;
6843 end_bblock->cil_code = NULL;
6844 end_bblock->cil_length = 0;
6845 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
6846 g_assert (cfg->num_bblocks == 2);
6848 arg_array = cfg->args;
6850 if (header->num_clauses) {
6851 cfg->spvars = g_hash_table_new (NULL, NULL);
6852 cfg->exvars = g_hash_table_new (NULL, NULL);
6854 /* handle exception clauses */
6855 for (i = 0; i < header->num_clauses; ++i) {
6856 MonoBasicBlock *try_bb;
6857 MonoExceptionClause *clause = &header->clauses [i];
6858 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
6859 try_bb->real_offset = clause->try_offset;
6860 try_bb->try_start = TRUE;
6861 try_bb->region = ((i + 1) << 8) | clause->flags;
6862 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
6863 tblock->real_offset = clause->handler_offset;
6864 tblock->flags |= BB_EXCEPTION_HANDLER;
6867 * Linking the try block with the EH block hinders inlining as we won't be able to
6868 * merge the bblocks from inlining and produce an artificial hole for no good reason.
6870 if (COMPILE_LLVM (cfg))
6871 link_bblock (cfg, try_bb, tblock);
6873 if (*(ip + clause->handler_offset) == CEE_POP)
6874 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
6876 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
6877 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
6878 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
6879 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
6880 MONO_ADD_INS (tblock, ins);
6882 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) {
6883 /* finally clauses already have a seq point */
6884 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
6885 MONO_ADD_INS (tblock, ins);
6888 /* todo: is a fault block unsafe to optimize? */
6889 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
6890 tblock->flags |= BB_EXCEPTION_UNSAFE;
6894 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
6896 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
6898 /* catch and filter blocks get the exception object on the stack */
6899 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
6900 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
6901 MonoInst *dummy_use;
6903 /* mostly like handle_stack_args (), but just sets the input args */
6904 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
6905 tblock->in_scount = 1;
6906 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
6907 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
6910 * Add a dummy use for the exvar so its liveness info will be
6914 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
6916 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
6917 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
6918 tblock->flags |= BB_EXCEPTION_HANDLER;
6919 tblock->real_offset = clause->data.filter_offset;
6920 tblock->in_scount = 1;
6921 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
6922 /* The filter block shares the exvar with the handler block */
6923 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
6924 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
6925 MONO_ADD_INS (tblock, ins);
6929 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
6930 clause->data.catch_class &&
6931 cfg->generic_sharing_context &&
6932 mono_class_check_context_used (clause->data.catch_class)) {
6934 * In shared generic code with catch
6935 * clauses containing type variables
6936 * the exception handling code has to
6937 * be able to get to the rgctx.
6938 * Therefore we have to make sure that
6939 * the vtable/mrgctx argument (for
6940 * static or generic methods) or the
6941 * "this" argument (for non-static
6942 * methods) are live.
6944 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
6945 mini_method_get_context (method)->method_inst ||
6946 method->klass->valuetype) {
6947 mono_get_vtable_var (cfg);
6949 MonoInst *dummy_use;
6951 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
6956 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
6957 cfg->cbb = start_bblock;
6958 cfg->args = arg_array;
6959 mono_save_args (cfg, sig, inline_args);
6962 /* FIRST CODE BLOCK */
6963 NEW_BBLOCK (cfg, bblock);
6964 bblock->cil_code = ip;
6968 ADD_BBLOCK (cfg, bblock);
6970 if (cfg->method == method) {
6971 breakpoint_id = mono_debugger_method_has_breakpoint (method);
6972 if (breakpoint_id) {
6973 MONO_INST_NEW (cfg, ins, OP_BREAK);
6974 MONO_ADD_INS (bblock, ins);
6978 if (mono_security_cas_enabled ())
6979 secman = mono_security_manager_get_methods ();
6981 security = (secman && mono_security_method_has_declsec (method));
6982 /* at this point having security doesn't mean we have any code to generate */
6983 if (security && (cfg->method == method)) {
6984 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
6985 * And we do not want to enter the next section (with allocation) if we
6986 * have nothing to generate */
6987 security = mono_declsec_get_demands (method, &actions);
6990 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
6991 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
6993 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
6994 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6995 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
6997 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
6998 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
7002 mono_custom_attrs_free (custom);
7005 custom = mono_custom_attrs_from_class (wrapped->klass);
7006 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
7010 mono_custom_attrs_free (custom);
7013 /* not a P/Invoke after all */
7018 /* we use a separate basic block for the initialization code */
7019 NEW_BBLOCK (cfg, init_localsbb);
7020 cfg->bb_init = init_localsbb;
7021 init_localsbb->real_offset = cfg->real_offset;
7022 start_bblock->next_bb = init_localsbb;
7023 init_localsbb->next_bb = bblock;
7024 link_bblock (cfg, start_bblock, init_localsbb);
7025 link_bblock (cfg, init_localsbb, bblock);
7027 cfg->cbb = init_localsbb;
7029 if (cfg->gsharedvt && cfg->method == method) {
7030 MonoGSharedVtMethodInfo *info;
7031 MonoInst *var, *locals_var;
7034 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
7035 info->method = cfg->method;
7036 info->count_entries = 16;
7037 info->entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
7038 cfg->gsharedvt_info = info;
7040 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7041 /* prevent it from being register allocated */
7042 //var->flags |= MONO_INST_VOLATILE;
7043 cfg->gsharedvt_info_var = var;
7045 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
7046 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
7048 /* Allocate locals */
7049 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7050 /* prevent it from being register allocated */
7051 //locals_var->flags |= MONO_INST_VOLATILE;
7052 cfg->gsharedvt_locals_var = locals_var;
7054 dreg = alloc_ireg (cfg);
7055 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
7057 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
7058 ins->dreg = locals_var->dreg;
7060 MONO_ADD_INS (cfg->cbb, ins);
7061 cfg->gsharedvt_locals_var_ins = ins;
7063 cfg->flags |= MONO_CFG_HAS_ALLOCA;
7066 ins->flags |= MONO_INST_INIT;
7070 /* at this point we know, if security is TRUE, that some code needs to be generated */
7071 if (security && (cfg->method == method)) {
7074 cfg->stat_cas_demand_generation++;
7076 if (actions.demand.blob) {
7077 /* Add code for SecurityAction.Demand */
7078 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
7079 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
7080 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
7081 mono_emit_method_call (cfg, secman->demand, args, NULL);
7083 if (actions.noncasdemand.blob) {
7084 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
7085 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
7086 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
7087 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
7088 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
7089 mono_emit_method_call (cfg, secman->demand, args, NULL);
7091 if (actions.demandchoice.blob) {
7092 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
7093 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
7094 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
7095 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
7096 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
7100 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
7102 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
7105 if (mono_security_core_clr_enabled ()) {
7106 /* check if this is native code, e.g. an icall or a p/invoke */
7107 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
7108 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7110 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
7111 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
7113 /* if this ia a native call then it can only be JITted from platform code */
7114 if ((icall || pinvk) && method->klass && method->klass->image) {
7115 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
7116 MonoException *ex = icall ? mono_get_exception_security () :
7117 mono_get_exception_method_access ();
7118 emit_throw_exception (cfg, ex);
7125 CHECK_CFG_EXCEPTION;
7127 if (header->code_size == 0)
7130 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
7135 if (cfg->method == method)
7136 mono_debug_init_method (cfg, bblock, breakpoint_id);
7138 for (n = 0; n < header->num_locals; ++n) {
7139 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
7144 /* We force the vtable variable here for all shared methods
7145 for the possibility that they might show up in a stack
7146 trace where their exact instantiation is needed. */
7147 if (cfg->generic_sharing_context && method == cfg->method) {
7148 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7149 mini_method_get_context (method)->method_inst ||
7150 method->klass->valuetype) {
7151 mono_get_vtable_var (cfg);
7153 /* FIXME: Is there a better way to do this?
7154 We need the variable live for the duration
7155 of the whole method. */
7156 cfg->args [0]->flags |= MONO_INST_VOLATILE;
7160 /* add a check for this != NULL to inlined methods */
7161 if (is_virtual_call) {
7164 NEW_ARGLOAD (cfg, arg_ins, 0);
7165 MONO_ADD_INS (cfg->cbb, arg_ins);
7166 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
7169 skip_dead_blocks = !dont_verify;
7170 if (skip_dead_blocks) {
7171 original_bb = bb = mono_basic_block_split (method, &error);
7172 if (!mono_error_ok (&error)) {
7173 mono_error_cleanup (&error);
7179 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
7180 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
7183 start_new_bblock = 0;
7186 if (cfg->method == method)
7187 cfg->real_offset = ip - header->code;
7189 cfg->real_offset = inline_offset;
7194 if (start_new_bblock) {
7195 bblock->cil_length = ip - bblock->cil_code;
7196 if (start_new_bblock == 2) {
7197 g_assert (ip == tblock->cil_code);
7199 GET_BBLOCK (cfg, tblock, ip);
7201 bblock->next_bb = tblock;
7204 start_new_bblock = 0;
7205 for (i = 0; i < bblock->in_scount; ++i) {
7206 if (cfg->verbose_level > 3)
7207 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
7208 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
7212 g_slist_free (class_inits);
7215 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
7216 link_bblock (cfg, bblock, tblock);
7217 if (sp != stack_start) {
7218 handle_stack_args (cfg, stack_start, sp - stack_start);
7220 CHECK_UNVERIFIABLE (cfg);
7222 bblock->next_bb = tblock;
7225 for (i = 0; i < bblock->in_scount; ++i) {
7226 if (cfg->verbose_level > 3)
7227 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
7228 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
7231 g_slist_free (class_inits);
7236 if (skip_dead_blocks) {
7237 int ip_offset = ip - header->code;
7239 if (ip_offset == bb->end)
7243 int op_size = mono_opcode_size (ip, end);
7244 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
7246 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
7248 if (ip_offset + op_size == bb->end) {
7249 MONO_INST_NEW (cfg, ins, OP_NOP);
7250 MONO_ADD_INS (bblock, ins);
7251 start_new_bblock = 1;
7259 * Sequence points are points where the debugger can place a breakpoint.
7260 * Currently, we generate these automatically at points where the IL
7263 if (seq_points && ((sp == stack_start) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
7265 * Make methods interruptable at the beginning, and at the targets of
7266 * backward branches.
7267 * Also, do this at the start of every bblock in methods with clauses too,
7268 * to be able to handle instructions with inprecise control flow like
7270 * Backward branches are handled at the end of method-to-ir ().
7272 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
7274 /* Avoid sequence points on empty IL like .volatile */
7275 // FIXME: Enable this
7276 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
7277 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
7278 if (sp != stack_start)
7279 ins->flags |= MONO_INST_NONEMPTY_STACK;
7280 MONO_ADD_INS (cfg->cbb, ins);
7283 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
7286 bblock->real_offset = cfg->real_offset;
7288 if ((cfg->method == method) && cfg->coverage_info) {
7289 guint32 cil_offset = ip - header->code;
7290 cfg->coverage_info->data [cil_offset].cil_code = ip;
7292 /* TODO: Use an increment here */
7293 #if defined(TARGET_X86)
7294 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
7295 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
7297 MONO_ADD_INS (cfg->cbb, ins);
7299 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
7300 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
7304 if (cfg->verbose_level > 3)
7305 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
7309 if (seq_points && !sym_seq_points && sp != stack_start) {
7311 * The C# compiler uses these nops to notify the JIT that it should
7312 * insert seq points.
7314 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
7315 MONO_ADD_INS (cfg->cbb, ins);
7317 if (cfg->keep_cil_nops)
7318 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
7320 MONO_INST_NEW (cfg, ins, OP_NOP);
7322 MONO_ADD_INS (bblock, ins);
7325 if (should_insert_brekpoint (cfg->method)) {
7326 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
7328 MONO_INST_NEW (cfg, ins, OP_NOP);
7331 MONO_ADD_INS (bblock, ins);
7337 CHECK_STACK_OVF (1);
7338 n = (*ip)-CEE_LDARG_0;
7340 EMIT_NEW_ARGLOAD (cfg, ins, n);
7348 CHECK_STACK_OVF (1);
7349 n = (*ip)-CEE_LDLOC_0;
7351 EMIT_NEW_LOCLOAD (cfg, ins, n);
7360 n = (*ip)-CEE_STLOC_0;
7363 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
7365 emit_stloc_ir (cfg, sp, header, n);
7372 CHECK_STACK_OVF (1);
7375 EMIT_NEW_ARGLOAD (cfg, ins, n);
7381 CHECK_STACK_OVF (1);
7384 NEW_ARGLOADA (cfg, ins, n);
7385 MONO_ADD_INS (cfg->cbb, ins);
7395 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
7397 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
7402 CHECK_STACK_OVF (1);
7405 EMIT_NEW_LOCLOAD (cfg, ins, n);
7409 case CEE_LDLOCA_S: {
7410 unsigned char *tmp_ip;
7412 CHECK_STACK_OVF (1);
7413 CHECK_LOCAL (ip [1]);
7415 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
7421 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
7430 CHECK_LOCAL (ip [1]);
7431 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
7433 emit_stloc_ir (cfg, sp, header, ip [1]);
7438 CHECK_STACK_OVF (1);
7439 EMIT_NEW_PCONST (cfg, ins, NULL);
7440 ins->type = STACK_OBJ;
7445 CHECK_STACK_OVF (1);
7446 EMIT_NEW_ICONST (cfg, ins, -1);
7459 CHECK_STACK_OVF (1);
7460 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
7466 CHECK_STACK_OVF (1);
7468 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
7474 CHECK_STACK_OVF (1);
7475 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
7481 CHECK_STACK_OVF (1);
7482 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7483 ins->type = STACK_I8;
7484 ins->dreg = alloc_dreg (cfg, STACK_I8);
7486 ins->inst_l = (gint64)read64 (ip);
7487 MONO_ADD_INS (bblock, ins);
7493 gboolean use_aotconst = FALSE;
7495 #ifdef TARGET_POWERPC
7496 /* FIXME: Clean this up */
7497 if (cfg->compile_aot)
7498 use_aotconst = TRUE;
7501 /* FIXME: we should really allocate this only late in the compilation process */
7502 f = mono_domain_alloc (cfg->domain, sizeof (float));
7504 CHECK_STACK_OVF (1);
7510 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
7512 dreg = alloc_freg (cfg);
7513 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
7514 ins->type = STACK_R8;
7516 MONO_INST_NEW (cfg, ins, OP_R4CONST);
7517 ins->type = STACK_R8;
7518 ins->dreg = alloc_dreg (cfg, STACK_R8);
7520 MONO_ADD_INS (bblock, ins);
7530 gboolean use_aotconst = FALSE;
7532 #ifdef TARGET_POWERPC
7533 /* FIXME: Clean this up */
7534 if (cfg->compile_aot)
7535 use_aotconst = TRUE;
7538 /* FIXME: we should really allocate this only late in the compilation process */
7539 d = mono_domain_alloc (cfg->domain, sizeof (double));
7541 CHECK_STACK_OVF (1);
7547 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
7549 dreg = alloc_freg (cfg);
7550 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
7551 ins->type = STACK_R8;
7553 MONO_INST_NEW (cfg, ins, OP_R8CONST);
7554 ins->type = STACK_R8;
7555 ins->dreg = alloc_dreg (cfg, STACK_R8);
7557 MONO_ADD_INS (bblock, ins);
7566 MonoInst *temp, *store;
7568 CHECK_STACK_OVF (1);
7572 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
7573 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
7575 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
7578 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
7591 if (sp [0]->type == STACK_R8)
7592 /* we need to pop the value from the x86 FP stack */
7593 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
7599 INLINE_FAILURE ("jmp");
7600 GSHAREDVT_FAILURE (*ip);
7603 if (stack_start != sp)
7605 token = read32 (ip + 1);
7606 /* FIXME: check the signature matches */
7607 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7609 if (!cmethod || mono_loader_get_last_error ())
7612 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
7613 GENERIC_SHARING_FAILURE (CEE_JMP);
7615 if (mono_security_cas_enabled ())
7616 CHECK_CFG_EXCEPTION;
7618 if (ARCH_HAVE_OP_TAIL_CALL) {
7619 MonoMethodSignature *fsig = mono_method_signature (cmethod);
7622 /* Handle tail calls similarly to calls */
7623 n = fsig->param_count + fsig->hasthis;
7627 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
7628 call->method = cmethod;
7629 call->tail_call = TRUE;
7630 call->signature = mono_method_signature (cmethod);
7631 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
7632 call->inst.inst_p0 = cmethod;
7633 for (i = 0; i < n; ++i)
7634 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
7636 mono_arch_emit_call (cfg, call);
7637 MONO_ADD_INS (bblock, (MonoInst*)call);
7639 for (i = 0; i < num_args; ++i)
7640 /* Prevent arguments from being optimized away */
7641 arg_array [i]->flags |= MONO_INST_VOLATILE;
7643 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
7644 ins = (MonoInst*)call;
7645 ins->inst_p0 = cmethod;
7646 MONO_ADD_INS (bblock, ins);
7650 start_new_bblock = 1;
7655 case CEE_CALLVIRT: {
7656 MonoInst *addr = NULL;
7657 MonoMethodSignature *fsig = NULL;
7659 int virtual = *ip == CEE_CALLVIRT;
7660 int calli = *ip == CEE_CALLI;
7661 gboolean pass_imt_from_rgctx = FALSE;
7662 MonoInst *imt_arg = NULL;
7663 MonoInst *keep_this_alive = NULL;
7664 gboolean pass_vtable = FALSE;
7665 gboolean pass_mrgctx = FALSE;
7666 MonoInst *vtable_arg = NULL;
7667 gboolean check_this = FALSE;
7668 gboolean supported_tail_call = FALSE;
7669 gboolean tail_call = FALSE;
7670 gboolean need_seq_point = FALSE;
7671 guint32 call_opcode = *ip;
7672 gboolean emit_widen = TRUE;
7673 gboolean push_res = TRUE;
7674 gboolean skip_ret = FALSE;
7675 gboolean delegate_invoke = FALSE;
7678 token = read32 (ip + 1);
7683 //GSHAREDVT_FAILURE (*ip);
7688 fsig = mini_get_signature (method, token, generic_context);
7689 n = fsig->param_count + fsig->hasthis;
7691 if (method->dynamic && fsig->pinvoke) {
7695 * This is a call through a function pointer using a pinvoke
7696 * signature. Have to create a wrapper and call that instead.
7697 * FIXME: This is very slow, need to create a wrapper at JIT time
7698 * instead based on the signature.
7700 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
7701 EMIT_NEW_PCONST (cfg, args [1], fsig);
7703 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
7706 MonoMethod *cil_method;
7708 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7709 cil_method = cmethod;
7711 if (constrained_call) {
7712 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7713 if (cfg->verbose_level > 2)
7714 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_call));
7715 if (!((constrained_call->byval_arg.type == MONO_TYPE_VAR ||
7716 constrained_call->byval_arg.type == MONO_TYPE_MVAR) &&
7717 cfg->generic_sharing_context)) {
7718 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_call, generic_context);
7721 if (cfg->verbose_level > 2)
7722 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_call));
7724 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
7726 * This is needed since get_method_constrained can't find
7727 * the method in klass representing a type var.
7728 * The type var is guaranteed to be a reference type in this
7731 if (!mini_is_gsharedvt_klass (cfg, constrained_call))
7732 g_assert (!cmethod->klass->valuetype);
7734 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
7739 if (!cmethod || mono_loader_get_last_error ())
7741 if (!dont_verify && !cfg->skip_visibility) {
7742 MonoMethod *target_method = cil_method;
7743 if (method->is_inflated) {
7744 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
7746 if (!mono_method_can_access_method (method_definition, target_method) &&
7747 !mono_method_can_access_method (method, cil_method))
7748 METHOD_ACCESS_FAILURE;
7751 if (mono_security_core_clr_enabled ())
7752 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
7754 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
7755 /* MS.NET seems to silently convert this to a callvirt */
7760 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
7761 * converts to a callvirt.
7763 * tests/bug-515884.il is an example of this behavior
7765 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
7766 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
7767 if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
7771 if (!cmethod->klass->inited)
7772 if (!mono_class_init (cmethod->klass))
7773 TYPE_LOAD_ERROR (cmethod->klass);
7775 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
7776 mini_class_is_system_array (cmethod->klass)) {
7777 array_rank = cmethod->klass->rank;
7778 fsig = mono_method_signature (cmethod);
7780 fsig = mono_method_signature (cmethod);
7785 if (fsig->pinvoke) {
7786 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
7787 check_for_pending_exc, cfg->compile_aot);
7788 fsig = mono_method_signature (wrapper);
7789 } else if (constrained_call) {
7790 fsig = mono_method_signature (cmethod);
7792 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
7796 mono_save_token_info (cfg, image, token, cil_method);
7798 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7800 * Need to emit an implicit seq point after every non-void call so single stepping through nested calls like
7801 * foo (bar (), baz ())
7802 * works correctly. MS does this also:
7803 * http://stackoverflow.com/questions/6937198/making-your-net-language-step-correctly-in-the-debugger
7804 * The problem with this approach is that the debugger will stop after all calls returning a value,
7805 * even for simple cases, like:
7808 /* Special case a few common successor opcodes */
7809 if (!(ip + 5 < end && (ip [5] == CEE_POP || ip [5] == CEE_NOP)) && !(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
7810 need_seq_point = TRUE;
7813 n = fsig->param_count + fsig->hasthis;
7815 /* Don't support calls made using type arguments for now */
7817 if (cfg->gsharedvt) {
7818 if (mini_is_gsharedvt_signature (cfg, fsig))
7819 GSHAREDVT_FAILURE (*ip);
7823 if (mono_security_cas_enabled ()) {
7824 if (check_linkdemand (cfg, method, cmethod))
7825 INLINE_FAILURE ("linkdemand");
7826 CHECK_CFG_EXCEPTION;
7829 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
7830 g_assert_not_reached ();
7833 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
7836 if (!cfg->generic_sharing_context && cmethod)
7837 g_assert (!mono_method_check_context_used (cmethod));
7841 //g_assert (!virtual || fsig->hasthis);
7845 if (constrained_call) {
7846 if (mini_is_gsharedvt_klass (cfg, constrained_call)) {
7848 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as a ref type or as a vtype.
7850 if ((cmethod->klass != mono_defaults.object_class) && constrained_call->valuetype && cmethod->klass->valuetype) {
7851 /* The 'Own method' case below */
7852 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
7853 /* 'The type parameter is instantiated as a reference type' case below. */
7854 } else if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
7855 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || mini_is_gsharedvt_type (cfg, fsig->ret)) &&
7856 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || mini_is_gsharedvt_type (cfg, fsig->params [0]))))) {
7857 MonoInst *args [16];
7860 * This case handles calls to
7861 * - object:ToString()/Equals()/GetHashCode(),
7862 * - System.IComparable<T>:CompareTo()
7863 * - System.IEquatable<T>:Equals ()
7864 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
7868 if (mono_method_check_context_used (cmethod))
7869 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
7871 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
7872 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_call), constrained_call, MONO_RGCTX_INFO_KLASS);
7874 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
7875 if (fsig->hasthis && fsig->param_count) {
7876 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
7877 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
7878 ins->dreg = alloc_preg (cfg);
7879 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
7880 MONO_ADD_INS (cfg->cbb, ins);
7883 if (mini_is_gsharedvt_type (cfg, fsig->params [0])) {
7886 args [3] = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
7888 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
7889 addr_reg = ins->dreg;
7890 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
7892 EMIT_NEW_ICONST (cfg, args [3], 0);
7893 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
7896 EMIT_NEW_ICONST (cfg, args [3], 0);
7897 EMIT_NEW_ICONST (cfg, args [4], 0);
7899 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
7902 if (mini_is_gsharedvt_type (cfg, fsig->ret)) {
7903 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins, &bblock);
7904 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret)) {
7908 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
7909 MONO_ADD_INS (cfg->cbb, add);
7911 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
7912 MONO_ADD_INS (cfg->cbb, ins);
7913 /* ins represents the call result */
7918 GSHAREDVT_FAILURE (*ip);
7922 * We have the `constrained.' prefix opcode.
7924 if (constrained_call->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
7926 * The type parameter is instantiated as a valuetype,
7927 * but that type doesn't override the method we're
7928 * calling, so we need to box `this'.
7930 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
7931 ins->klass = constrained_call;
7932 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
7933 CHECK_CFG_EXCEPTION;
7934 } else if (!constrained_call->valuetype) {
7935 int dreg = alloc_ireg_ref (cfg);
7938 * The type parameter is instantiated as a reference
7939 * type. We have a managed pointer on the stack, so
7940 * we need to dereference it here.
7942 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
7943 ins->type = STACK_OBJ;
7946 if (cmethod->klass->valuetype) {
7949 /* Interface method */
7952 mono_class_setup_vtable (constrained_call);
7953 CHECK_TYPELOAD (constrained_call);
7954 ioffset = mono_class_interface_offset (constrained_call, cmethod->klass);
7956 TYPE_LOAD_ERROR (constrained_call);
7957 slot = mono_method_get_vtable_slot (cmethod);
7959 TYPE_LOAD_ERROR (cmethod->klass);
7960 cmethod = constrained_call->vtable [ioffset + slot];
7962 if (cmethod->klass == mono_defaults.enum_class) {
7963 /* Enum implements some interfaces, so treat this as the first case */
7964 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
7965 ins->klass = constrained_call;
7966 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
7967 CHECK_CFG_EXCEPTION;
7972 constrained_call = NULL;
7975 if (!calli && check_call_signature (cfg, fsig, sp))
7978 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
7979 if (cmethod && (cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
7980 delegate_invoke = TRUE;
7983 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
7985 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7986 type_to_eval_stack_type ((cfg), fsig->ret, ins);
7994 * If the callee is a shared method, then its static cctor
7995 * might not get called after the call was patched.
7997 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
7998 emit_generic_class_init (cfg, cmethod->klass);
7999 CHECK_TYPELOAD (cmethod->klass);
8003 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
8005 if (cfg->generic_sharing_context && cmethod) {
8006 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
8008 context_used = mini_method_check_context_used (cfg, cmethod);
8010 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8011 /* Generic method interface
8012 calls are resolved via a
8013 helper function and don't
8015 if (!cmethod_context || !cmethod_context->method_inst)
8016 pass_imt_from_rgctx = TRUE;
8020 * If a shared method calls another
8021 * shared method then the caller must
8022 * have a generic sharing context
8023 * because the magic trampoline
8024 * requires it. FIXME: We shouldn't
8025 * have to force the vtable/mrgctx
8026 * variable here. Instead there
8027 * should be a flag in the cfg to
8028 * request a generic sharing context.
8031 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
8032 mono_get_vtable_var (cfg);
8037 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8039 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8041 CHECK_TYPELOAD (cmethod->klass);
8042 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8047 g_assert (!vtable_arg);
8049 if (!cfg->compile_aot) {
8051 * emit_get_rgctx_method () calls mono_class_vtable () so check
8052 * for type load errors before.
8054 mono_class_setup_vtable (cmethod->klass);
8055 CHECK_TYPELOAD (cmethod->klass);
8058 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8060 /* !marshalbyref is needed to properly handle generic methods + remoting */
8061 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
8062 MONO_METHOD_IS_FINAL (cmethod)) &&
8063 !mono_class_is_marshalbyref (cmethod->klass)) {
8070 if (pass_imt_from_rgctx) {
8071 g_assert (!pass_vtable);
8074 imt_arg = emit_get_rgctx_method (cfg, context_used,
8075 cmethod, MONO_RGCTX_INFO_METHOD);
8079 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8081 /* Calling virtual generic methods */
8082 if (cmethod && virtual &&
8083 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
8084 !(MONO_METHOD_IS_FINAL (cmethod) &&
8085 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
8086 fsig->generic_param_count &&
8087 !(cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))) {
8088 MonoInst *this_temp, *this_arg_temp, *store;
8089 MonoInst *iargs [4];
8090 gboolean use_imt = FALSE;
8092 g_assert (fsig->is_inflated);
8094 /* Prevent inlining of methods that contain indirect calls */
8095 INLINE_FAILURE ("virtual generic call");
8097 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
8098 GSHAREDVT_FAILURE (*ip);
8100 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
8101 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt)
8106 g_assert (!imt_arg);
8108 g_assert (cmethod->is_inflated);
8109 imt_arg = emit_get_rgctx_method (cfg, context_used,
8110 cmethod, MONO_RGCTX_INFO_METHOD);
8111 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
8113 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
8114 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
8115 MONO_ADD_INS (bblock, store);
8117 /* FIXME: This should be a managed pointer */
8118 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8120 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
8121 iargs [1] = emit_get_rgctx_method (cfg, context_used,
8122 cmethod, MONO_RGCTX_INFO_METHOD);
8123 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
8124 addr = mono_emit_jit_icall (cfg,
8125 mono_helper_compile_generic_method, iargs);
8127 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
8129 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8136 * Implement a workaround for the inherent races involved in locking:
8142 * If a thread abort happens between the call to Monitor.Enter () and the start of the
8143 * try block, the Exit () won't be executed, see:
8144 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
8145 * To work around this, we extend such try blocks to include the last x bytes
8146 * of the Monitor.Enter () call.
8148 if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8149 MonoBasicBlock *tbb;
8151 GET_BBLOCK (cfg, tbb, ip + 5);
8153 * Only extend try blocks with a finally, to avoid catching exceptions thrown
8154 * from Monitor.Enter like ArgumentNullException.
8156 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
8157 /* Mark this bblock as needing to be extended */
8158 tbb->extend_try_block = TRUE;
8162 /* Conversion to a JIT intrinsic */
8163 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
8165 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8166 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8173 if (cmethod && (cfg->opt & MONO_OPT_INLINE) &&
8174 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
8175 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
8176 !g_list_find (dont_inline, cmethod)) {
8178 gboolean always = FALSE;
8180 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
8181 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
8182 /* Prevent inlining of methods that call wrappers */
8183 INLINE_FAILURE ("wrapper call");
8184 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
8188 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, always);
8190 cfg->real_offset += 5;
8193 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8194 /* *sp is already set by inline_method */
8199 inline_costs += costs;
8205 /* Tail recursion elimination */
8206 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
8207 gboolean has_vtargs = FALSE;
8210 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8211 INLINE_FAILURE ("tail call");
8213 /* keep it simple */
8214 for (i = fsig->param_count - 1; i >= 0; i--) {
8215 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
8220 for (i = 0; i < n; ++i)
8221 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8222 MONO_INST_NEW (cfg, ins, OP_BR);
8223 MONO_ADD_INS (bblock, ins);
8224 tblock = start_bblock->out_bb [0];
8225 link_bblock (cfg, bblock, tblock);
8226 ins->inst_target_bb = tblock;
8227 start_new_bblock = 1;
8229 /* skip the CEE_RET, too */
8230 if (ip_in_bb (cfg, bblock, ip + 5))
8237 inline_costs += 10 * num_calls++;
8240 * Making generic calls out of gsharedvt methods.
8242 if (cmethod && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
8243 MonoRgctxInfoType info_type;
8246 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
8247 //GSHAREDVT_FAILURE (*ip);
8248 // disable for possible remoting calls
8249 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
8250 GSHAREDVT_FAILURE (*ip);
8251 if (fsig->generic_param_count) {
8252 /* virtual generic call */
8253 g_assert (mono_use_imt);
8254 g_assert (!imt_arg);
8255 /* Same as the virtual generic case above */
8256 imt_arg = emit_get_rgctx_method (cfg, context_used,
8257 cmethod, MONO_RGCTX_INFO_METHOD);
8258 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
8263 if (cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)
8264 /* test_0_multi_dim_arrays () in gshared.cs */
8265 GSHAREDVT_FAILURE (*ip);
8267 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
8268 keep_this_alive = sp [0];
8270 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
8271 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
8273 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
8274 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
8276 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8278 } else if (calli && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
8280 * We pass the address to the gsharedvt trampoline in the rgctx reg
8282 MonoInst *callee = addr;
8284 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8286 GSHAREDVT_FAILURE (*ip);
8288 addr = emit_get_rgctx_sig (cfg, context_used,
8289 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8290 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8294 /* Generic sharing */
8295 /* FIXME: only do this for generic methods if
8296 they are not shared! */
8297 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
8298 (!mono_method_is_generic_sharable (cmethod, TRUE) ||
8299 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
8300 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
8301 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
8302 INLINE_FAILURE ("gshared");
8304 g_assert (cfg->generic_sharing_context && cmethod);
8308 * We are compiling a call to a
8309 * generic method from shared code,
8310 * which means that we have to look up
8311 * the method in the rgctx and do an
8315 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8317 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8318 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8322 /* Indirect calls */
8324 if (call_opcode == CEE_CALL)
8325 g_assert (context_used);
8326 else if (call_opcode == CEE_CALLI)
8327 g_assert (!vtable_arg);
8329 /* FIXME: what the hell is this??? */
8330 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
8331 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
8333 /* Prevent inlining of methods with indirect calls */
8334 INLINE_FAILURE ("indirect call");
8336 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8341 * Instead of emitting an indirect call, emit a direct call
8342 * with the contents of the aotconst as the patch info.
8344 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8345 info_type = addr->inst_c1;
8346 info_data = addr->inst_p0;
8348 info_type = addr->inst_right->inst_c1;
8349 info_data = addr->inst_right->inst_left;
8352 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8353 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8358 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8366 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
8367 MonoInst *val = sp [fsig->param_count];
8369 if (val->type == STACK_OBJ) {
8370 MonoInst *iargs [2];
8375 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
8378 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
8379 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
8380 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
8381 emit_write_barrier (cfg, addr, val);
8382 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
8383 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8385 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
8386 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
8387 if (!cmethod->klass->element_class->valuetype && !readonly)
8388 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
8389 CHECK_TYPELOAD (cmethod->klass);
8392 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8395 g_assert_not_reached ();
8402 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
8406 /* Tail prefix / tail call optimization */
8408 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
8409 /* FIXME: runtime generic context pointer for jumps? */
8410 /* FIXME: handle this for generic sharing eventually */
8411 if (cmethod && (ins_flag & MONO_INST_TAILCALL) &&
8412 !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
8413 supported_tail_call = TRUE;
8415 if (supported_tail_call) {
8418 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8419 INLINE_FAILURE ("tail call");
8421 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
8423 if (ARCH_HAVE_OP_TAIL_CALL) {
8424 /* Handle tail calls similarly to normal calls */
8427 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8428 call->tail_call = TRUE;
8429 call->method = cmethod;
8430 call->signature = mono_method_signature (cmethod);
8433 * We implement tail calls by storing the actual arguments into the
8434 * argument variables, then emitting a CEE_JMP.
8436 for (i = 0; i < n; ++i) {
8437 /* Prevent argument from being register allocated */
8438 arg_array [i]->flags |= MONO_INST_VOLATILE;
8439 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8441 ins = (MonoInst*)call;
8442 ins->inst_p0 = cmethod;
8443 ins->inst_p1 = arg_array [0];
8444 MONO_ADD_INS (bblock, ins);
8445 link_bblock (cfg, bblock, end_bblock);
8446 start_new_bblock = 1;
8448 // FIXME: Eliminate unreachable epilogs
8451 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
8452 * only reachable from this call.
8454 GET_BBLOCK (cfg, tblock, ip + 5);
8455 if (tblock == bblock || tblock->in_count == 0)
8464 * Synchronized wrappers.
8465 * Its hard to determine where to replace a method with its synchronized
8466 * wrapper without causing an infinite recursion. The current solution is
8467 * to add the synchronized wrapper in the trampolines, and to
8468 * change the called method to a dummy wrapper, and resolve that wrapper
8469 * to the real method in mono_jit_compile_method ().
8471 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8472 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
8473 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
8474 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
8478 INLINE_FAILURE ("call");
8479 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual ? sp [0] : NULL,
8480 imt_arg, vtable_arg);
8483 link_bblock (cfg, bblock, end_bblock);
8484 start_new_bblock = 1;
8486 // FIXME: Eliminate unreachable epilogs
8489 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
8490 * only reachable from this call.
8492 GET_BBLOCK (cfg, tblock, ip + 5);
8493 if (tblock == bblock || tblock->in_count == 0)
8500 /* End of call, INS should contain the result of the call, if any */
8502 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
8505 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8510 if (keep_this_alive) {
8511 MonoInst *dummy_use;
8513 /* See mono_emit_method_call_full () */
8514 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
8517 CHECK_CFG_EXCEPTION;
8521 g_assert (*ip == CEE_RET);
8525 constrained_call = NULL;
8527 emit_seq_point (cfg, method, ip, FALSE, TRUE);
8531 if (cfg->method != method) {
8532 /* return from inlined method */
8534 * If in_count == 0, that means the ret is unreachable due to
8535 * being preceded by a throw. In that case, inline_method () will
8536 * handle setting the return value
8537 * (test case: test_0_inline_throw ()).
8539 if (return_var && cfg->cbb->in_count) {
8540 MonoType *ret_type = mono_method_signature (method)->ret;
8546 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
8549 //g_assert (returnvar != -1);
8550 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
8551 cfg->ret_var_set = TRUE;
8554 if (cfg->lmf_var && cfg->cbb->in_count)
8558 MonoType *ret_type = mini_replace_type (mono_method_signature (method)->ret);
8560 if (seq_points && !sym_seq_points) {
8562 * Place a seq point here too even though the IL stack is not
8563 * empty, so a step over on
8566 * will work correctly.
8568 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
8569 MONO_ADD_INS (cfg->cbb, ins);
8572 g_assert (!return_var);
8576 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
8579 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
8582 if (!cfg->vret_addr) {
8585 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
8587 EMIT_NEW_RETLOADA (cfg, ret_addr);
8589 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
8590 ins->klass = mono_class_from_mono_type (ret_type);
8593 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
8594 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
8595 MonoInst *iargs [1];
8599 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
8600 mono_arch_emit_setret (cfg, method, conv);
8602 mono_arch_emit_setret (cfg, method, *sp);
8605 mono_arch_emit_setret (cfg, method, *sp);
8610 if (sp != stack_start)
8612 MONO_INST_NEW (cfg, ins, OP_BR);
8614 ins->inst_target_bb = end_bblock;
8615 MONO_ADD_INS (bblock, ins);
8616 link_bblock (cfg, bblock, end_bblock);
8617 start_new_bblock = 1;
8621 MONO_INST_NEW (cfg, ins, OP_BR);
8623 target = ip + 1 + (signed char)(*ip);
8625 GET_BBLOCK (cfg, tblock, target);
8626 link_bblock (cfg, bblock, tblock);
8627 ins->inst_target_bb = tblock;
8628 if (sp != stack_start) {
8629 handle_stack_args (cfg, stack_start, sp - stack_start);
8631 CHECK_UNVERIFIABLE (cfg);
8633 MONO_ADD_INS (bblock, ins);
8634 start_new_bblock = 1;
8635 inline_costs += BRANCH_COST;
8649 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
8651 target = ip + 1 + *(signed char*)ip;
8657 inline_costs += BRANCH_COST;
8661 MONO_INST_NEW (cfg, ins, OP_BR);
8664 target = ip + 4 + (gint32)read32(ip);
8666 GET_BBLOCK (cfg, tblock, target);
8667 link_bblock (cfg, bblock, tblock);
8668 ins->inst_target_bb = tblock;
8669 if (sp != stack_start) {
8670 handle_stack_args (cfg, stack_start, sp - stack_start);
8672 CHECK_UNVERIFIABLE (cfg);
8675 MONO_ADD_INS (bblock, ins);
8677 start_new_bblock = 1;
8678 inline_costs += BRANCH_COST;
8685 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
8686 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
8687 guint32 opsize = is_short ? 1 : 4;
8689 CHECK_OPSIZE (opsize);
8691 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
8694 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
8699 GET_BBLOCK (cfg, tblock, target);
8700 link_bblock (cfg, bblock, tblock);
8701 GET_BBLOCK (cfg, tblock, ip);
8702 link_bblock (cfg, bblock, tblock);
8704 if (sp != stack_start) {
8705 handle_stack_args (cfg, stack_start, sp - stack_start);
8706 CHECK_UNVERIFIABLE (cfg);
8709 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
8710 cmp->sreg1 = sp [0]->dreg;
8711 type_from_op (cmp, sp [0], NULL);
8714 #if SIZEOF_REGISTER == 4
8715 if (cmp->opcode == OP_LCOMPARE_IMM) {
8716 /* Convert it to OP_LCOMPARE */
8717 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8718 ins->type = STACK_I8;
8719 ins->dreg = alloc_dreg (cfg, STACK_I8);
8721 MONO_ADD_INS (bblock, ins);
8722 cmp->opcode = OP_LCOMPARE;
8723 cmp->sreg2 = ins->dreg;
8726 MONO_ADD_INS (bblock, cmp);
8728 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
8729 type_from_op (ins, sp [0], NULL);
8730 MONO_ADD_INS (bblock, ins);
8731 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
8732 GET_BBLOCK (cfg, tblock, target);
8733 ins->inst_true_bb = tblock;
8734 GET_BBLOCK (cfg, tblock, ip);
8735 ins->inst_false_bb = tblock;
8736 start_new_bblock = 2;
8739 inline_costs += BRANCH_COST;
8754 MONO_INST_NEW (cfg, ins, *ip);
8756 target = ip + 4 + (gint32)read32(ip);
8762 inline_costs += BRANCH_COST;
8766 MonoBasicBlock **targets;
8767 MonoBasicBlock *default_bblock;
8768 MonoJumpInfoBBTable *table;
8769 int offset_reg = alloc_preg (cfg);
8770 int target_reg = alloc_preg (cfg);
8771 int table_reg = alloc_preg (cfg);
8772 int sum_reg = alloc_preg (cfg);
8773 gboolean use_op_switch;
8777 n = read32 (ip + 1);
8780 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
8784 CHECK_OPSIZE (n * sizeof (guint32));
8785 target = ip + n * sizeof (guint32);
8787 GET_BBLOCK (cfg, default_bblock, target);
8788 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
8790 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
8791 for (i = 0; i < n; ++i) {
8792 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
8793 targets [i] = tblock;
8794 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
8798 if (sp != stack_start) {
8800 * Link the current bb with the targets as well, so handle_stack_args
8801 * will set their in_stack correctly.
8803 link_bblock (cfg, bblock, default_bblock);
8804 for (i = 0; i < n; ++i)
8805 link_bblock (cfg, bblock, targets [i]);
8807 handle_stack_args (cfg, stack_start, sp - stack_start);
8809 CHECK_UNVERIFIABLE (cfg);
8812 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
8813 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
8816 for (i = 0; i < n; ++i)
8817 link_bblock (cfg, bblock, targets [i]);
8819 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
8820 table->table = targets;
8821 table->table_size = n;
8823 use_op_switch = FALSE;
8825 /* ARM implements SWITCH statements differently */
8826 /* FIXME: Make it use the generic implementation */
8827 if (!cfg->compile_aot)
8828 use_op_switch = TRUE;
8831 if (COMPILE_LLVM (cfg))
8832 use_op_switch = TRUE;
8834 cfg->cbb->has_jump_table = 1;
8836 if (use_op_switch) {
8837 MONO_INST_NEW (cfg, ins, OP_SWITCH);
8838 ins->sreg1 = src1->dreg;
8839 ins->inst_p0 = table;
8840 ins->inst_many_bb = targets;
8841 ins->klass = GUINT_TO_POINTER (n);
8842 MONO_ADD_INS (cfg->cbb, ins);
8844 if (sizeof (gpointer) == 8)
8845 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
8847 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
8849 #if SIZEOF_REGISTER == 8
8850 /* The upper word might not be zero, and we add it to a 64 bit address later */
8851 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
8854 if (cfg->compile_aot) {
8855 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
8857 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
8858 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
8859 ins->inst_p0 = table;
8860 ins->dreg = table_reg;
8861 MONO_ADD_INS (cfg->cbb, ins);
8864 /* FIXME: Use load_memindex */
8865 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
8866 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
8867 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
8869 start_new_bblock = 1;
8870 inline_costs += (BRANCH_COST * 2);
8890 dreg = alloc_freg (cfg);
8893 dreg = alloc_lreg (cfg);
8896 dreg = alloc_ireg_ref (cfg);
8899 dreg = alloc_preg (cfg);
8902 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
8903 ins->type = ldind_type [*ip - CEE_LDIND_I1];
8904 ins->flags |= ins_flag;
8906 MONO_ADD_INS (bblock, ins);
8908 if (ins->flags & MONO_INST_VOLATILE) {
8909 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
8910 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
8911 emit_memory_barrier (cfg, FullBarrier);
8926 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
8927 ins->flags |= ins_flag;
8930 if (ins->flags & MONO_INST_VOLATILE) {
8931 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
8932 /* FIXME it's questionable if release semantics require a full barrier or just StoreStore */
8933 emit_memory_barrier (cfg, FullBarrier);
8936 MONO_ADD_INS (bblock, ins);
8938 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
8939 emit_write_barrier (cfg, sp [0], sp [1]);
8948 MONO_INST_NEW (cfg, ins, (*ip));
8950 ins->sreg1 = sp [0]->dreg;
8951 ins->sreg2 = sp [1]->dreg;
8952 type_from_op (ins, sp [0], sp [1]);
8954 ins->dreg = alloc_dreg ((cfg), (ins)->type);
8956 /* Use the immediate opcodes if possible */
8957 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
8958 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
8959 if (imm_opcode != -1) {
8960 ins->opcode = imm_opcode;
8961 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
8964 sp [1]->opcode = OP_NOP;
8968 MONO_ADD_INS ((cfg)->cbb, (ins));
8970 *sp++ = mono_decompose_opcode (cfg, ins);
8987 MONO_INST_NEW (cfg, ins, (*ip));
8989 ins->sreg1 = sp [0]->dreg;
8990 ins->sreg2 = sp [1]->dreg;
8991 type_from_op (ins, sp [0], sp [1]);
8993 ADD_WIDEN_OP (ins, sp [0], sp [1]);
8994 ins->dreg = alloc_dreg ((cfg), (ins)->type);
8996 /* FIXME: Pass opcode to is_inst_imm */
8998 /* Use the immediate opcodes if possible */
8999 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
9002 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9003 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
9004 /* Keep emulated opcodes which are optimized away later */
9005 if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
9006 imm_opcode = mono_op_to_op_imm (ins->opcode);
9009 if (imm_opcode != -1) {
9010 ins->opcode = imm_opcode;
9011 if (sp [1]->opcode == OP_I8CONST) {
9012 #if SIZEOF_REGISTER == 8
9013 ins->inst_imm = sp [1]->inst_l;
9015 ins->inst_ls_word = sp [1]->inst_ls_word;
9016 ins->inst_ms_word = sp [1]->inst_ms_word;
9020 ins->inst_imm = (gssize)(sp [1]->inst_c0);
9023 /* Might be followed by an instruction added by ADD_WIDEN_OP */
9024 if (sp [1]->next == NULL)
9025 sp [1]->opcode = OP_NOP;
9028 MONO_ADD_INS ((cfg)->cbb, (ins));
9030 *sp++ = mono_decompose_opcode (cfg, ins);
9043 case CEE_CONV_OVF_I8:
9044 case CEE_CONV_OVF_U8:
9048 /* Special case this earlier so we have long constants in the IR */
9049 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
9050 int data = sp [-1]->inst_c0;
9051 sp [-1]->opcode = OP_I8CONST;
9052 sp [-1]->type = STACK_I8;
9053 #if SIZEOF_REGISTER == 8
9054 if ((*ip) == CEE_CONV_U8)
9055 sp [-1]->inst_c0 = (guint32)data;
9057 sp [-1]->inst_c0 = data;
9059 sp [-1]->inst_ls_word = data;
9060 if ((*ip) == CEE_CONV_U8)
9061 sp [-1]->inst_ms_word = 0;
9063 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
9065 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
9072 case CEE_CONV_OVF_I4:
9073 case CEE_CONV_OVF_I1:
9074 case CEE_CONV_OVF_I2:
9075 case CEE_CONV_OVF_I:
9076 case CEE_CONV_OVF_U:
9079 if (sp [-1]->type == STACK_R8) {
9080 ADD_UNOP (CEE_CONV_OVF_I8);
9087 case CEE_CONV_OVF_U1:
9088 case CEE_CONV_OVF_U2:
9089 case CEE_CONV_OVF_U4:
9092 if (sp [-1]->type == STACK_R8) {
9093 ADD_UNOP (CEE_CONV_OVF_U8);
9100 case CEE_CONV_OVF_I1_UN:
9101 case CEE_CONV_OVF_I2_UN:
9102 case CEE_CONV_OVF_I4_UN:
9103 case CEE_CONV_OVF_I8_UN:
9104 case CEE_CONV_OVF_U1_UN:
9105 case CEE_CONV_OVF_U2_UN:
9106 case CEE_CONV_OVF_U4_UN:
9107 case CEE_CONV_OVF_U8_UN:
9108 case CEE_CONV_OVF_I_UN:
9109 case CEE_CONV_OVF_U_UN:
9116 CHECK_CFG_EXCEPTION;
9120 case CEE_ADD_OVF_UN:
9122 case CEE_MUL_OVF_UN:
9124 case CEE_SUB_OVF_UN:
9130 GSHAREDVT_FAILURE (*ip);
9133 token = read32 (ip + 1);
9134 klass = mini_get_class (method, token, generic_context);
9135 CHECK_TYPELOAD (klass);
9137 if (generic_class_is_reference_type (cfg, klass)) {
9138 MonoInst *store, *load;
9139 int dreg = alloc_ireg_ref (cfg);
9141 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
9142 load->flags |= ins_flag;
9143 MONO_ADD_INS (cfg->cbb, load);
9145 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
9146 store->flags |= ins_flag;
9147 MONO_ADD_INS (cfg->cbb, store);
9149 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
9150 emit_write_barrier (cfg, sp [0], sp [1]);
9152 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
9164 token = read32 (ip + 1);
9165 klass = mini_get_class (method, token, generic_context);
9166 CHECK_TYPELOAD (klass);
9168 /* Optimize the common ldobj+stloc combination */
9178 loc_index = ip [5] - CEE_STLOC_0;
9185 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
9186 CHECK_LOCAL (loc_index);
9188 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9189 ins->dreg = cfg->locals [loc_index]->dreg;
9195 /* Optimize the ldobj+stobj combination */
9196 /* The reference case ends up being a load+store anyway */
9197 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
9202 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
9209 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9218 CHECK_STACK_OVF (1);
9220 n = read32 (ip + 1);
9222 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
9223 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
9224 ins->type = STACK_OBJ;
9227 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
9228 MonoInst *iargs [1];
9230 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
9231 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
9233 if (cfg->opt & MONO_OPT_SHARED) {
9234 MonoInst *iargs [3];
9236 if (cfg->compile_aot) {
9237 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
9239 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9240 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
9241 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
9242 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
9243 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
9245 if (bblock->out_of_line) {
9246 MonoInst *iargs [2];
9248 if (image == mono_defaults.corlib) {
9250 * Avoid relocations in AOT and save some space by using a
9251 * version of helper_ldstr specialized to mscorlib.
9253 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
9254 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
9256 /* Avoid creating the string object */
9257 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9258 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
9259 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
9263 if (cfg->compile_aot) {
9264 NEW_LDSTRCONST (cfg, ins, image, n);
9266 MONO_ADD_INS (bblock, ins);
9269 NEW_PCONST (cfg, ins, NULL);
9270 ins->type = STACK_OBJ;
9271 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
9273 OUT_OF_MEMORY_FAILURE;
9276 MONO_ADD_INS (bblock, ins);
9285 MonoInst *iargs [2];
9286 MonoMethodSignature *fsig;
9289 MonoInst *vtable_arg = NULL;
9292 token = read32 (ip + 1);
9293 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
9294 if (!cmethod || mono_loader_get_last_error ())
9296 fsig = mono_method_get_signature (cmethod, image, token);
9300 mono_save_token_info (cfg, image, token, cmethod);
9302 if (!mono_class_init (cmethod->klass))
9303 TYPE_LOAD_ERROR (cmethod->klass);
9305 context_used = mini_method_check_context_used (cfg, cmethod);
9307 if (mono_security_cas_enabled ()) {
9308 if (check_linkdemand (cfg, method, cmethod))
9309 INLINE_FAILURE ("linkdemand");
9310 CHECK_CFG_EXCEPTION;
9311 } else if (mono_security_core_clr_enabled ()) {
9312 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9315 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9316 emit_generic_class_init (cfg, cmethod->klass);
9317 CHECK_TYPELOAD (cmethod->klass);
9321 if (cfg->gsharedvt) {
9322 if (mini_is_gsharedvt_variable_signature (sig))
9323 GSHAREDVT_FAILURE (*ip);
9327 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
9328 mono_method_is_generic_sharable (cmethod, TRUE)) {
9329 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
9330 mono_class_vtable (cfg->domain, cmethod->klass);
9331 CHECK_TYPELOAD (cmethod->klass);
9333 vtable_arg = emit_get_rgctx_method (cfg, context_used,
9334 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9337 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
9338 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9340 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9342 CHECK_TYPELOAD (cmethod->klass);
9343 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9348 n = fsig->param_count;
9352 * Generate smaller code for the common newobj <exception> instruction in
9353 * argument checking code.
9355 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
9356 is_exception_class (cmethod->klass) && n <= 2 &&
9357 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
9358 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
9359 MonoInst *iargs [3];
9361 g_assert (!vtable_arg);
9365 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
9368 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
9372 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
9377 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
9380 g_assert_not_reached ();
9388 /* move the args to allow room for 'this' in the first position */
9394 /* check_call_signature () requires sp[0] to be set */
9395 this_ins.type = STACK_OBJ;
9397 if (check_call_signature (cfg, fsig, sp))
9402 if (mini_class_is_system_array (cmethod->klass)) {
9403 g_assert (!vtable_arg);
9405 *sp = emit_get_rgctx_method (cfg, context_used,
9406 cmethod, MONO_RGCTX_INFO_METHOD);
9408 /* Avoid varargs in the common case */
9409 if (fsig->param_count == 1)
9410 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
9411 else if (fsig->param_count == 2)
9412 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
9413 else if (fsig->param_count == 3)
9414 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
9415 else if (fsig->param_count == 4)
9416 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
9418 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
9419 } else if (cmethod->string_ctor) {
9420 g_assert (!context_used);
9421 g_assert (!vtable_arg);
9422 /* we simply pass a null pointer */
9423 EMIT_NEW_PCONST (cfg, *sp, NULL);
9424 /* now call the string ctor */
9425 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
9427 MonoInst* callvirt_this_arg = NULL;
9429 if (cmethod->klass->valuetype) {
9430 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
9431 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
9432 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
9437 * The code generated by mini_emit_virtual_call () expects
9438 * iargs [0] to be a boxed instance, but luckily the vcall
9439 * will be transformed into a normal call there.
9441 } else if (context_used) {
9442 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
9445 MonoVTable *vtable = NULL;
9447 if (!cfg->compile_aot)
9448 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9449 CHECK_TYPELOAD (cmethod->klass);
9452 * TypeInitializationExceptions thrown from the mono_runtime_class_init
9453 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
9454 * As a workaround, we call class cctors before allocating objects.
9456 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
9457 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, cmethod->klass, helper_sig_class_init_trampoline, NULL);
9458 if (cfg->verbose_level > 2)
9459 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
9460 class_inits = g_slist_prepend (class_inits, cmethod->klass);
9463 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
9466 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
9469 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
9471 /* Now call the actual ctor */
9472 /* Avoid virtual calls to ctors if possible */
9473 if (mono_class_is_marshalbyref (cmethod->klass))
9474 callvirt_this_arg = sp [0];
9477 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
9478 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9479 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9484 CHECK_CFG_EXCEPTION;
9485 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
9486 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
9487 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
9488 !g_list_find (dont_inline, cmethod)) {
9491 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
9492 cfg->real_offset += 5;
9495 inline_costs += costs - 5;
9497 INLINE_FAILURE ("inline failure");
9498 // FIXME-VT: Clean this up
9499 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
9500 GSHAREDVT_FAILURE(*ip);
9501 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
9503 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
9506 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
9507 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
9508 } else if (context_used &&
9509 (!mono_method_is_generic_sharable (cmethod, TRUE) ||
9510 !mono_class_generic_sharing_enabled (cmethod->klass))) {
9511 MonoInst *cmethod_addr;
9513 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
9514 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9516 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
9518 INLINE_FAILURE ("ctor call");
9519 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
9520 callvirt_this_arg, NULL, vtable_arg);
9524 if (alloc == NULL) {
9526 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
9527 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
9541 token = read32 (ip + 1);
9542 klass = mini_get_class (method, token, generic_context);
9543 CHECK_TYPELOAD (klass);
9544 if (sp [0]->type != STACK_OBJ)
9547 context_used = mini_class_check_context_used (cfg, klass);
9549 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9550 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
9557 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9560 if (cfg->compile_aot)
9561 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9563 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9565 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
9567 save_cast_details (cfg, klass, sp [0]->dreg, TRUE, &bblock);
9568 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
9569 reset_cast_details (cfg);
9572 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9573 MonoMethod *mono_castclass;
9574 MonoInst *iargs [1];
9577 mono_castclass = mono_marshal_get_castclass (klass);
9580 save_cast_details (cfg, klass, sp [0]->dreg, TRUE, &bblock);
9581 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
9582 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9583 reset_cast_details (cfg);
9584 CHECK_CFG_EXCEPTION;
9585 g_assert (costs > 0);
9588 cfg->real_offset += 5;
9593 inline_costs += costs;
9596 ins = handle_castclass (cfg, klass, *sp, context_used);
9597 CHECK_CFG_EXCEPTION;
9607 token = read32 (ip + 1);
9608 klass = mini_get_class (method, token, generic_context);
9609 CHECK_TYPELOAD (klass);
9610 if (sp [0]->type != STACK_OBJ)
9613 context_used = mini_class_check_context_used (cfg, klass);
9615 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9616 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
9623 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9626 if (cfg->compile_aot)
9627 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9629 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9631 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
9634 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9635 MonoMethod *mono_isinst;
9636 MonoInst *iargs [1];
9639 mono_isinst = mono_marshal_get_isinst (klass);
9642 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
9643 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9644 CHECK_CFG_EXCEPTION;
9645 g_assert (costs > 0);
9648 cfg->real_offset += 5;
9653 inline_costs += costs;
9656 ins = handle_isinst (cfg, klass, *sp, context_used);
9657 CHECK_CFG_EXCEPTION;
9664 case CEE_UNBOX_ANY: {
9668 token = read32 (ip + 1);
9669 klass = mini_get_class (method, token, generic_context);
9670 CHECK_TYPELOAD (klass);
9672 mono_save_token_info (cfg, image, token, klass);
9674 context_used = mini_class_check_context_used (cfg, klass);
9676 if (mini_is_gsharedvt_klass (cfg, klass)) {
9677 *sp = handle_unbox_gsharedvt (cfg, klass, *sp, &bblock);
9685 if (generic_class_is_reference_type (cfg, klass)) {
9686 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
9687 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9688 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
9695 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9698 /*FIXME AOT support*/
9699 if (cfg->compile_aot)
9700 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9702 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9704 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
9705 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
9708 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9709 MonoMethod *mono_castclass;
9710 MonoInst *iargs [1];
9713 mono_castclass = mono_marshal_get_castclass (klass);
9716 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
9717 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9718 CHECK_CFG_EXCEPTION;
9719 g_assert (costs > 0);
9722 cfg->real_offset += 5;
9726 inline_costs += costs;
9728 ins = handle_castclass (cfg, klass, *sp, context_used);
9729 CHECK_CFG_EXCEPTION;
9737 if (mono_class_is_nullable (klass)) {
9738 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
9745 ins = handle_unbox (cfg, klass, sp, context_used);
9751 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9764 token = read32 (ip + 1);
9765 klass = mini_get_class (method, token, generic_context);
9766 CHECK_TYPELOAD (klass);
9768 mono_save_token_info (cfg, image, token, klass);
9770 context_used = mini_class_check_context_used (cfg, klass);
9772 if (generic_class_is_reference_type (cfg, klass)) {
9778 if (klass == mono_defaults.void_class)
9780 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
9782 /* frequent check in generic code: box (struct), brtrue */
9784 // FIXME: LLVM can't handle the inconsistent bb linking
9785 if (!mono_class_is_nullable (klass) &&
9786 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
9787 (ip [5] == CEE_BRTRUE ||
9788 ip [5] == CEE_BRTRUE_S ||
9789 ip [5] == CEE_BRFALSE ||
9790 ip [5] == CEE_BRFALSE_S)) {
9791 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
9793 MonoBasicBlock *true_bb, *false_bb;
9797 if (cfg->verbose_level > 3) {
9798 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9799 printf ("<box+brtrue opt>\n");
9807 target = ip + 1 + (signed char)(*ip);
9814 target = ip + 4 + (gint)(read32 (ip));
9818 g_assert_not_reached ();
9822 * We need to link both bblocks, since it is needed for handling stack
9823 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
9824 * Branching to only one of them would lead to inconsistencies, so
9825 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
9827 GET_BBLOCK (cfg, true_bb, target);
9828 GET_BBLOCK (cfg, false_bb, ip);
9830 mono_link_bblock (cfg, cfg->cbb, true_bb);
9831 mono_link_bblock (cfg, cfg->cbb, false_bb);
9833 if (sp != stack_start) {
9834 handle_stack_args (cfg, stack_start, sp - stack_start);
9836 CHECK_UNVERIFIABLE (cfg);
9839 if (COMPILE_LLVM (cfg)) {
9840 dreg = alloc_ireg (cfg);
9841 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
9842 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
9844 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
9846 /* The JIT can't eliminate the iconst+compare */
9847 MONO_INST_NEW (cfg, ins, OP_BR);
9848 ins->inst_target_bb = is_true ? true_bb : false_bb;
9849 MONO_ADD_INS (cfg->cbb, ins);
9852 start_new_bblock = 1;
9856 *sp++ = handle_box (cfg, val, klass, context_used, &bblock);
9858 CHECK_CFG_EXCEPTION;
9867 token = read32 (ip + 1);
9868 klass = mini_get_class (method, token, generic_context);
9869 CHECK_TYPELOAD (klass);
9871 mono_save_token_info (cfg, image, token, klass);
9873 context_used = mini_class_check_context_used (cfg, klass);
9875 if (mono_class_is_nullable (klass)) {
9878 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
9879 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
9883 ins = handle_unbox (cfg, klass, sp, context_used);
9896 MonoClassField *field;
9897 #ifndef DISABLE_REMOTING
9901 gboolean is_instance;
9903 gpointer addr = NULL;
9904 gboolean is_special_static;
9906 MonoInst *store_val = NULL;
9907 MonoInst *thread_ins;
9910 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
9912 if (op == CEE_STFLD) {
9920 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
9922 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
9925 if (op == CEE_STSFLD) {
9933 token = read32 (ip + 1);
9934 if (method->wrapper_type != MONO_WRAPPER_NONE) {
9935 field = mono_method_get_wrapper_data (method, token);
9936 klass = field->parent;
9939 field = mono_field_from_token (image, token, &klass, generic_context);
9943 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
9944 FIELD_ACCESS_FAILURE;
9945 mono_class_init (klass);
9947 if (is_instance && *ip != CEE_LDFLDA && is_magic_tls_access (field))
9 950 /* if the class is Critical then transparent code cannot access its fields */
9951 if (!is_instance && mono_security_core_clr_enabled ())
9952 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
9 954 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
9955 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
9956 if (mono_security_core_clr_enabled ())
9957 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
9961 * LDFLD etc. is usable on static fields as well, so convert those cases to
9964 if (is_instance && field->type->attrs & FIELD_ATTRIBUTE_STATIC) {
9976 g_assert_not_reached ();
9978 is_instance = FALSE;
9981 context_used = mini_class_check_context_used (cfg, klass);
9985 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
9986 if (op == CEE_STFLD) {
9987 if (target_type_is_incompatible (cfg, field->type, sp [1]))
9989 #ifndef DISABLE_REMOTING
9990 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
9991 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
9992 MonoInst *iargs [5];
9994 GSHAREDVT_FAILURE (op);
9997 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9998 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
9999 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
10001 iargs [4] = sp [1];
10003 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10004 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
10005 iargs, ip, cfg->real_offset, dont_inline, TRUE);
10006 CHECK_CFG_EXCEPTION;
10007 g_assert (costs > 0);
10009 cfg->real_offset += 5;
10012 inline_costs += costs;
10014 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
10021 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10023 if (mini_is_gsharedvt_klass (cfg, klass)) {
10024 MonoInst *offset_ins;
10026 context_used = mini_class_check_context_used (cfg, klass);
10028 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10029 dreg = alloc_ireg_mp (cfg);
10030 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10031 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
10032 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
10034 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
10036 if (sp [0]->opcode != OP_LDADDR)
10037 store->flags |= MONO_INST_FAULT;
10039 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
10040 /* insert call to write barrier */
10044 dreg = alloc_ireg_mp (cfg);
10045 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10046 emit_write_barrier (cfg, ptr, sp [1]);
10049 store->flags |= ins_flag;
10056 #ifndef DISABLE_REMOTING
10057 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
10058 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
10059 MonoInst *iargs [4];
10061 GSHAREDVT_FAILURE (op);
10063 iargs [0] = sp [0];
10064 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10065 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10066 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
10067 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10068 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
10069 iargs, ip, cfg->real_offset, dont_inline, TRUE);
10070 CHECK_CFG_EXCEPTION;
10072 g_assert (costs > 0);
10074 cfg->real_offset += 5;
10078 inline_costs += costs;
10080 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
10086 if (sp [0]->type == STACK_VTYPE) {
10089 /* Have to compute the address of the variable */
10091 var = get_vreg_to_inst (cfg, sp [0]->dreg);
10093 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
10095 g_assert (var->klass == klass);
10097 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
10101 if (op == CEE_LDFLDA) {
10102 if (is_magic_tls_access (field)) {
10103 GSHAREDVT_FAILURE (*ip);
10105 *sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
10107 if (sp [0]->type == STACK_OBJ) {
10108 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10109 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
10112 dreg = alloc_ireg_mp (cfg);
10114 if (mini_is_gsharedvt_klass (cfg, klass)) {
10115 MonoInst *offset_ins;
10117 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10118 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10120 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10122 ins->klass = mono_class_from_mono_type (field->type);
10123 ins->type = STACK_MP;
10129 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10131 if (mini_is_gsharedvt_klass (cfg, klass)) {
10132 MonoInst *offset_ins;
10134 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10135 dreg = alloc_ireg_mp (cfg);
10136 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10137 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
10139 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
10141 load->flags |= ins_flag;
10142 if (sp [0]->opcode != OP_LDADDR)
10143 load->flags |= MONO_INST_FAULT;
10157 * We can only support shared generic static
10158 * field access on architectures where the
10159 * trampoline code has been extended to handle
10160 * the generic class init.
10162 #ifndef MONO_ARCH_VTABLE_REG
10163 GENERIC_SHARING_FAILURE (op);
10166 context_used = mini_class_check_context_used (cfg, klass);
10168 ftype = mono_field_get_type (field);
10170 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
10173 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
10174 * to be called here.
10176 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
10177 mono_class_vtable (cfg->domain, klass);
10178 CHECK_TYPELOAD (klass);
10180 mono_domain_lock (cfg->domain);
10181 if (cfg->domain->special_static_fields)
10182 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
10183 mono_domain_unlock (cfg->domain);
10185 is_special_static = mono_class_field_is_special_static (field);
10187 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
10188 thread_ins = mono_get_thread_intrinsic (cfg);
10192 /* Generate IR to compute the field address */
10193 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
10195 * Fast access to TLS data
10196 * Inline version of get_thread_static_data () in
10200 int idx, static_data_reg, array_reg, dreg;
10202 GSHAREDVT_FAILURE (op);
10204 // offset &= 0x7fffffff;
10205 // idx = (offset >> 24) - 1;
10206 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
10207 MONO_ADD_INS (cfg->cbb, thread_ins);
10208 static_data_reg = alloc_ireg (cfg);
10209 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
10211 if (cfg->compile_aot) {
10212 int offset_reg, offset2_reg, idx_reg;
10214 /* For TLS variables, this will return the TLS offset */
10215 EMIT_NEW_SFLDACONST (cfg, ins, field);
10216 offset_reg = ins->dreg;
10217 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
10218 idx_reg = alloc_ireg (cfg);
10219 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
10220 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
10221 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
10222 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
10223 array_reg = alloc_ireg (cfg);
10224 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
10225 offset2_reg = alloc_ireg (cfg);
10226 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
10227 dreg = alloc_ireg (cfg);
10228 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
10230 offset = (gsize)addr & 0x7fffffff;
10231 idx = (offset >> 24) - 1;
10233 array_reg = alloc_ireg (cfg);
10234 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
10235 dreg = alloc_ireg (cfg);
10236 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
10238 } else if ((cfg->opt & MONO_OPT_SHARED) ||
10239 (cfg->compile_aot && is_special_static) ||
10240 (context_used && is_special_static)) {
10241 MonoInst *iargs [2];
10243 g_assert (field->parent);
10244 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10245 if (context_used) {
10246 iargs [1] = emit_get_rgctx_field (cfg, context_used,
10247 field, MONO_RGCTX_INFO_CLASS_FIELD);
10249 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10251 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10252 } else if (context_used) {
10253 MonoInst *static_data;
10256 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
10257 method->klass->name_space, method->klass->name, method->name,
10258 depth, field->offset);
10261 if (mono_class_needs_cctor_run (klass, method))
10262 emit_generic_class_init (cfg, klass);
10265 * The pointer we're computing here is
10267 * super_info.static_data + field->offset
10269 static_data = emit_get_rgctx_klass (cfg, context_used,
10270 klass, MONO_RGCTX_INFO_STATIC_DATA);
10272 if (mini_is_gsharedvt_klass (cfg, klass)) {
10273 MonoInst *offset_ins;
10275 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10276 dreg = alloc_ireg_mp (cfg);
10277 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
10278 } else if (field->offset == 0) {
10281 int addr_reg = mono_alloc_preg (cfg);
10282 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
10284 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
10285 MonoInst *iargs [2];
10287 g_assert (field->parent);
10288 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10289 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10290 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10292 MonoVTable *vtable = NULL;
10294 if (!cfg->compile_aot)
10295 vtable = mono_class_vtable (cfg->domain, klass);
10296 CHECK_TYPELOAD (klass);
10299 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
10300 if (!(g_slist_find (class_inits, klass))) {
10301 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, klass, helper_sig_class_init_trampoline, NULL);
10302 if (cfg->verbose_level > 2)
10303 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
10304 class_inits = g_slist_prepend (class_inits, klass);
10307 if (cfg->run_cctors) {
10309 /* This ensures that inlining cannot trigger */
10310 /* .cctors: too many apps depend on them */
10311 /* running with a specific order... */
10313 if (! vtable->initialized)
10314 INLINE_FAILURE ("class init");
10315 ex = mono_runtime_class_init_full (vtable, FALSE);
10317 set_exception_object (cfg, ex);
10318 goto exception_exit;
10322 if (cfg->compile_aot)
10323 EMIT_NEW_SFLDACONST (cfg, ins, field);
10326 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10328 EMIT_NEW_PCONST (cfg, ins, addr);
10331 MonoInst *iargs [1];
10332 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
10333 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
10337 /* Generate IR to do the actual load/store operation */
10339 if (op == CEE_LDSFLDA) {
10340 ins->klass = mono_class_from_mono_type (ftype);
10341 ins->type = STACK_PTR;
10343 } else if (op == CEE_STSFLD) {
10346 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
10347 store->flags |= ins_flag;
10349 gboolean is_const = FALSE;
10350 MonoVTable *vtable = NULL;
10351 gpointer addr = NULL;
10353 if (!context_used) {
10354 vtable = mono_class_vtable (cfg->domain, klass);
10355 CHECK_TYPELOAD (klass);
10357 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
10358 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
10359 int ro_type = ftype->type;
10361 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10362 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
10363 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
10366 GSHAREDVT_FAILURE (op);
10368 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
10371 case MONO_TYPE_BOOLEAN:
10373 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
10377 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
10380 case MONO_TYPE_CHAR:
10382 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
10386 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
10391 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
10395 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
10400 case MONO_TYPE_PTR:
10401 case MONO_TYPE_FNPTR:
10402 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10403 type_to_eval_stack_type ((cfg), field->type, *sp);
10406 case MONO_TYPE_STRING:
10407 case MONO_TYPE_OBJECT:
10408 case MONO_TYPE_CLASS:
10409 case MONO_TYPE_SZARRAY:
10410 case MONO_TYPE_ARRAY:
10411 if (!mono_gc_is_moving ()) {
10412 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10413 type_to_eval_stack_type ((cfg), field->type, *sp);
10421 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
10426 case MONO_TYPE_VALUETYPE:
10436 CHECK_STACK_OVF (1);
10438 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
10439 load->flags |= ins_flag;
10452 token = read32 (ip + 1);
10453 klass = mini_get_class (method, token, generic_context);
10454 CHECK_TYPELOAD (klass);
10455 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
10456 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
10457 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
10458 generic_class_is_reference_type (cfg, klass)) {
10459 /* insert call to write barrier */
10460 emit_write_barrier (cfg, sp [0], sp [1]);
10472 const char *data_ptr;
10474 guint32 field_token;
10480 token = read32 (ip + 1);
10482 klass = mini_get_class (method, token, generic_context);
10483 CHECK_TYPELOAD (klass);
10485 context_used = mini_class_check_context_used (cfg, klass);
10487 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
10488 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
10489 ins->sreg1 = sp [0]->dreg;
10490 ins->type = STACK_I4;
10491 ins->dreg = alloc_ireg (cfg);
10492 MONO_ADD_INS (cfg->cbb, ins);
10493 *sp = mono_decompose_opcode (cfg, ins);
10496 if (context_used) {
10497 MonoInst *args [3];
10498 MonoClass *array_class = mono_array_class_get (klass, 1);
10499 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
10501 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
10504 args [0] = emit_get_rgctx_klass (cfg, context_used,
10505 array_class, MONO_RGCTX_INFO_VTABLE);
10510 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
10512 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
10514 if (cfg->opt & MONO_OPT_SHARED) {
10515 /* Decompose now to avoid problems with references to the domainvar */
10516 MonoInst *iargs [3];
10518 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10519 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10520 iargs [2] = sp [0];
10522 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
10524 /* Decompose later since it is needed by abcrem */
10525 MonoClass *array_type = mono_array_class_get (klass, 1);
10526 mono_class_vtable (cfg->domain, array_type);
10527 CHECK_TYPELOAD (array_type);
10529 MONO_INST_NEW (cfg, ins, OP_NEWARR);
10530 ins->dreg = alloc_ireg_ref (cfg);
10531 ins->sreg1 = sp [0]->dreg;
10532 ins->inst_newa_class = klass;
10533 ins->type = STACK_OBJ;
10534 ins->klass = array_type;
10535 MONO_ADD_INS (cfg->cbb, ins);
10536 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10537 cfg->cbb->has_array_access = TRUE;
10539 /* Needed so mono_emit_load_get_addr () gets called */
10540 mono_get_got_var (cfg);
10550 * we inline/optimize the initialization sequence if possible.
10551 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
10552 * for small sizes open code the memcpy
10553 * ensure the rva field is big enough
10555 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
10556 MonoMethod *memcpy_method = get_memcpy_method ();
10557 MonoInst *iargs [3];
10558 int add_reg = alloc_ireg_mp (cfg);
10560 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
10561 if (cfg->compile_aot) {
10562 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
10564 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
10566 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
10567 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
10576 if (sp [0]->type != STACK_OBJ)
10579 MONO_INST_NEW (cfg, ins, OP_LDLEN);
10580 ins->dreg = alloc_preg (cfg);
10581 ins->sreg1 = sp [0]->dreg;
10582 ins->type = STACK_I4;
10583 /* This flag will be inherited by the decomposition */
10584 ins->flags |= MONO_INST_FAULT;
10585 MONO_ADD_INS (cfg->cbb, ins);
10586 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10587 cfg->cbb->has_array_access = TRUE;
10595 if (sp [0]->type != STACK_OBJ)
10598 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10600 klass = mini_get_class (method, read32 (ip + 1), generic_context);
10601 CHECK_TYPELOAD (klass);
10602 /* we need to make sure that this array is exactly the type it needs
10603 * to be for correctness. the wrappers are lax with their usage
10604 * so we need to ignore them here
10606 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
10607 MonoClass *array_class = mono_array_class_get (klass, 1);
10608 mini_emit_check_array_type (cfg, sp [0], array_class);
10609 CHECK_TYPELOAD (array_class);
10613 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10618 case CEE_LDELEM_I1:
10619 case CEE_LDELEM_U1:
10620 case CEE_LDELEM_I2:
10621 case CEE_LDELEM_U2:
10622 case CEE_LDELEM_I4:
10623 case CEE_LDELEM_U4:
10624 case CEE_LDELEM_I8:
10626 case CEE_LDELEM_R4:
10627 case CEE_LDELEM_R8:
10628 case CEE_LDELEM_REF: {
10634 if (*ip == CEE_LDELEM) {
10636 token = read32 (ip + 1);
10637 klass = mini_get_class (method, token, generic_context);
10638 CHECK_TYPELOAD (klass);
10639 mono_class_init (klass);
10642 klass = array_access_to_klass (*ip);
10644 if (sp [0]->type != STACK_OBJ)
10647 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10649 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
10650 // FIXME-VT: OP_ICONST optimization
10651 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10652 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10653 ins->opcode = OP_LOADV_MEMBASE;
10654 } else if (sp [1]->opcode == OP_ICONST) {
10655 int array_reg = sp [0]->dreg;
10656 int index_reg = sp [1]->dreg;
10657 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
10659 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
10660 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
10662 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10663 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10666 if (*ip == CEE_LDELEM)
10673 case CEE_STELEM_I1:
10674 case CEE_STELEM_I2:
10675 case CEE_STELEM_I4:
10676 case CEE_STELEM_I8:
10677 case CEE_STELEM_R4:
10678 case CEE_STELEM_R8:
10679 case CEE_STELEM_REF:
10684 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10686 if (*ip == CEE_STELEM) {
10688 token = read32 (ip + 1);
10689 klass = mini_get_class (method, token, generic_context);
10690 CHECK_TYPELOAD (klass);
10691 mono_class_init (klass);
10694 klass = array_access_to_klass (*ip);
10696 if (sp [0]->type != STACK_OBJ)
10699 emit_array_store (cfg, klass, sp, TRUE);
10701 if (*ip == CEE_STELEM)
10708 case CEE_CKFINITE: {
10712 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
10713 ins->sreg1 = sp [0]->dreg;
10714 ins->dreg = alloc_freg (cfg);
10715 ins->type = STACK_R8;
10716 MONO_ADD_INS (bblock, ins);
10718 *sp++ = mono_decompose_opcode (cfg, ins);
10723 case CEE_REFANYVAL: {
10724 MonoInst *src_var, *src;
10726 int klass_reg = alloc_preg (cfg);
10727 int dreg = alloc_preg (cfg);
10729 GSHAREDVT_FAILURE (*ip);
10732 MONO_INST_NEW (cfg, ins, *ip);
10735 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
10736 CHECK_TYPELOAD (klass);
10737 mono_class_init (klass);
10739 context_used = mini_class_check_context_used (cfg, klass);
10742 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10744 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
10745 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
10746 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
10748 if (context_used) {
10749 MonoInst *klass_ins;
10751 klass_ins = emit_get_rgctx_klass (cfg, context_used,
10752 klass, MONO_RGCTX_INFO_KLASS);
10755 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
10756 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
10758 mini_emit_class_check (cfg, klass_reg, klass);
10760 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
10761 ins->type = STACK_MP;
10766 case CEE_MKREFANY: {
10767 MonoInst *loc, *addr;
10769 GSHAREDVT_FAILURE (*ip);
10772 MONO_INST_NEW (cfg, ins, *ip);
10775 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
10776 CHECK_TYPELOAD (klass);
10777 mono_class_init (klass);
10779 context_used = mini_class_check_context_used (cfg, klass);
10781 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
10782 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
10784 if (context_used) {
10785 MonoInst *const_ins;
10786 int type_reg = alloc_preg (cfg);
10788 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
10789 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
10790 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
10791 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
10792 } else if (cfg->compile_aot) {
10793 int const_reg = alloc_preg (cfg);
10794 int type_reg = alloc_preg (cfg);
10796 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
10797 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
10798 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
10799 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
10801 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
10802 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
10804 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
10806 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
10807 ins->type = STACK_VTYPE;
10808 ins->klass = mono_defaults.typed_reference_class;
10813 case CEE_LDTOKEN: {
10815 MonoClass *handle_class;
10817 CHECK_STACK_OVF (1);
10820 n = read32 (ip + 1);
10822 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
10823 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
10824 handle = mono_method_get_wrapper_data (method, n);
10825 handle_class = mono_method_get_wrapper_data (method, n + 1);
10826 if (handle_class == mono_defaults.typehandle_class)
10827 handle = &((MonoClass*)handle)->byval_arg;
10830 handle = mono_ldtoken (image, n, &handle_class, generic_context);
10834 mono_class_init (handle_class);
10835 if (cfg->generic_sharing_context) {
10836 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
10837 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
10838 /* This case handles ldtoken
10839 of an open type, like for
10842 } else if (handle_class == mono_defaults.typehandle_class) {
10843 /* If we get a MONO_TYPE_CLASS
10844 then we need to provide the
10846 instantiation of it. */
10847 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
10850 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type (handle));
10851 } else if (handle_class == mono_defaults.fieldhandle_class)
10852 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
10853 else if (handle_class == mono_defaults.methodhandle_class)
10854 context_used = mini_method_check_context_used (cfg, handle);
10856 g_assert_not_reached ();
10859 if ((cfg->opt & MONO_OPT_SHARED) &&
10860 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
10861 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
10862 MonoInst *addr, *vtvar, *iargs [3];
10863 int method_context_used;
10865 method_context_used = mini_method_check_context_used (cfg, method);
10867 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
10869 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10870 EMIT_NEW_ICONST (cfg, iargs [1], n);
10871 if (method_context_used) {
10872 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
10873 method, MONO_RGCTX_INFO_METHOD);
10874 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
10876 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
10877 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
10879 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
10881 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
10883 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
10885 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
10886 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
10887 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
10888 (cmethod->klass == mono_defaults.systemtype_class) &&
10889 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
10890 MonoClass *tclass = mono_class_from_mono_type (handle);
10892 mono_class_init (tclass);
10893 if (context_used) {
10894 ins = emit_get_rgctx_klass (cfg, context_used,
10895 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
10896 } else if (cfg->compile_aot) {
10897 if (method->wrapper_type) {
10898 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
10899 /* Special case for static synchronized wrappers */
10900 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
10902 /* FIXME: n is not a normal token */
10904 EMIT_NEW_PCONST (cfg, ins, NULL);
10907 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
10910 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
10912 ins->type = STACK_OBJ;
10913 ins->klass = cmethod->klass;
10916 MonoInst *addr, *vtvar;
10918 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
10920 if (context_used) {
10921 if (handle_class == mono_defaults.typehandle_class) {
10922 ins = emit_get_rgctx_klass (cfg, context_used,
10923 mono_class_from_mono_type (handle),
10924 MONO_RGCTX_INFO_TYPE);
10925 } else if (handle_class == mono_defaults.methodhandle_class) {
10926 ins = emit_get_rgctx_method (cfg, context_used,
10927 handle, MONO_RGCTX_INFO_METHOD);
10928 } else if (handle_class == mono_defaults.fieldhandle_class) {
10929 ins = emit_get_rgctx_field (cfg, context_used,
10930 handle, MONO_RGCTX_INFO_CLASS_FIELD);
10932 g_assert_not_reached ();
10934 } else if (cfg->compile_aot) {
10935 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
10937 EMIT_NEW_PCONST (cfg, ins, handle);
10939 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
10940 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
10941 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
10951 MONO_INST_NEW (cfg, ins, OP_THROW);
10953 ins->sreg1 = sp [0]->dreg;
10955 bblock->out_of_line = TRUE;
10956 MONO_ADD_INS (bblock, ins);
10957 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
10958 MONO_ADD_INS (bblock, ins);
10961 link_bblock (cfg, bblock, end_bblock);
10962 start_new_bblock = 1;
10964 case CEE_ENDFINALLY:
10965 /* mono_save_seq_point_info () depends on this */
10966 if (sp != stack_start)
10967 emit_seq_point (cfg, method, ip, FALSE, FALSE);
10968 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
10969 MONO_ADD_INS (bblock, ins);
10971 start_new_bblock = 1;
10974 * Control will leave the method so empty the stack, otherwise
10975 * the next basic block will start with a nonempty stack.
10977 while (sp != stack_start) {
10982 case CEE_LEAVE_S: {
10985 if (*ip == CEE_LEAVE) {
10987 target = ip + 5 + (gint32)read32(ip + 1);
10990 target = ip + 2 + (signed char)(ip [1]);
10993 /* empty the stack */
10994 while (sp != stack_start) {
10999 * If this leave statement is in a catch block, check for a
11000 * pending exception, and rethrow it if necessary.
11001 * We avoid doing this in runtime invoke wrappers, since those are called
11002 * by native code which expects the wrapper to catch all exceptions.
11004 for (i = 0; i < header->num_clauses; ++i) {
11005 MonoExceptionClause *clause = &header->clauses [i];
11008 * Use <= in the final comparison to handle clauses with multiple
11009 * leave statements, like in bug #78024.
11010 * The ordering of the exception clauses guarantees that we find the
11011 * innermost clause.
11013 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
11015 MonoBasicBlock *dont_throw;
11020 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
11023 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
11025 NEW_BBLOCK (cfg, dont_throw);
11028 * Currently, we always rethrow the abort exception, despite the
11029 * fact that this is not correct. See thread6.cs for an example.
11030 * But propagating the abort exception is more important than
11031 * getting the semantics right.
11033 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
11034 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
11035 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
11037 MONO_START_BB (cfg, dont_throw);
11042 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
11044 MonoExceptionClause *clause;
11046 for (tmp = handlers; tmp; tmp = tmp->next) {
11047 clause = tmp->data;
11048 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
11050 link_bblock (cfg, bblock, tblock);
11051 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
11052 ins->inst_target_bb = tblock;
11053 ins->inst_eh_block = clause;
11054 MONO_ADD_INS (bblock, ins);
11055 bblock->has_call_handler = 1;
11056 if (COMPILE_LLVM (cfg)) {
11057 MonoBasicBlock *target_bb;
11060 * Link the finally bblock with the target, since it will
11061 * conceptually branch there.
11062 * FIXME: Have to link the bblock containing the endfinally.
11064 GET_BBLOCK (cfg, target_bb, target);
11065 link_bblock (cfg, tblock, target_bb);
11068 g_list_free (handlers);
11071 MONO_INST_NEW (cfg, ins, OP_BR);
11072 MONO_ADD_INS (bblock, ins);
11073 GET_BBLOCK (cfg, tblock, target);
11074 link_bblock (cfg, bblock, tblock);
11075 ins->inst_target_bb = tblock;
11076 start_new_bblock = 1;
11078 if (*ip == CEE_LEAVE)
11087 * Mono specific opcodes
11089 case MONO_CUSTOM_PREFIX: {
11091 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
11095 case CEE_MONO_ICALL: {
11097 MonoJitICallInfo *info;
11099 token = read32 (ip + 2);
11100 func = mono_method_get_wrapper_data (method, token);
11101 info = mono_find_jit_icall_by_addr (func);
11103 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
11106 CHECK_STACK (info->sig->param_count);
11107 sp -= info->sig->param_count;
11109 ins = mono_emit_jit_icall (cfg, info->func, sp);
11110 if (!MONO_TYPE_IS_VOID (info->sig->ret))
11114 inline_costs += 10 * num_calls++;
11118 case CEE_MONO_LDPTR: {
11121 CHECK_STACK_OVF (1);
11123 token = read32 (ip + 2);
11125 ptr = mono_method_get_wrapper_data (method, token);
11126 /* FIXME: Generalize this */
11127 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
11128 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
11133 EMIT_NEW_PCONST (cfg, ins, ptr);
11136 inline_costs += 10 * num_calls++;
11137 /* Can't embed random pointers into AOT code */
11141 case CEE_MONO_JIT_ICALL_ADDR: {
11142 MonoJitICallInfo *callinfo;
11145 CHECK_STACK_OVF (1);
11147 token = read32 (ip + 2);
11149 ptr = mono_method_get_wrapper_data (method, token);
11150 callinfo = mono_find_jit_icall_by_addr (ptr);
11151 g_assert (callinfo);
11152 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
11155 inline_costs += 10 * num_calls++;
11158 case CEE_MONO_ICALL_ADDR: {
11159 MonoMethod *cmethod;
11162 CHECK_STACK_OVF (1);
11164 token = read32 (ip + 2);
11166 cmethod = mono_method_get_wrapper_data (method, token);
11168 if (cfg->compile_aot) {
11169 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
11171 ptr = mono_lookup_internal_call (cmethod);
11173 EMIT_NEW_PCONST (cfg, ins, ptr);
11179 case CEE_MONO_VTADDR: {
11180 MonoInst *src_var, *src;
11186 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11187 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
11192 case CEE_MONO_NEWOBJ: {
11193 MonoInst *iargs [2];
11195 CHECK_STACK_OVF (1);
11197 token = read32 (ip + 2);
11198 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11199 mono_class_init (klass);
11200 NEW_DOMAINCONST (cfg, iargs [0]);
11201 MONO_ADD_INS (cfg->cbb, iargs [0]);
11202 NEW_CLASSCONST (cfg, iargs [1], klass);
11203 MONO_ADD_INS (cfg->cbb, iargs [1]);
11204 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
11206 inline_costs += 10 * num_calls++;
11209 case CEE_MONO_OBJADDR:
11212 MONO_INST_NEW (cfg, ins, OP_MOVE);
11213 ins->dreg = alloc_ireg_mp (cfg);
11214 ins->sreg1 = sp [0]->dreg;
11215 ins->type = STACK_MP;
11216 MONO_ADD_INS (cfg->cbb, ins);
11220 case CEE_MONO_LDNATIVEOBJ:
11222 * Similar to LDOBJ, but instead load the unmanaged
11223 * representation of the vtype to the stack.
11228 token = read32 (ip + 2);
11229 klass = mono_method_get_wrapper_data (method, token);
11230 g_assert (klass->valuetype);
11231 mono_class_init (klass);
11234 MonoInst *src, *dest, *temp;
11237 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
11238 temp->backend.is_pinvoke = 1;
11239 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
11240 mini_emit_stobj (cfg, dest, src, klass, TRUE);
11242 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
11243 dest->type = STACK_VTYPE;
11244 dest->klass = klass;
11250 case CEE_MONO_RETOBJ: {
11252 * Same as RET, but return the native representation of a vtype
11255 g_assert (cfg->ret);
11256 g_assert (mono_method_signature (method)->pinvoke);
11261 token = read32 (ip + 2);
11262 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11264 if (!cfg->vret_addr) {
11265 g_assert (cfg->ret_var_is_local);
11267 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
11269 EMIT_NEW_RETLOADA (cfg, ins);
11271 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
11273 if (sp != stack_start)
11276 MONO_INST_NEW (cfg, ins, OP_BR);
11277 ins->inst_target_bb = end_bblock;
11278 MONO_ADD_INS (bblock, ins);
11279 link_bblock (cfg, bblock, end_bblock);
11280 start_new_bblock = 1;
11284 case CEE_MONO_CISINST:
11285 case CEE_MONO_CCASTCLASS: {
11290 token = read32 (ip + 2);
11291 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11292 if (ip [1] == CEE_MONO_CISINST)
11293 ins = handle_cisinst (cfg, klass, sp [0]);
11295 ins = handle_ccastclass (cfg, klass, sp [0]);
11301 case CEE_MONO_SAVE_LMF:
11302 case CEE_MONO_RESTORE_LMF:
11303 #ifdef MONO_ARCH_HAVE_LMF_OPS
11304 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
11305 MONO_ADD_INS (bblock, ins);
11306 cfg->need_lmf_area = TRUE;
11310 case CEE_MONO_CLASSCONST:
11311 CHECK_STACK_OVF (1);
11313 token = read32 (ip + 2);
11314 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
11317 inline_costs += 10 * num_calls++;
11319 case CEE_MONO_NOT_TAKEN:
11320 bblock->out_of_line = TRUE;
11323 case CEE_MONO_TLS: {
11326 CHECK_STACK_OVF (1);
11328 key = (gint32)read32 (ip + 2);
11329 g_assert (key < TLS_KEY_NUM);
11331 ins = mono_create_tls_get (cfg, key);
11333 if (cfg->compile_aot) {
11335 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
11336 ins->dreg = alloc_preg (cfg);
11337 ins->type = STACK_PTR;
11339 g_assert_not_reached ();
11342 ins->type = STACK_PTR;
11343 MONO_ADD_INS (bblock, ins);
11348 case CEE_MONO_DYN_CALL: {
11349 MonoCallInst *call;
11351 /* It would be easier to call a trampoline, but that would put an
11352 * extra frame on the stack, confusing exception handling. So
11353 * implement it inline using an opcode for now.
11356 if (!cfg->dyn_call_var) {
11357 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11358 /* prevent it from being register allocated */
11359 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
11362 /* Has to use a call inst since local regalloc expects it */
11363 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
11364 ins = (MonoInst*)call;
11366 ins->sreg1 = sp [0]->dreg;
11367 ins->sreg2 = sp [1]->dreg;
11368 MONO_ADD_INS (bblock, ins);
11370 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
11373 inline_costs += 10 * num_calls++;
11377 case CEE_MONO_MEMORY_BARRIER: {
11379 emit_memory_barrier (cfg, (int)read32 (ip + 1));
11383 case CEE_MONO_JIT_ATTACH: {
11384 MonoInst *args [16];
11385 MonoInst *ad_ins, *lmf_ins;
11386 MonoBasicBlock *next_bb = NULL;
11388 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11390 EMIT_NEW_PCONST (cfg, ins, NULL);
11391 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11397 ad_ins = mono_get_domain_intrinsic (cfg);
11398 lmf_ins = mono_get_lmf_intrinsic (cfg);
11401 if (MONO_ARCH_HAVE_TLS_GET && ad_ins && lmf_ins) {
11402 NEW_BBLOCK (cfg, next_bb);
11404 MONO_ADD_INS (cfg->cbb, ad_ins);
11405 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ad_ins->dreg, 0);
11406 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
11408 MONO_ADD_INS (cfg->cbb, lmf_ins);
11409 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, lmf_ins->dreg, 0);
11410 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
11413 if (cfg->compile_aot) {
11414 /* AOT code is only used in the root domain */
11415 EMIT_NEW_PCONST (cfg, args [0], NULL);
11417 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
11419 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
11420 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11423 MONO_START_BB (cfg, next_bb);
11429 case CEE_MONO_JIT_DETACH: {
11430 MonoInst *args [16];
11432 /* Restore the original domain */
11433 dreg = alloc_ireg (cfg);
11434 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
11435 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
11440 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
11446 case CEE_PREFIX1: {
11449 case CEE_ARGLIST: {
11450 /* somewhat similar to LDTOKEN */
11451 MonoInst *addr, *vtvar;
11452 CHECK_STACK_OVF (1);
11453 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
11455 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11456 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
11458 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11459 ins->type = STACK_VTYPE;
11460 ins->klass = mono_defaults.argumenthandle_class;
11473 * The following transforms:
11474 * CEE_CEQ into OP_CEQ
11475 * CEE_CGT into OP_CGT
11476 * CEE_CGT_UN into OP_CGT_UN
11477 * CEE_CLT into OP_CLT
11478 * CEE_CLT_UN into OP_CLT_UN
11480 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
11482 MONO_INST_NEW (cfg, ins, cmp->opcode);
11484 cmp->sreg1 = sp [0]->dreg;
11485 cmp->sreg2 = sp [1]->dreg;
11486 type_from_op (cmp, sp [0], sp [1]);
11488 if ((sp [0]->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
11489 cmp->opcode = OP_LCOMPARE;
11490 else if (sp [0]->type == STACK_R8)
11491 cmp->opcode = OP_FCOMPARE;
11493 cmp->opcode = OP_ICOMPARE;
11494 MONO_ADD_INS (bblock, cmp);
11495 ins->type = STACK_I4;
11496 ins->dreg = alloc_dreg (cfg, ins->type);
11497 type_from_op (ins, sp [0], sp [1]);
11499 if (cmp->opcode == OP_FCOMPARE) {
11501 * The backends expect the fceq opcodes to do the
11504 cmp->opcode = OP_NOP;
11505 ins->sreg1 = cmp->sreg1;
11506 ins->sreg2 = cmp->sreg2;
11508 MONO_ADD_INS (bblock, ins);
11514 MonoInst *argconst;
11515 MonoMethod *cil_method;
11517 CHECK_STACK_OVF (1);
11519 n = read32 (ip + 2);
11520 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
11521 if (!cmethod || mono_loader_get_last_error ())
11523 mono_class_init (cmethod->klass);
11525 mono_save_token_info (cfg, image, n, cmethod);
11527 context_used = mini_method_check_context_used (cfg, cmethod);
11529 cil_method = cmethod;
11530 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
11531 METHOD_ACCESS_FAILURE;
11533 if (mono_security_cas_enabled ()) {
11534 if (check_linkdemand (cfg, method, cmethod))
11535 INLINE_FAILURE ("linkdemand");
11536 CHECK_CFG_EXCEPTION;
11537 } else if (mono_security_core_clr_enabled ()) {
11538 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
11542 * Optimize the common case of ldftn+delegate creation
11544 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
11545 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
11546 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
11547 MonoInst *target_ins;
11548 MonoMethod *invoke;
11549 int invoke_context_used;
11551 invoke = mono_get_delegate_invoke (ctor_method->klass);
11552 if (!invoke || !mono_method_signature (invoke))
11555 invoke_context_used = mini_method_check_context_used (cfg, invoke);
11557 target_ins = sp [-1];
11559 if (mono_security_core_clr_enabled ())
11560 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
11562 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
11563 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
11564 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
11565 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
11566 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
11570 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
11571 /* FIXME: SGEN support */
11572 if (invoke_context_used == 0) {
11574 if (cfg->verbose_level > 3)
11575 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
11577 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
11578 CHECK_CFG_EXCEPTION;
11587 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
11588 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
11592 inline_costs += 10 * num_calls++;
11595 case CEE_LDVIRTFTN: {
11596 MonoInst *args [2];
11600 n = read32 (ip + 2);
11601 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
11602 if (!cmethod || mono_loader_get_last_error ())
11604 mono_class_init (cmethod->klass);
11606 context_used = mini_method_check_context_used (cfg, cmethod);
11608 if (mono_security_cas_enabled ()) {
11609 if (check_linkdemand (cfg, method, cmethod))
11610 INLINE_FAILURE ("linkdemand");
11611 CHECK_CFG_EXCEPTION;
11612 } else if (mono_security_core_clr_enabled ()) {
11613 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
11619 args [1] = emit_get_rgctx_method (cfg, context_used,
11620 cmethod, MONO_RGCTX_INFO_METHOD);
11623 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
11625 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
11628 inline_costs += 10 * num_calls++;
11632 CHECK_STACK_OVF (1);
11634 n = read16 (ip + 2);
11636 EMIT_NEW_ARGLOAD (cfg, ins, n);
11641 CHECK_STACK_OVF (1);
11643 n = read16 (ip + 2);
11645 NEW_ARGLOADA (cfg, ins, n);
11646 MONO_ADD_INS (cfg->cbb, ins);
11654 n = read16 (ip + 2);
11656 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
11658 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
11662 CHECK_STACK_OVF (1);
11664 n = read16 (ip + 2);
11666 EMIT_NEW_LOCLOAD (cfg, ins, n);
11671 unsigned char *tmp_ip;
11672 CHECK_STACK_OVF (1);
11674 n = read16 (ip + 2);
11677 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
11683 EMIT_NEW_LOCLOADA (cfg, ins, n);
11692 n = read16 (ip + 2);
11694 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
11696 emit_stloc_ir (cfg, sp, header, n);
11703 if (sp != stack_start)
11705 if (cfg->method != method)
11707 * Inlining this into a loop in a parent could lead to
11708 * stack overflows which is different behavior than the
11709 * non-inlined case, thus disable inlining in this case.
11711 goto inline_failure;
11713 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
11714 ins->dreg = alloc_preg (cfg);
11715 ins->sreg1 = sp [0]->dreg;
11716 ins->type = STACK_PTR;
11717 MONO_ADD_INS (cfg->cbb, ins);
11719 cfg->flags |= MONO_CFG_HAS_ALLOCA;
11721 ins->flags |= MONO_INST_INIT;
11726 case CEE_ENDFILTER: {
11727 MonoExceptionClause *clause, *nearest;
11728 int cc, nearest_num;
11732 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
11734 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
11735 ins->sreg1 = (*sp)->dreg;
11736 MONO_ADD_INS (bblock, ins);
11737 start_new_bblock = 1;
11742 for (cc = 0; cc < header->num_clauses; ++cc) {
11743 clause = &header->clauses [cc];
11744 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
11745 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
11746 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
11751 g_assert (nearest);
11752 if ((ip - header->code) != nearest->handler_offset)
11757 case CEE_UNALIGNED_:
11758 ins_flag |= MONO_INST_UNALIGNED;
11759 /* FIXME: record alignment? we can assume 1 for now */
11763 case CEE_VOLATILE_:
11764 ins_flag |= MONO_INST_VOLATILE;
11768 ins_flag |= MONO_INST_TAILCALL;
11769 cfg->flags |= MONO_CFG_HAS_TAIL;
11770 /* Can't inline tail calls at this time */
11771 inline_costs += 100000;
11778 token = read32 (ip + 2);
11779 klass = mini_get_class (method, token, generic_context);
11780 CHECK_TYPELOAD (klass);
11781 if (generic_class_is_reference_type (cfg, klass))
11782 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
11784 mini_emit_initobj (cfg, *sp, NULL, klass);
11788 case CEE_CONSTRAINED_:
11790 token = read32 (ip + 2);
11791 constrained_call = mini_get_class (method, token, generic_context);
11792 CHECK_TYPELOAD (constrained_call);
11796 case CEE_INITBLK: {
11797 MonoInst *iargs [3];
11801 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
11802 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
11803 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
11804 /* emit_memset only works when val == 0 */
11805 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
11807 iargs [0] = sp [0];
11808 iargs [1] = sp [1];
11809 iargs [2] = sp [2];
11810 if (ip [1] == CEE_CPBLK) {
11811 MonoMethod *memcpy_method = get_memcpy_method ();
11812 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11814 MonoMethod *memset_method = get_memset_method ();
11815 mono_emit_method_call (cfg, memset_method, iargs, NULL);
11825 ins_flag |= MONO_INST_NOTYPECHECK;
11827 ins_flag |= MONO_INST_NORANGECHECK;
11828 /* we ignore the no-nullcheck for now since we
11829 * really do it explicitly only when doing callvirt->call
11833 case CEE_RETHROW: {
11835 int handler_offset = -1;
11837 for (i = 0; i < header->num_clauses; ++i) {
11838 MonoExceptionClause *clause = &header->clauses [i];
11839 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
11840 handler_offset = clause->handler_offset;
11845 bblock->flags |= BB_EXCEPTION_UNSAFE;
11847 g_assert (handler_offset != -1);
11849 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
11850 MONO_INST_NEW (cfg, ins, OP_RETHROW);
11851 ins->sreg1 = load->dreg;
11852 MONO_ADD_INS (bblock, ins);
11854 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11855 MONO_ADD_INS (bblock, ins);
11858 link_bblock (cfg, bblock, end_bblock);
11859 start_new_bblock = 1;
11867 GSHAREDVT_FAILURE (*ip);
11869 CHECK_STACK_OVF (1);
11871 token = read32 (ip + 2);
11872 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic && !generic_context) {
11873 MonoType *type = mono_type_create_from_typespec (image, token);
11874 val = mono_type_size (type, &ialign);
11876 MonoClass *klass = mono_class_get_full (image, token, generic_context);
11877 CHECK_TYPELOAD (klass);
11878 mono_class_init (klass);
11879 val = mono_type_size (&klass->byval_arg, &ialign);
11881 EMIT_NEW_ICONST (cfg, ins, val);
11886 case CEE_REFANYTYPE: {
11887 MonoInst *src_var, *src;
11889 GSHAREDVT_FAILURE (*ip);
11895 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11897 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11898 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11899 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
11904 case CEE_READONLY_:
11917 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
11927 g_warning ("opcode 0x%02x not handled", *ip);
11931 if (start_new_bblock != 1)
11934 bblock->cil_length = ip - bblock->cil_code;
11935 if (bblock->next_bb) {
11936 /* This could already be set because of inlining, #693905 */
11937 MonoBasicBlock *bb = bblock;
11939 while (bb->next_bb)
11941 bb->next_bb = end_bblock;
11943 bblock->next_bb = end_bblock;
11946 if (cfg->method == method && cfg->domainvar) {
11948 MonoInst *get_domain;
11950 cfg->cbb = init_localsbb;
11952 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
11953 MONO_ADD_INS (cfg->cbb, get_domain);
11955 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
11957 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
11958 MONO_ADD_INS (cfg->cbb, store);
11961 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
11962 if (cfg->compile_aot)
11963 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
11964 mono_get_got_var (cfg);
11967 if (cfg->method == method && cfg->got_var)
11968 mono_emit_load_got_addr (cfg);
11970 if (init_localsbb) {
11971 cfg->cbb = init_localsbb;
11973 for (i = 0; i < header->num_locals; ++i) {
11974 emit_init_local (cfg, i, header->locals [i], init_locals);
11978 if (cfg->init_ref_vars && cfg->method == method) {
11979 /* Emit initialization for ref vars */
11980 // FIXME: Avoid duplication initialization for IL locals.
11981 for (i = 0; i < cfg->num_varinfo; ++i) {
11982 MonoInst *ins = cfg->varinfo [i];
11984 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
11985 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
11989 if (cfg->lmf_var && cfg->method == method) {
11990 cfg->cbb = init_localsbb;
11991 emit_push_lmf (cfg);
11995 MonoBasicBlock *bb;
11998 * Make seq points at backward branch targets interruptable.
12000 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
12001 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
12002 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
12005 /* Add a sequence point for method entry/exit events */
12007 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
12008 MONO_ADD_INS (init_localsbb, ins);
12009 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
12010 MONO_ADD_INS (cfg->bb_exit, ins);
12014 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
12015 * the code they refer to was dead (#11880).
12017 if (sym_seq_points) {
12018 for (i = 0; i < header->code_size; ++i) {
12019 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
12022 NEW_SEQ_POINT (cfg, ins, i, FALSE);
12023 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
12030 if (cfg->method == method) {
12031 MonoBasicBlock *bb;
12032 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12033 bb->region = mono_find_block_region (cfg, bb->real_offset);
12035 mono_create_spvar_for_region (cfg, bb->region);
12036 if (cfg->verbose_level > 2)
12037 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
12041 g_slist_free (class_inits);
12042 dont_inline = g_list_remove (dont_inline, method);
12044 if (inline_costs < 0) {
12047 /* Method is too large */
12048 mname = mono_method_full_name (method, TRUE);
12049 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
12050 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
12052 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
12053 mono_basic_block_free (original_bb);
12057 if ((cfg->verbose_level > 2) && (cfg->method == method))
12058 mono_print_code (cfg, "AFTER METHOD-TO-IR");
12060 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
12061 mono_basic_block_free (original_bb);
12062 return inline_costs;
12065 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
12072 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
12076 set_exception_type_from_invalid_il (cfg, method, ip);
12080 g_slist_free (class_inits);
12081 mono_basic_block_free (original_bb);
12082 dont_inline = g_list_remove (dont_inline, method);
12083 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a register-source membase store opcode (OP_STORE*_MEMBASE_REG) to the
 * equivalent immediate-source form (OP_STORE*_MEMBASE_IMM), preserving the
 * operand width (1/2/4/8 bytes or pointer-sized).
 *   Aborts via g_assert_not_reached () for any opcode without an immediate form.
 * NOTE(review): this listing is elided — the return-type line, the
 * `switch (opcode) {` header, the `default:` label and the closing braces sit
 * on lines not shown here; only the case/return pairs are visible.
 */
12088 store_membase_reg_to_store_membase_imm (int opcode)
12091 case OP_STORE_MEMBASE_REG:
12092 return OP_STORE_MEMBASE_IMM;
12093 case OP_STOREI1_MEMBASE_REG:
12094 return OP_STOREI1_MEMBASE_IMM;
12095 case OP_STOREI2_MEMBASE_REG:
12096 return OP_STOREI2_MEMBASE_IMM;
12097 case OP_STOREI4_MEMBASE_REG:
12098 return OP_STOREI4_MEMBASE_IMM;
12099 case OP_STOREI8_MEMBASE_REG:
12100 return OP_STOREI8_MEMBASE_IMM;
12102 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Map a two-register opcode to its immediate-operand variant (e.g. an
 * integer/long ALU op to its *_IMM form, a compare to *_COMPARE_IMM, a
 * membase store to its *_MEMBASE_IMM form, and a few x86/amd64-specific
 * opcodes to their immediate forms).
 * NOTE(review): elided listing — the `case` labels naming the *source*
 * opcodes (e.g. the case matching `return OP_IADD_IMM;`) are on hidden
 * lines, as is the default branch; presumably the default returns a
 * "no immediate form" sentinel (-1) — confirm against the full source.
 */
12109 mono_op_to_op_imm (int opcode)
12113 return OP_IADD_IMM;
12115 return OP_ISUB_IMM;
12117 return OP_IDIV_IMM;
12119 return OP_IDIV_UN_IMM;
12121 return OP_IREM_IMM;
12123 return OP_IREM_UN_IMM;
12125 return OP_IMUL_IMM;
12127 return OP_IAND_IMM;
12131 return OP_IXOR_IMM;
12133 return OP_ISHL_IMM;
12135 return OP_ISHR_IMM;
12137 return OP_ISHR_UN_IMM;
12140 return OP_LADD_IMM;
12142 return OP_LSUB_IMM;
12144 return OP_LAND_IMM;
12148 return OP_LXOR_IMM;
12150 return OP_LSHL_IMM;
12152 return OP_LSHR_IMM;
12154 return OP_LSHR_UN_IMM;
12157 return OP_COMPARE_IMM;
12159 return OP_ICOMPARE_IMM;
12161 return OP_LCOMPARE_IMM;
12163 case OP_STORE_MEMBASE_REG:
12164 return OP_STORE_MEMBASE_IMM;
12165 case OP_STOREI1_MEMBASE_REG:
12166 return OP_STOREI1_MEMBASE_IMM;
12167 case OP_STOREI2_MEMBASE_REG:
12168 return OP_STOREI2_MEMBASE_IMM;
12169 case OP_STOREI4_MEMBASE_REG:
12170 return OP_STOREI4_MEMBASE_IMM;
12172 #if defined(TARGET_X86) || defined (TARGET_AMD64)
12174 return OP_X86_PUSH_IMM;
12175 case OP_X86_COMPARE_MEMBASE_REG:
12176 return OP_X86_COMPARE_MEMBASE_IMM;
12178 #if defined(TARGET_AMD64)
12179 case OP_AMD64_ICOMPARE_MEMBASE_REG:
12180 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
12182 case OP_VOIDCALL_REG:
12183 return OP_VOIDCALL;
12191 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL indirect-load opcode (CEE_LDIND_*) to the corresponding
 * machine-IR base+offset load (OP_LOAD*_MEMBASE). Both CEE_LDIND_I (native
 * int) and CEE_LDIND_REF (object reference) map to the pointer-sized
 * OP_LOAD_MEMBASE. Aborts for any opcode outside the LDIND family.
 * NOTE(review): elided listing — most CEE_LDIND_* case labels are on
 * hidden lines; only CEE_LDIND_REF is visible.
 */
12198 ldind_to_load_membase (int opcode)
12202 return OP_LOADI1_MEMBASE;
12204 return OP_LOADU1_MEMBASE;
12206 return OP_LOADI2_MEMBASE;
12208 return OP_LOADU2_MEMBASE;
12210 return OP_LOADI4_MEMBASE;
12212 return OP_LOADU4_MEMBASE;
12214 return OP_LOAD_MEMBASE;
12215 case CEE_LDIND_REF:
12216 return OP_LOAD_MEMBASE;
12218 return OP_LOADI8_MEMBASE;
12220 return OP_LOADR4_MEMBASE;
12222 return OP_LOADR8_MEMBASE;
12224 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL indirect-store opcode (CEE_STIND_*) to the corresponding
 * machine-IR base+offset store (OP_STORE*_MEMBASE_REG). CEE_STIND_REF maps
 * to the pointer-sized OP_STORE_MEMBASE_REG. Aborts for opcodes outside
 * the STIND family.
 * NOTE(review): elided listing — most CEE_STIND_* case labels are on
 * hidden lines; only CEE_STIND_REF is visible.
 */
12231 stind_to_store_membase (int opcode)
12235 return OP_STOREI1_MEMBASE_REG;
12237 return OP_STOREI2_MEMBASE_REG;
12239 return OP_STOREI4_MEMBASE_REG;
12241 case CEE_STIND_REF:
12242 return OP_STORE_MEMBASE_REG;
12244 return OP_STOREI8_MEMBASE_REG;
12246 return OP_STORER4_MEMBASE_REG;
12248 return OP_STORER8_MEMBASE_REG;
12250 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a base+offset load (OP_LOAD*_MEMBASE) to the absolute-address form
 * (OP_LOAD*_MEM). Only implemented for x86/amd64 (the whole body is inside
 * the TARGET_X86/TARGET_AMD64 guard); the 8-byte variant is additionally
 * guarded by SIZEOF_REGISTER == 8.
 * NOTE(review): elided listing — the non-x86 fallthrough and the default
 * return value (presumably -1 for "no such form") are on hidden lines.
 */
12257 mono_load_membase_to_load_mem (int opcode)
12259 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
12260 #if defined(TARGET_X86) || defined(TARGET_AMD64)
12262 case OP_LOAD_MEMBASE:
12263 return OP_LOAD_MEM;
12264 case OP_LOADU1_MEMBASE:
12265 return OP_LOADU1_MEM;
12266 case OP_LOADU2_MEMBASE:
12267 return OP_LOADU2_MEM;
12268 case OP_LOADI4_MEMBASE:
12269 return OP_LOADI4_MEM;
12270 case OP_LOADU4_MEMBASE:
12271 return OP_LOADU4_MEM;
12272 #if SIZEOF_REGISTER == 8
12273 case OP_LOADI8_MEMBASE:
12274 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU opcode whose destination register is immediately stored by
 * STORE_OPCODE, return the x86/amd64 read-modify-write form that operates
 * directly on the memory destination (OP_X86_*_MEMBASE_REG/IMM on x86,
 * OP_X86_* for 32-bit and OP_AMD64_* for 64-bit operands on amd64).
 *   Each target first checks that STORE_OPCODE is a full-width store it can
 * fold (4-byte/pointer on x86; 4-, 8-byte or pointer on amd64); otherwise
 * the fold is rejected.
 * NOTE(review): elided listing — the `case` labels selecting the ALU
 * opcodes, the reject-path return (presumably -1), and the #endif lines
 * are hidden between the visible lines.
 */
12283 op_to_op_dest_membase (int store_opcode, int opcode)
12285 #if defined(TARGET_X86)
12286 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
12291 return OP_X86_ADD_MEMBASE_REG;
12293 return OP_X86_SUB_MEMBASE_REG;
12295 return OP_X86_AND_MEMBASE_REG;
12297 return OP_X86_OR_MEMBASE_REG;
12299 return OP_X86_XOR_MEMBASE_REG;
12302 return OP_X86_ADD_MEMBASE_IMM;
12305 return OP_X86_SUB_MEMBASE_IMM;
12308 return OP_X86_AND_MEMBASE_IMM;
12311 return OP_X86_OR_MEMBASE_IMM;
12314 return OP_X86_XOR_MEMBASE_IMM;
12320 #if defined(TARGET_AMD64)
12321 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
12326 return OP_X86_ADD_MEMBASE_REG;
12328 return OP_X86_SUB_MEMBASE_REG;
12330 return OP_X86_AND_MEMBASE_REG;
12332 return OP_X86_OR_MEMBASE_REG;
12334 return OP_X86_XOR_MEMBASE_REG;
12336 return OP_X86_ADD_MEMBASE_IMM;
12338 return OP_X86_SUB_MEMBASE_IMM;
12340 return OP_X86_AND_MEMBASE_IMM;
12342 return OP_X86_OR_MEMBASE_IMM;
12344 return OP_X86_XOR_MEMBASE_IMM;
12346 return OP_AMD64_ADD_MEMBASE_REG;
12348 return OP_AMD64_SUB_MEMBASE_REG;
12350 return OP_AMD64_AND_MEMBASE_REG;
12352 return OP_AMD64_OR_MEMBASE_REG;
12354 return OP_AMD64_XOR_MEMBASE_REG;
12357 return OP_AMD64_ADD_MEMBASE_IMM;
12360 return OP_AMD64_SUB_MEMBASE_IMM;
12363 return OP_AMD64_AND_MEMBASE_IMM;
12366 return OP_AMD64_OR_MEMBASE_IMM;
12369 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fold a condition-materializing opcode whose byte result is stored by
 * STORE_OPCODE into the x86/amd64 SETcc-to-memory form: the two visible
 * arms return OP_X86_SETEQ_MEMBASE / OP_X86_SETNE_MEMBASE when the store
 * is the 1-byte OP_STOREI1_MEMBASE_REG.
 * NOTE(review): elided listing — the `case` labels (presumably OP_CEQ-style
 * compare-result opcodes), the fallthrough return and #endif are hidden.
 */
12379 op_to_op_store_membase (int store_opcode, int opcode)
12381 #if defined(TARGET_X86) || defined(TARGET_AMD64)
12384 if (store_opcode == OP_STOREI1_MEMBASE_REG)
12385 return OP_X86_SETEQ_MEMBASE;
12387 if (store_opcode == OP_STOREI1_MEMBASE_REG)
12388 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fold a load feeding OPCODE's *first* source operand into a memory-operand
 * instruction form (push-from-memory, compare-with-memory, ...), given the
 * LOAD_OPCODE that produced the operand.
 *   Both targets special-case OP_ICOMPARE_IMM on an unsigned byte load
 * (OP_X86_COMPARE_MEMBASE8_IMM) and otherwise require a full-width load;
 * on amd64 under __mono_ilp32__, pointer-sized and 8-byte loads are treated
 * differently (the flagged FIXME about 32-bit-only immediates keeps the
 * amd64 COMPARE_IMM folds commented out).
 * NOTE(review): elided listing — reject-path returns, several `case` labels
 * and #else/#endif lines are hidden between the visible lines.
 */
12396 op_to_op_src1_membase (int load_opcode, int opcode)
12399 /* FIXME: This has sign extension issues */
12401 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
12402 return OP_X86_COMPARE_MEMBASE8_IMM;
12405 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
12410 return OP_X86_PUSH_MEMBASE;
12411 case OP_COMPARE_IMM:
12412 case OP_ICOMPARE_IMM:
12413 return OP_X86_COMPARE_MEMBASE_IMM;
12416 return OP_X86_COMPARE_MEMBASE_REG;
12420 #ifdef TARGET_AMD64
12421 /* FIXME: This has sign extension issues */
12423 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
12424 return OP_X86_COMPARE_MEMBASE8_IMM;
12429 #ifdef __mono_ilp32__
12430 if (load_opcode == OP_LOADI8_MEMBASE)
12432 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12434 return OP_X86_PUSH_MEMBASE;
12436 /* FIXME: This only works for 32 bit immediates
12437 case OP_COMPARE_IMM:
12438 case OP_LCOMPARE_IMM:
12439 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12440 return OP_AMD64_COMPARE_MEMBASE_IMM;
12442 case OP_ICOMPARE_IMM:
12443 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
12444 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
12448 #ifdef __mono_ilp32__
12449 if (load_opcode == OP_LOAD_MEMBASE)
12450 return OP_AMD64_ICOMPARE_MEMBASE_REG;
12451 if (load_opcode == OP_LOADI8_MEMBASE)
12453 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12455 return OP_AMD64_COMPARE_MEMBASE_REG;
12458 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
12459 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fold a load feeding OPCODE's *second* source operand into a
 * reg-with-memory instruction form (OP_X86_*_REG_MEMBASE for 32-bit
 * operands, OP_AMD64_*_REG_MEMBASE for 64-bit), given the LOAD_OPCODE
 * that produced the operand.
 *   The 32-bit branch requires a 4-byte/pointer load; on amd64 the
 * __mono_ilp32__ split decides whether OP_LOAD_MEMBASE (pointer-sized)
 * belongs with the 4-byte or the 8-byte group.
 * NOTE(review): elided listing — the `case` labels selecting the ALU/compare
 * opcodes, reject-path returns and #else/#endif lines are hidden.
 */
12468 op_to_op_src2_membase (int load_opcode, int opcode)
12471 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
12477 return OP_X86_COMPARE_REG_MEMBASE;
12479 return OP_X86_ADD_REG_MEMBASE;
12481 return OP_X86_SUB_REG_MEMBASE;
12483 return OP_X86_AND_REG_MEMBASE;
12485 return OP_X86_OR_REG_MEMBASE;
12487 return OP_X86_XOR_REG_MEMBASE;
12491 #ifdef TARGET_AMD64
12492 #ifdef __mono_ilp32__
12493 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
12495 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
12499 return OP_AMD64_ICOMPARE_REG_MEMBASE;
12501 return OP_X86_ADD_REG_MEMBASE;
12503 return OP_X86_SUB_REG_MEMBASE;
12505 return OP_X86_AND_REG_MEMBASE;
12507 return OP_X86_OR_REG_MEMBASE;
12509 return OP_X86_XOR_REG_MEMBASE;
12511 #ifdef __mono_ilp32__
12512 } else if (load_opcode == OP_LOADI8_MEMBASE) {
12514 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
12519 return OP_AMD64_COMPARE_REG_MEMBASE;
12521 return OP_AMD64_ADD_REG_MEMBASE;
12523 return OP_AMD64_SUB_REG_MEMBASE;
12525 return OP_AMD64_AND_REG_MEMBASE;
12527 return OP_AMD64_OR_REG_MEMBASE;
12529 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuses to create the immediate form for
 * opcodes that are software-emulated on the current target: long shifts on
 * 32-bit targets without native long-shift support, and mul/div/rem when
 * MONO_ARCH_EMULATE_MUL_DIV / MONO_ARCH_EMULATE_DIV are defined (emulated
 * opcodes go through a call, so an _IMM variant would not help).
 * All remaining opcodes fall through to mono_op_to_op_imm ().
 * NOTE(review): elided listing — the `case` labels and the early-reject
 * returns inside each #if group are on hidden lines.
 */
12538 mono_op_to_op_imm_noemul (int opcode)
12541 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
12547 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
12554 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
12559 return mono_op_to_op_imm (opcode);
/*
 * NOTE(review): this whole function is an elided listing — braces, several
 * case labels, `continue`/`break` statements and some declarations (e.g.
 * `vreg`, `prev_bb`, `i`, `pos`) sit on hidden lines. Comments below annotate
 * only what the visible lines establish.
 */
12564 * mono_handle_global_vregs:
12566 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
12570 mono_handle_global_vregs (MonoCompile *cfg)
12572 gint32 *vreg_to_bb;
12573 MonoBasicBlock *bb;
/*
 * NOTE(review): element size here is sizeof (gint32*) — a pointer — for an
 * array indexed as gint32, and the "+ 1" adds one *byte*, not one element.
 * Harmlessly over-allocates on 64-bit, but presumably was meant to be
 * sizeof (gint32) * (cfg->next_vreg + 1) — confirm.
 */
12576 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
12578 #ifdef MONO_ARCH_SIMD_INTRINSICS
12579 if (cfg->uses_simd_intrinsics)
12580 mono_simd_simplify_indirection (cfg);
12583 /* Find local vregs used in more than one bb */
12584 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12585 MonoInst *ins = bb->code;
12586 int block_num = bb->block_num;
12588 if (cfg->verbose_level > 2)
12589 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
12592 for (; ins; ins = ins->next) {
12593 const char *spec = INS_INFO (ins->opcode);
12594 int regtype = 0, regindex;
12597 if (G_UNLIKELY (cfg->verbose_level > 2))
12598 mono_print_ins (ins);
/* By this point all CIL opcodes must have been lowered to machine IR */
12600 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Walk the up-to-4 register slots of the instruction: dest, src1, src2, src3 */
12602 for (regindex = 0; regindex < 4; regindex ++) {
12605 if (regindex == 0) {
12606 regtype = spec [MONO_INST_DEST];
12607 if (regtype == ' ')
12610 } else if (regindex == 1) {
12611 regtype = spec [MONO_INST_SRC1];
12612 if (regtype == ' ')
12615 } else if (regindex == 2) {
12616 regtype = spec [MONO_INST_SRC2];
12617 if (regtype == ' ')
12620 } else if (regindex == 3) {
12621 regtype = spec [MONO_INST_SRC3];
12622 if (regtype == ' ')
12627 #if SIZEOF_REGISTER == 4
12628 /* In the LLVM case, the long opcodes are not decomposed */
12629 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
12631 * Since some instructions reference the original long vreg,
12632 * and some reference the two component vregs, it is quite hard
12633 * to determine when it needs to be global. So be conservative.
12635 if (!get_vreg_to_inst (cfg, vreg)) {
12636 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
12638 if (cfg->verbose_level > 2)
12639 printf ("LONG VREG R%d made global.\n", vreg);
12643 * Make the component vregs volatile since the optimizations can
12644 * get confused otherwise.
12646 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
12647 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
12651 g_assert (vreg != -1);
/* vreg_to_bb holds block_num + 1 (0 = unseen, -1 = used in multiple bbs) */
12653 prev_bb = vreg_to_bb [vreg];
12654 if (prev_bb == 0) {
12655 /* 0 is a valid block num */
12656 vreg_to_bb [vreg] = block_num + 1;
12657 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
12658 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
12661 if (!get_vreg_to_inst (cfg, vreg)) {
12662 if (G_UNLIKELY (cfg->verbose_level > 2))
12663 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Allocate a variable of the right storage class for the vreg's regtype */
12667 if (vreg_is_ref (cfg, vreg))
12668 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
12670 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
12673 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
12676 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
12679 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
12682 g_assert_not_reached ();
12686 /* Flag as having been used in more than one bb */
12687 vreg_to_bb [vreg] = -1;
12693 /* If a variable is used in only one bblock, convert it into a local vreg */
12694 for (i = 0; i < cfg->num_varinfo; i++) {
12695 MonoInst *var = cfg->varinfo [i];
12696 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
12698 switch (var->type) {
12704 #if SIZEOF_REGISTER == 8
12707 #if !defined(TARGET_X86)
12708 /* Enabling this screws up the fp stack on x86 */
12711 if (mono_arch_is_soft_float ())
12714 /* Arguments are implicitly global */
12715 /* Putting R4 vars into registers doesn't work currently */
12716 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
12717 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
12719 * Make that the variable's liveness interval doesn't contain a call, since
12720 * that would cause the lvreg to be spilled, making the whole optimization
12723 /* This is too slow for JIT compilation */
/*
 * NOTE(review): the loop below dereferences vreg_to_bb [var->dreg] as a
 * basic-block pointer although vreg_to_bb is declared gint32* above —
 * presumably this region is inside a disabled (#if 0) guard on a hidden
 * line, matching the "too slow" comment. Confirm against the full source.
 */
12725 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
12727 int def_index, call_index, ins_index;
12728 gboolean spilled = FALSE;
12733 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
12734 const char *spec = INS_INFO (ins->opcode);
12736 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
12737 def_index = ins_index;
/*
 * NOTE(review): both clauses of this || test MONO_INST_SRC1/ins->sreg1 —
 * the second clause presumably should test MONO_INST_SRC2/ins->sreg2,
 * otherwise second-operand uses are never seen. Confirm before fixing.
 */
12739 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
12740 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
12741 if (call_index > def_index) {
12747 if (MONO_IS_CALL (ins))
12748 call_index = ins_index;
12758 if (G_UNLIKELY (cfg->verbose_level > 2))
12759 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* The variable becomes a plain local vreg: kill the var and its mapping */
12760 var->flags |= MONO_INST_IS_DEAD;
12761 cfg->vreg_to_inst [var->dreg] = NULL;
12768 * Compress the varinfo and vars tables so the liveness computation is faster and
12769 * takes up less space.
12772 for (i = 0; i < cfg->num_varinfo; ++i) {
12773 MonoInst *var = cfg->varinfo [i];
12774 if (pos < i && cfg->locals_start == i)
12775 cfg->locals_start = pos;
12776 if (!(var->flags & MONO_INST_IS_DEAD)) {
12778 cfg->varinfo [pos] = cfg->varinfo [i];
12779 cfg->varinfo [pos]->inst_c0 = pos;
12780 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
12781 cfg->vars [pos].idx = pos;
12782 #if SIZEOF_REGISTER == 4
12783 if (cfg->varinfo [pos]->type == STACK_I8) {
12784 /* Modify the two component vars too */
12787 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
12788 var1->inst_c0 = pos;
12789 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
12790 var1->inst_c0 = pos;
12797 cfg->num_varinfo = pos;
12798 if (cfg->locals_start > cfg->num_varinfo)
12799 cfg->locals_start = cfg->num_varinfo;
12803 * mono_spill_global_vars:
12805 * Generate spill code for variables which are not allocated to registers,
12806 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
12807 * code is generated which could be optimized by the local optimization passes.
/*
 * mono_spill_global_vars:
 *
 *   Rewrite accesses to global vregs which are backed by a MonoInst variable:
 * variables allocated to hregs (OP_REGVAR) are renamed in place, while stack
 * variables (OP_REGOFFSET) get explicit load/store instructions inserted around
 * each use/def, with several fusing optimizations (_membase forms, store-imm,
 * and an lvreg cache that reuses a previously loaded value).  Also decomposes
 * OP_LDADDR, records instruction-precise live ranges, and (optionally) emits
 * OP_LIVERANGE_START/END and GC liveness markers.
 *   Sets *need_local_opts to TRUE when the rewriting produced code (e.g. the
 * OP_ADD_IMM from an LDADDR) which the local optimization passes could clean up.
 *
 * NOTE(review): this extract is elided -- the embedded original line numbers
 * jump (e.g. 12833 -> 12836), so some declarations, braces and statements
 * (spec2, lvregs, store_opcode, lvreg, no_lvreg, ...) are defined on lines not
 * visible here.  The code tokens below are kept byte-identical to the extract.
 */
12810 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
12812 MonoBasicBlock *bb;
12814 int orig_next_vreg;
12815 guint32 *vreg_to_lvreg;
12817 guint32 i, lvregs_len;
12818 gboolean dest_has_lvreg = FALSE;
12819 guint32 stacktypes [128];
12820 MonoInst **live_range_start, **live_range_end;
12821 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
12822 int *gsharedvt_vreg_to_idx = NULL;
12824 *need_local_opts = FALSE;
12826 memset (spec2, 0, sizeof (spec2));
12828 /* FIXME: Move this function to mini.c */
/* Map the ins-spec regtype character ('i'/'l'/'f'/'x') to the stack type passed to alloc_dreg (). */
12829 stacktypes ['i'] = STACK_PTR;
12830 stacktypes ['l'] = STACK_I8;
12831 stacktypes ['f'] = STACK_R8;
12832 #ifdef MONO_ARCH_SIMD_INTRINSICS
12833 stacktypes ['x'] = STACK_VTYPE;
12836 #if SIZEOF_REGISTER == 4
12837 /* Create MonoInsts for longs */
/*
 * On 32-bit targets a 64-bit variable is accessed through two word-sized
 * vregs (dreg + 1 / dreg + 2); give each one an OP_REGOFFSET description
 * pointing at the LS/MS word of the variable's stack slot.
 */
12838 for (i = 0; i < cfg->num_varinfo; i++) {
12839 MonoInst *ins = cfg->varinfo [i];
12841 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
12842 switch (ins->type) {
12847 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
12850 g_assert (ins->opcode == OP_REGOFFSET);
12852 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
12854 tree->opcode = OP_REGOFFSET;
12855 tree->inst_basereg = ins->inst_basereg;
12856 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
12858 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
12860 tree->opcode = OP_REGOFFSET;
12861 tree->inst_basereg = ins->inst_basereg;
12862 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
12872 if (cfg->compute_gc_maps) {
12873 /* Vars allocated to registers need liveness info even when they hold non-references */
12874 for (i = 0; i < cfg->num_varinfo; i++) {
12875 MonoInst *ins = cfg->varinfo [i];
12877 if (ins->opcode == OP_REGVAR)
12878 ins->flags |= MONO_INST_GC_TRACK;
/*
 * Build gsharedvt_vreg_to_idx, mapping a variable's dreg to how its address is
 * computed: idx + 1 for gsharedvt locals (0 therefore means "not gsharedvt"),
 * -1 for gsharedvt arguments passed by reference.
 */
12882 if (cfg->gsharedvt) {
12883 gsharedvt_vreg_to_idx = mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
12885 for (i = 0; i < cfg->num_varinfo; ++i) {
12886 MonoInst *ins = cfg->varinfo [i];
12889 if (mini_is_gsharedvt_variable_type (cfg, ins->inst_vtype)) {
12890 if (i >= cfg->locals_start) {
12892 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
12893 gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
12894 ins->opcode = OP_GSHAREDVT_LOCAL;
12895 ins->inst_imm = idx;
12898 gsharedvt_vreg_to_idx [ins->dreg] = -1;
12899 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
12905 /* FIXME: widening and truncation */
12908 * As an optimization, when a variable allocated to the stack is first loaded into
12909 * an lvreg, we will remember the lvreg and use it the next time instead of loading
12910 * the variable again.
12912 orig_next_vreg = cfg->next_vreg;
12913 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
/* Fixed-capacity list of cached vregs; see the lvregs_len < 1024 asserts below. */
12914 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
12918 * These arrays contain the first and last instructions accessing a given
12920 * Since we emit bblocks in the same order we process them here, and we
12921 * don't split live ranges, these will precisely describe the live range of
12922 * the variable, i.e. the instruction range where a valid value can be found
12923 * in the variable's location.
12924 * The live range is computed using the liveness info computed by the liveness pass.
12925 * We can't use vmv->range, since that is an abstract live range, and we need
12926 * one which is instruction precise.
12927 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
12929 /* FIXME: Only do this if debugging info is requested */
12930 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
12931 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
12932 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
12933 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
12935 /* Add spill loads/stores */
12936 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12939 if (cfg->verbose_level > 2)
12940 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
12942 /* Clear vreg_to_lvreg array */
/* The lvreg cache is only valid within one bblock (and is also flushed after calls below). */
12943 for (i = 0; i < lvregs_len; i++)
12944 vreg_to_lvreg [lvregs [i]] = 0;
12948 MONO_BB_FOR_EACH_INS (bb, ins) {
12949 const char *spec = INS_INFO (ins->opcode);
12950 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
12951 gboolean store, no_lvreg;
12952 int sregs [MONO_MAX_SRC_REGS];
12954 if (G_UNLIKELY (cfg->verbose_level > 2))
12955 mono_print_ins (ins);
12957 if (ins->opcode == OP_NOP)
12961 * We handle LDADDR here as well, since it can only be decomposed
12962 * when variable addresses are known.
12964 if (ins->opcode == OP_LDADDR) {
12965 MonoInst *var = ins->inst_p0;
12967 if (var->opcode == OP_VTARG_ADDR) {
12968 /* Happens on SPARC/S390 where vtypes are passed by reference */
12969 MonoInst *vtaddr = var->inst_left;
12970 if (vtaddr->opcode == OP_REGVAR) {
12971 ins->opcode = OP_MOVE;
12972 ins->sreg1 = vtaddr->dreg;
12974 else if (var->inst_left->opcode == OP_REGOFFSET) {
12975 ins->opcode = OP_LOAD_MEMBASE;
12976 ins->inst_basereg = vtaddr->inst_basereg;
12977 ins->inst_offset = vtaddr->inst_offset;
12980 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg] < 0) {
12981 /* gsharedvt arg passed by ref */
12982 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
12984 ins->opcode = OP_LOAD_MEMBASE;
12985 ins->inst_basereg = var->inst_basereg;
12986 ins->inst_offset = var->inst_offset;
12987 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg]) {
12988 MonoInst *load, *load2, *load3;
12989 int idx = gsharedvt_vreg_to_idx [var->dreg] - 1;
12990 int reg1, reg2, reg3;
12991 MonoInst *info_var = cfg->gsharedvt_info_var;
12992 MonoInst *locals_var = cfg->gsharedvt_locals_var;
12996 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
12999 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
13001 g_assert (info_var);
13002 g_assert (locals_var);
13004 /* Mark the instruction used to compute the locals var as used */
13005 cfg->gsharedvt_locals_var_ins = NULL;
13007 /* Load the offset */
13008 if (info_var->opcode == OP_REGOFFSET) {
13009 reg1 = alloc_ireg (cfg);
13010 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
13011 } else if (info_var->opcode == OP_REGVAR) {
13013 reg1 = info_var->dreg;
13015 g_assert_not_reached ();
/* reg2 = info->entries [idx], the per-local offset inside the locals area */
13017 reg2 = alloc_ireg (cfg);
13018 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
13019 /* Load the locals area address */
13020 reg3 = alloc_ireg (cfg);
13021 if (locals_var->opcode == OP_REGOFFSET) {
13022 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
13023 } else if (locals_var->opcode == OP_REGVAR) {
13024 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
13026 g_assert_not_reached ();
13028 /* Compute the address */
13029 ins->opcode = OP_PADD;
/* Keep the loads in dependency order right before the OP_PADD. */
13033 mono_bblock_insert_before_ins (bb, ins, load3);
13034 mono_bblock_insert_before_ins (bb, load3, load2);
13036 mono_bblock_insert_before_ins (bb, load2, load);
13038 g_assert (var->opcode == OP_REGOFFSET);
/* Plain stack var: address = basereg + offset. */
13040 ins->opcode = OP_ADD_IMM;
13041 ins->sreg1 = var->inst_basereg;
13042 ins->inst_imm = var->inst_offset;
13045 *need_local_opts = TRUE;
13046 spec = INS_INFO (ins->opcode);
/* IR opcodes below MONO_CEE_LAST are CIL-level and must have been lowered by now. */
13049 if (ins->opcode < MONO_CEE_LAST) {
13050 mono_print_ins (ins);
13051 g_assert_not_reached ();
13055 * Store opcodes have destbasereg in the dreg, but in reality, it is an
/*
 * ... input, so swap dreg and sreg2 (and build a matching spec2) so the
 * generic dreg/sreg handling below treats it as a source; the swap is
 * undone after processing (see the tmp_reg swap near the end of the loop).
 */
13059 if (MONO_IS_STORE_MEMBASE (ins)) {
13060 tmp_reg = ins->dreg;
13061 ins->dreg = ins->sreg2;
13062 ins->sreg2 = tmp_reg;
13065 spec2 [MONO_INST_DEST] = ' ';
13066 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
13067 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
13068 spec2 [MONO_INST_SRC3] = ' ';
13070 } else if (MONO_IS_STORE_MEMINDEX (ins))
13071 g_assert_not_reached ();
13076 if (G_UNLIKELY (cfg->verbose_level > 2)) {
13077 printf ("\t %.3s %d", spec, ins->dreg);
13078 num_sregs = mono_inst_get_src_registers (ins, sregs);
13079 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
13080 printf (" %d", sregs [srcindex]);
/***************/
/*    DREG     */
/***************/
13087 regtype = spec [MONO_INST_DEST];
13088 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
13091 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
13092 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
13093 MonoInst *store_ins;
13095 MonoInst *def_ins = ins;
13096 int dreg = ins->dreg; /* The original vreg */
13098 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
13100 if (var->opcode == OP_REGVAR) {
/* Variable lives in an hreg: just rename the dreg. */
13101 ins->dreg = var->dreg;
13102 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
13104 * Instead of emitting a load+store, use a _membase opcode.
13106 g_assert (var->opcode == OP_REGOFFSET);
13107 if (ins->opcode == OP_MOVE) {
13111 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
13112 ins->inst_basereg = var->inst_basereg;
13113 ins->inst_offset = var->inst_offset;
13116 spec = INS_INFO (ins->opcode);
13120 g_assert (var->opcode == OP_REGOFFSET);
13122 prev_dreg = ins->dreg;
13124 /* Invalidate any previous lvreg for this vreg */
13125 vreg_to_lvreg [ins->dreg] = 0;
13129 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
/* Soft-float: R8 values are carried in integer regs, so store as I8. */
13131 store_opcode = OP_STOREI8_MEMBASE_REG;
/* Redirect the def to a fresh lvreg; the store below writes it back to the stack slot. */
13134 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
13136 #if SIZEOF_REGISTER != 8
13137 if (regtype == 'l') {
/* 32-bit: store the long as two I4 word stores (dreg+1 = LS word, dreg+2 = MS word). */
13138 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
13139 mono_bblock_insert_after_ins (bb, ins, store_ins);
13140 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
13141 mono_bblock_insert_after_ins (bb, ins, store_ins);
13142 def_ins = store_ins;
13147 g_assert (store_opcode != OP_STOREV_MEMBASE);
13149 /* Try to fuse the store into the instruction itself */
13150 /* FIXME: Add more instructions */
13151 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
/* Constant def: turn it into a store-immediate directly into the stack slot. */
13152 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
13153 ins->inst_imm = ins->inst_c0;
13154 ins->inst_destbasereg = var->inst_basereg;
13155 ins->inst_offset = var->inst_offset;
13156 spec = INS_INFO (ins->opcode);
13157 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
/* Register move into a stack var: turn the move itself into the store. */
13158 ins->opcode = store_opcode;
13159 ins->inst_destbasereg = var->inst_basereg;
13160 ins->inst_offset = var->inst_offset;
/* The instruction is now a store: apply the same dreg/sreg2 swap + spec2 trick as above. */
13164 tmp_reg = ins->dreg;
13165 ins->dreg = ins->sreg2;
13166 ins->sreg2 = tmp_reg;
13169 spec2 [MONO_INST_DEST] = ' ';
13170 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
13171 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
13172 spec2 [MONO_INST_SRC3] = ' ';
13174 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
13175 // FIXME: The backends expect the base reg to be in inst_basereg
13176 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
13178 ins->inst_basereg = var->inst_basereg;
13179 ins->inst_offset = var->inst_offset;
13180 spec = INS_INFO (ins->opcode);
13182 /* printf ("INS: "); mono_print_ins (ins); */
13183 /* Create a store instruction */
13184 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
13186 /* Insert it after the instruction */
13187 mono_bblock_insert_after_ins (bb, ins, store_ins);
13189 def_ins = store_ins;
13192 * We can't assign ins->dreg to var->dreg here, since the
13193 * sregs could use it. So set a flag, and do it after
/* ... the sregs have been processed (see the dest_has_lvreg block below). */
13196 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
13197 dest_has_lvreg = TRUE;
13202 if (def_ins && !live_range_start [dreg]) {
13203 live_range_start [dreg] = def_ins;
13204 live_range_start_bb [dreg] = bb;
13207 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
13210 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
13211 tmp->inst_c1 = dreg;
13212 mono_bblock_insert_after_ins (bb, def_ins, tmp);
/************/
/*  SREGS   */
/************/
13219 num_sregs = mono_inst_get_src_registers (ins, sregs);
13220 for (srcindex = 0; srcindex < 3; ++srcindex) {
13221 regtype = spec [MONO_INST_SRC1 + srcindex];
13222 sreg = sregs [srcindex];
13224 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
13225 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
13226 MonoInst *var = get_vreg_to_inst (cfg, sreg);
13227 MonoInst *use_ins = ins;
13228 MonoInst *load_ins;
13229 guint32 load_opcode;
13231 if (var->opcode == OP_REGVAR) {
/* Variable lives in an hreg: rename the sreg, no load needed. */
13232 sregs [srcindex] = var->dreg;
13233 //mono_inst_set_src_registers (ins, sregs);
13234 live_range_end [sreg] = use_ins;
13235 live_range_end_bb [sreg] = bb;
13237 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13240 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13241 /* var->dreg is a hreg */
13242 tmp->inst_c1 = sreg;
13243 mono_bblock_insert_after_ins (bb, ins, tmp);
13249 g_assert (var->opcode == OP_REGOFFSET);
13251 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
13253 g_assert (load_opcode != OP_LOADV_MEMBASE);
13255 if (vreg_to_lvreg [sreg]) {
13256 g_assert (vreg_to_lvreg [sreg] != -1);
13258 /* The variable is already loaded to an lvreg */
13259 if (G_UNLIKELY (cfg->verbose_level > 2))
13260 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
13261 sregs [srcindex] = vreg_to_lvreg [sreg];
13262 //mono_inst_set_src_registers (ins, sregs);
13266 /* Try to fuse the load into the instruction */
13267 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
13268 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
13269 sregs [0] = var->inst_basereg;
13270 //mono_inst_set_src_registers (ins, sregs);
13271 ins->inst_offset = var->inst_offset;
13272 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
13273 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
13274 sregs [1] = var->inst_basereg;
13275 //mono_inst_set_src_registers (ins, sregs);
13276 ins->inst_offset = var->inst_offset;
13278 if (MONO_IS_REAL_MOVE (ins)) {
/* A move whose source gets loaded explicitly is redundant: the load replaces it. */
13279 ins->opcode = OP_NOP;
13282 //printf ("%d ", srcindex); mono_print_ins (ins);
13284 sreg = alloc_dreg (cfg, stacktypes [regtype]);
/* Cache the loaded value in the lvreg table unless fp-stack/volatile/indirect rules forbid it. */
13286 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
13287 if (var->dreg == prev_dreg) {
13289 * sreg refers to the value loaded by the load
13290 * emitted below, but we need to use ins->dreg
13291 * since it refers to the store emitted earlier.
13295 g_assert (sreg != -1);
13296 vreg_to_lvreg [var->dreg] = sreg;
13297 g_assert (lvregs_len < 1024);
13298 lvregs [lvregs_len ++] = var->dreg;
13302 sregs [srcindex] = sreg;
13303 //mono_inst_set_src_registers (ins, sregs);
13305 #if SIZEOF_REGISTER != 8
13306 if (regtype == 'l') {
/* 32-bit long use: load MS word then LS word before the instruction. */
13307 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
13308 mono_bblock_insert_before_ins (bb, ins, load_ins);
13309 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
13310 mono_bblock_insert_before_ins (bb, ins, load_ins);
13311 use_ins = load_ins;
13316 #if SIZEOF_REGISTER == 4
13317 g_assert (load_opcode != OP_LOADI8_MEMBASE);
13319 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
13320 mono_bblock_insert_before_ins (bb, ins, load_ins);
13321 use_ins = load_ins;
13325 if (var->dreg < orig_next_vreg) {
13326 live_range_end [var->dreg] = use_ins;
13327 live_range_end_bb [var->dreg] = bb;
13330 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13333 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13334 tmp->inst_c1 = var->dreg;
13335 mono_bblock_insert_after_ins (bb, ins, tmp);
13339 mono_inst_set_src_registers (ins, sregs);
/* Deferred from the dreg handling above: now that the sregs are done, cache the def's lvreg. */
13341 if (dest_has_lvreg) {
13342 g_assert (ins->dreg != -1);
13343 vreg_to_lvreg [prev_dreg] = ins->dreg;
13344 g_assert (lvregs_len < 1024);
13345 lvregs [lvregs_len ++] = prev_dreg;
13346 dest_has_lvreg = FALSE;
/* Undo the store dreg/sreg2 swap performed before the generic handling. */
13350 tmp_reg = ins->dreg;
13351 ins->dreg = ins->sreg2;
13352 ins->sreg2 = tmp_reg;
13355 if (MONO_IS_CALL (ins)) {
13356 /* Clear vreg_to_lvreg array */
/* NOTE(review): presumably because a call can write stack vars through refs -- lvregs would be stale. */
13357 for (i = 0; i < lvregs_len; i++)
13358 vreg_to_lvreg [lvregs [i]] = 0;
13360 } else if (ins->opcode == OP_NOP) {
13362 MONO_INST_NULLIFY_SREGS (ins);
13365 if (cfg->verbose_level > 2)
13366 mono_print_ins_index (1, ins);
13369 /* Extend the live range based on the liveness info */
13370 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
13371 for (i = 0; i < cfg->num_varinfo; i ++) {
13372 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
13374 if (vreg_is_volatile (cfg, vi->vreg))
13375 /* The liveness info is incomplete */
13378 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
13379 /* Live from at least the first ins of this bb */
13380 live_range_start [vi->vreg] = bb->code;
13381 live_range_start_bb [vi->vreg] = bb;
13384 if (mono_bitset_test_fast (bb->live_out_set, i)) {
13385 /* Live at least until the last ins of this bb */
13386 live_range_end [vi->vreg] = bb->last_ins;
13387 live_range_end_bb [vi->vreg] = bb;
13393 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
13395 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
13396 * by storing the current native offset into MonoMethodVar->live_range_start/end.
13398 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
13399 for (i = 0; i < cfg->num_varinfo; ++i) {
13400 int vreg = MONO_VARINFO (cfg, i)->vreg;
13403 if (live_range_start [vreg]) {
13404 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
13406 ins->inst_c1 = vreg;
13407 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
13409 if (live_range_end [vreg]) {
13410 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
13412 ins->inst_c1 = vreg;
13413 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
13414 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
13416 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
13422 if (cfg->gsharedvt_locals_var_ins) {
13423 /* Nullify if unused */
13424 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
13425 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
13428 g_free (live_range_start);
13429 g_free (live_range_end);
13430 g_free (live_range_start_bb);
13431 g_free (live_range_end_bb);
13436 * - use 'iadd' instead of 'int_add'
13437 * - handling ovf opcodes: decompose in method_to_ir.
13438 * - unify iregs/fregs
13439 * -> partly done, the missing parts are:
13440 * - a more complete unification would involve unifying the hregs as well, so
13441 * code wouldn't need if (fp) all over the place. but that would mean the hregs
13442 * would no longer map to the machine hregs, so the code generators would need to
13443 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
13444 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
13445 * fp/non-fp branches speeds it up by about 15%.
13446 * - use sext/zext opcodes instead of shifts
13448 * - get rid of TEMPLOADs if possible and use vregs instead
13449 * - clean up usage of OP_P/OP_ opcodes
13450 * - cleanup usage of DUMMY_USE
13451 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
13453 * - set the stack type and allocate a dreg in the EMIT_NEW macros
13454 * - get rid of all the <foo>2 stuff when the new JIT is ready.
13455 * - make sure handle_stack_args () is called before the branch is emitted
13456 * - when the new IR is done, get rid of all unused stuff
13457 * - COMPARE/BEQ as separate instructions or unify them ?
13458 * - keeping them separate allows specialized compare instructions like
13459 * compare_imm, compare_membase
13460 * - most back ends unify fp compare+branch, fp compare+ceq
13461 * - integrate mono_save_args into inline_method
13462 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
13463 * - handle long shift opts on 32 bit platforms somehow: they require
13464 * 3 sregs (2 for arg1 and 1 for arg2)
13465 * - make byref a 'normal' type.
13466 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
13467 * variable if needed.
13468 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
13469 * like inline_method.
13470 * - remove inlining restrictions
13471 * - fix LNEG and enable cfold of INEG
13472 * - generalize x86 optimizations like ldelema as a peephole optimization
13473 * - add store_mem_imm for amd64
13474 * - optimize the loading of the interruption flag in the managed->native wrappers
13475 * - avoid special handling of OP_NOP in passes
13476 * - move code inserting instructions into one function/macro.
13477 * - try a coalescing phase after liveness analysis
13478 * - add float -> vreg conversion + local optimizations on !x86
13479 * - figure out how to handle decomposed branches during optimizations, ie.
13480 * compare+branch, op_jump_table+op_br etc.
13481 * - promote RuntimeXHandles to vregs
13482 * - vtype cleanups:
13483 * - add a NEW_VARLOADA_VREG macro
13484 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
13485 * accessing vtype fields.
13486 * - get rid of I8CONST on 64 bit platforms
13487 * - dealing with the increase in code size due to branches created during opcode
13489 * - use extended basic blocks
13490 * - all parts of the JIT
13491 * - handle_global_vregs () && local regalloc
13492 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
13493 * - sources of increase in code size:
13496 * - isinst and castclass
13497 * - lvregs not allocated to global registers even if used multiple times
13498 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
13500 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
13501 * - add all micro optimizations from the old JIT
13502 * - put tree optimizations into the deadce pass
13503 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
13504 * specific function.
13505 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
13506 * fcompare + branchCC.
13507 * - create a helper function for allocating a stack slot, taking into account
13508 * MONO_CFG_HAS_SPILLUP.
13510 * - merge the ia64 switch changes.
13511 * - optimize mono_regstate2_alloc_int/float.
13512 * - fix the pessimistic handling of variables accessed in exception handler blocks.
13513 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
13514 * parts of the tree could be separated by other instructions, killing the tree
13515 * arguments, or stores killing loads etc. Also, should we fold loads into other
13516 * instructions if the result of the load is used multiple times ?
13517 * - make the REM_IMM optimization in mini-x86.c arch-independent.
13518 * - LAST MERGE: 108395.
13519 * - when returning vtypes in registers, generate IR and append it to the end of the
13520 * last bb instead of doing it in the epilog.
13521 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
13529 - When to decompose opcodes:
13530 - earlier: this makes some optimizations hard to implement, since the low level IR
13531 no longer contains the necessary information. But it is easier to do.
13532 - later: harder to implement, enables more optimizations.
13533 - Branches inside bblocks:
13534 - created when decomposing complex opcodes.
13535 - branches to another bblock: harmless, but not tracked by the branch
13536 optimizations, so need to branch to a label at the start of the bblock.
13537 - branches to inside the same bblock: very problematic, trips up the local
13538 reg allocator. Can be fixed by splitting the current bblock, but that is a
13539 complex operation, since some local vregs can become global vregs etc.
13540 - Local/global vregs:
13541 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
13542 local register allocator.
13543 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
13544 structure, created by mono_create_var (). Assigned to hregs or the stack by
13545 the global register allocator.
13546 - When to do optimizations like alu->alu_imm:
13547 - earlier -> saves work later on since the IR will be smaller/simpler
13548 - later -> can work on more instructions
13549 - Handling of valuetypes:
13550 - When a vtype is pushed on the stack, a new temporary is created, an
13551 instruction computing its address (LDADDR) is emitted and pushed on
13552 the stack. Need to optimize cases when the vtype is used immediately as in
13553 argument passing, stloc etc.
13554 - Instead of the to_end stuff in the old JIT, simply call the function handling
13555 the values on the stack before emitting the last instruction of the bb.
13558 #endif /* DISABLE_JIT */