2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/assembly.h>
38 #include <mono/metadata/attrdefs.h>
39 #include <mono/metadata/loader.h>
40 #include <mono/metadata/tabledefs.h>
41 #include <mono/metadata/class.h>
42 #include <mono/metadata/object.h>
43 #include <mono/metadata/exception.h>
44 #include <mono/metadata/opcodes.h>
45 #include <mono/metadata/mono-endian.h>
46 #include <mono/metadata/tokentype.h>
47 #include <mono/metadata/tabledefs.h>
48 #include <mono/metadata/marshal.h>
49 #include <mono/metadata/debug-helpers.h>
50 #include <mono/metadata/mono-debug.h>
51 #include <mono/metadata/gc-internal.h>
52 #include <mono/metadata/security-manager.h>
53 #include <mono/metadata/threads-types.h>
54 #include <mono/metadata/security-core-clr.h>
55 #include <mono/metadata/monitor.h>
56 #include <mono/metadata/profiler-private.h>
57 #include <mono/metadata/profiler.h>
58 #include <mono/metadata/debug-mono-symfile.h>
59 #include <mono/utils/mono-compiler.h>
60 #include <mono/utils/mono-memory-model.h>
61 #include <mono/metadata/mono-basic-block.h>
68 #include "jit-icalls.h"
70 #include "debugger-agent.h"
/* Relative cost of a branch, used by inlining heuristics. */
72 #define BRANCH_COST 10
/* Methods whose IL body exceeds this length are not inlined. */
73 #define INLINE_LENGTH_LIMIT 20
/* Give up inlining the current callee: log MSG at verbosity >= 2 and
 * jump to the inline_failure label (only for non-wrapper inlined methods). */
74 #define INLINE_FAILURE(msg) do { \
75 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE)) { \
76 if (cfg->verbose_level >= 2) \
77 printf ("inline failed: %s\n", msg); \
78 goto inline_failure; \
81 #define CHECK_CFG_EXCEPTION do {\
82 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
85 #define METHOD_ACCESS_FAILURE do { \
86 char *method_fname = mono_method_full_name (method, TRUE); \
87 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
88 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS); \
89 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
90 g_free (method_fname); \
91 g_free (cil_method_fname); \
92 goto exception_exit; \
94 #define FIELD_ACCESS_FAILURE do { \
95 char *method_fname = mono_method_full_name (method, TRUE); \
96 char *field_fname = mono_field_full_name (field); \
97 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS); \
98 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
99 g_free (method_fname); \
100 g_free (field_fname); \
101 goto exception_exit; \
103 #define GENERIC_SHARING_FAILURE(opcode) do { \
104 if (cfg->generic_sharing_context) { \
105 if (cfg->verbose_level > 2) \
106 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
107 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
108 goto exception_exit; \
111 #define GSHAREDVT_FAILURE(opcode) do { \
112 if (cfg->gsharedvt) { \
113 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __FILE__, __LINE__); \
114 if (cfg->verbose_level >= 2) \
115 printf ("%s\n", cfg->exception_message); \
116 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
117 goto exception_exit; \
120 #define OUT_OF_MEMORY_FAILURE do { \
121 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
122 goto exception_exit; \
124 #define DISABLE_AOT(cfg) do { \
125 if ((cfg)->verbose_level >= 2) \
126 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
127 (cfg)->disable_aot = TRUE; \
130 /* Determine whether 'ins' represents a load of the 'this' argument */
131 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations: map ldind/stind opcodes to their _MEMBASE forms. */
133 static int ldind_to_load_membase (int opcode);
134 static int stind_to_store_membase (int opcode);
136 int mono_op_to_op_imm (int opcode);
137 int mono_op_to_op_imm_noemul (int opcode);
139 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
141 /* helper methods signatures */
/* Shared icall signatures for the trampoline helpers; created lazily in
 * mono_create_helper_signatures (). */
142 static MonoMethodSignature *helper_sig_class_init_trampoline = NULL;
143 static MonoMethodSignature *helper_sig_domain_get = NULL;
144 static MonoMethodSignature *helper_sig_generic_class_init_trampoline = NULL;
145 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm = NULL;
146 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline = NULL;
147 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline = NULL;
148 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm = NULL;
151 * Instruction metadata
159 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
160 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
166 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
171 /* keep in sync with the enum in mini.h */
174 #include "mini-ops.h"
179 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
180 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
182 * This should contain the index of the last sreg + 1. This is not the same
183 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
185 const gint8 ins_sreg_counts[] = {
186 #include "mini-ops.h"
191 #define MONO_INIT_VARINFO(vi,id) do { \
192 (vi)->range.first_use.pos.bid = 0xffff; \
198 mono_inst_set_src_registers (MonoInst *ins, int *regs)
200 ins->sreg1 = regs [0];
201 ins->sreg2 = regs [1];
202 ins->sreg3 = regs [2];
/* Allocate a new integer vreg. */
206 mono_alloc_ireg (MonoCompile *cfg)
208 return alloc_ireg (cfg);
/* Allocate a new floating point vreg. */
212 mono_alloc_freg (MonoCompile *cfg)
214 return alloc_freg (cfg);
/* Allocate a new pointer-sized (preg) vreg. */
218 mono_alloc_preg (MonoCompile *cfg)
220 return alloc_preg (cfg);
/* Allocate a vreg suited to the given evaluation-stack type. */
224 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
226 return alloc_dreg (cfg, stack_type);
230 * mono_alloc_ireg_ref:
232 * Allocate an IREG, and mark it as holding a GC ref.
235 mono_alloc_ireg_ref (MonoCompile *cfg)
237 return alloc_ireg_ref (cfg);
241 * mono_alloc_ireg_mp:
243 * Allocate an IREG, and mark it as holding a managed pointer.
246 mono_alloc_ireg_mp (MonoCompile *cfg)
248 return alloc_ireg_mp (cfg);
252 * mono_alloc_ireg_copy:
254 * Allocate an IREG with the same GC type as VREG.
257 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
259 if (vreg_is_ref (cfg, vreg))
260 return alloc_ireg_ref (cfg);
261 else if (vreg_is_mp (cfg, vreg))
262 return alloc_ireg_mp (cfg);
264 return alloc_ireg (cfg);
268 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
273 type = mini_replace_type (type);
275 switch (type->type) {
278 case MONO_TYPE_BOOLEAN:
290 case MONO_TYPE_FNPTR:
292 case MONO_TYPE_CLASS:
293 case MONO_TYPE_STRING:
294 case MONO_TYPE_OBJECT:
295 case MONO_TYPE_SZARRAY:
296 case MONO_TYPE_ARRAY:
300 #if SIZEOF_REGISTER == 8
309 case MONO_TYPE_VALUETYPE:
310 if (type->data.klass->enumtype) {
311 type = mono_class_enum_basetype (type->data.klass);
314 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
317 case MONO_TYPE_TYPEDBYREF:
319 case MONO_TYPE_GENERICINST:
320 type = &type->data.generic_class->container_class->byval_arg;
324 g_assert (cfg->generic_sharing_context);
325 if (mini_type_var_is_vt (cfg, type))
330 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/* Debugging helper: dump a basic block's in/out edges and instruction list. */
336 mono_print_bb (MonoBasicBlock *bb, const char *msg)
341 printf ("\n%s %d: [IN: ", msg, bb->block_num);
342 for (i = 0; i < bb->in_count; ++i)
343 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
345 for (i = 0; i < bb->out_count; ++i)
346 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
348 for (tree = bb->code; tree; tree = tree->next)
349 mono_print_ins_index (-1, tree);
/* Create the shared icall signatures used by the trampoline helpers. */
353 mono_create_helper_signatures (void)
355 helper_sig_domain_get = mono_create_icall_signature ("ptr");
356 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
357 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
358 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
359 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
360 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
361 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
365 * When using gsharedvt, some instantiations might be verifiable, and some might not be. i.e.
366 * foo<T> (int i) { ldarg.0; box T; }
368 #define UNVERIFIED do { \
369 if (cfg->gsharedvt) { \
370 if (cfg->verbose_level > 2) \
371 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
372 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
373 goto exception_exit; \
375 if (mini_get_debug_options ()->break_on_unverified) \
381 #define LOAD_ERROR do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto load_error; } while (0)
383 #define TYPE_LOAD_ERROR(klass) do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else { cfg->exception_ptr = klass; goto load_error; } } while (0)
/* Fetch the bblock starting at IL offset IP from the offset->bb cache,
 * creating and registering a new one when none is cached; IP is bounds-checked. */
385 #define GET_BBLOCK(cfg,tblock,ip) do { \
386 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
388 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
389 NEW_BBLOCK (cfg, (tblock)); \
390 (tblock)->cil_code = (ip); \
391 ADD_BBLOCK (cfg, (tblock)); \
395 #if defined(TARGET_X86) || defined(TARGET_AMD64)
396 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
397 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
398 (dest)->dreg = alloc_ireg_mp ((cfg)); \
399 (dest)->sreg1 = (sr1); \
400 (dest)->sreg2 = (sr2); \
401 (dest)->inst_imm = (imm); \
402 (dest)->backend.shift_amount = (shift); \
403 MONO_ADD_INS ((cfg)->cbb, (dest)); \
407 #if SIZEOF_REGISTER == 8
408 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
409 /* FIXME: Need to add many more cases */ \
410 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
412 int dr = alloc_preg (cfg); \
413 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
414 (ins)->sreg2 = widen->dreg; \
418 #define ADD_WIDEN_OP(ins, arg1, arg2)
/* Emit a binary op over the two top-of-stack values: type_from_op picks the
 * type-specific opcode, a widening op is added when needed, and the
 * (possibly decomposed) result is pushed back on the stack. */
421 #define ADD_BINOP(op) do { \
422 MONO_INST_NEW (cfg, ins, (op)); \
424 ins->sreg1 = sp [0]->dreg; \
425 ins->sreg2 = sp [1]->dreg; \
426 type_from_op (ins, sp [0], sp [1]); \
428 /* Have to insert a widening op */ \
429 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
430 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
431 MONO_ADD_INS ((cfg)->cbb, (ins)); \
432 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
435 #define ADD_UNOP(op) do { \
436 MONO_INST_NEW (cfg, ins, (op)); \
438 ins->sreg1 = sp [0]->dreg; \
439 type_from_op (ins, sp [0], NULL); \
441 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
442 MONO_ADD_INS ((cfg)->cbb, (ins)); \
443 *sp++ = mono_decompose_opcode (cfg, ins); \
446 #define ADD_BINCOND(next_block) do { \
449 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
450 cmp->sreg1 = sp [0]->dreg; \
451 cmp->sreg2 = sp [1]->dreg; \
452 type_from_op (cmp, sp [0], sp [1]); \
454 type_from_op (ins, sp [0], sp [1]); \
455 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
456 GET_BBLOCK (cfg, tblock, target); \
457 link_bblock (cfg, bblock, tblock); \
458 ins->inst_true_bb = tblock; \
459 if ((next_block)) { \
460 link_bblock (cfg, bblock, (next_block)); \
461 ins->inst_false_bb = (next_block); \
462 start_new_bblock = 1; \
464 GET_BBLOCK (cfg, tblock, ip); \
465 link_bblock (cfg, bblock, tblock); \
466 ins->inst_false_bb = tblock; \
467 start_new_bblock = 2; \
469 if (sp != stack_start) { \
470 handle_stack_args (cfg, stack_start, sp - stack_start); \
471 CHECK_UNVERIFIABLE (cfg); \
473 MONO_ADD_INS (bblock, cmp); \
474 MONO_ADD_INS (bblock, ins); \
478 * link_bblock: Links two basic blocks
480 * links two basic blocks in the control flow graph, the 'from'
481 * argument is the starting block and the 'to' argument is the block
482 * that control flow transfers to after 'from'.
485 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
487 MonoBasicBlock **newa;
491 if (from->cil_code) {
493 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
495 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
498 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
500 printf ("edge from entry to exit\n");
505 for (i = 0; i < from->out_count; ++i) {
506 if (to == from->out_bb [i]) {
512 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
513 for (i = 0; i < from->out_count; ++i) {
514 newa [i] = from->out_bb [i];
522 for (i = 0; i < to->in_count; ++i) {
523 if (from == to->in_bb [i]) {
529 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
530 for (i = 0; i < to->in_count; ++i) {
531 newa [i] = to->in_bb [i];
540 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
542 link_bblock (cfg, from, to);
546 * mono_find_block_region:
548 * We mark each basic block with a region ID. We use that to avoid BB
549 * optimizations when blocks are in different regions.
552 * A region token that encodes where this region is, and information
553 * about the clause owner for this block.
555 * The region encodes the try/catch/filter clause that owns this block
556 * as well as the type. -1 is a special value that represents a block
557 * that is in none of try/catch/filter.
560 mono_find_block_region (MonoCompile *cfg, int offset)
562 MonoMethodHeader *header = cfg->header;
563 MonoExceptionClause *clause;
/* Scan the clauses in order; the first one containing OFFSET decides. */
566 for (i = 0; i < header->num_clauses; ++i) {
567 clause = &header->clauses [i];
568 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
569 (offset < (clause->handler_offset)))
570 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
/* Inside a handler: encode clause index, handler kind and clause flags. */
572 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
573 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
574 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
575 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
576 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
578 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Inside the protected (try) range of the clause. */
581 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
582 return ((i + 1) << 8) | clause->flags;
589 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
591 MonoMethodHeader *header = cfg->header;
592 MonoExceptionClause *clause;
596 for (i = 0; i < header->num_clauses; ++i) {
597 clause = &header->clauses [i];
598 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
599 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
600 if (clause->flags == type)
601 res = g_list_append (res, clause);
/* Get or lazily create the spvar for exception REGION (cached in
 * cfg->spvars); flagged volatile so it is not register allocated. */
608 mono_create_spvar_for_region (MonoCompile *cfg, int region)
612 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
616 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
617 /* prevent it from being register allocated */
618 var->flags |= MONO_INST_VOLATILE;
620 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up the exception variable for IL OFFSET; NULL when not created yet. */
624 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
626 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Get or lazily create the exception object variable for IL OFFSET
 * (cached in cfg->exvars); flagged volatile like the spvars above. */
630 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
634 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
638 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
639 /* prevent it from being register allocated */
640 var->flags |= MONO_INST_VOLATILE;
642 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
648 * Returns the type used in the eval stack when @type is loaded.
649 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
652 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
656 type = mini_replace_type (type);
657 inst->klass = klass = mono_class_from_mono_type (type);
659 inst->type = STACK_MP;
664 switch (type->type) {
666 inst->type = STACK_INV;
670 case MONO_TYPE_BOOLEAN:
676 inst->type = STACK_I4;
681 case MONO_TYPE_FNPTR:
682 inst->type = STACK_PTR;
684 case MONO_TYPE_CLASS:
685 case MONO_TYPE_STRING:
686 case MONO_TYPE_OBJECT:
687 case MONO_TYPE_SZARRAY:
688 case MONO_TYPE_ARRAY:
689 inst->type = STACK_OBJ;
693 inst->type = STACK_I8;
697 inst->type = STACK_R8;
699 case MONO_TYPE_VALUETYPE:
700 if (type->data.klass->enumtype) {
701 type = mono_class_enum_basetype (type->data.klass);
705 inst->type = STACK_VTYPE;
708 case MONO_TYPE_TYPEDBYREF:
709 inst->klass = mono_defaults.typed_reference_class;
710 inst->type = STACK_VTYPE;
712 case MONO_TYPE_GENERICINST:
713 type = &type->data.generic_class->container_class->byval_arg;
717 g_assert (cfg->generic_sharing_context);
718 if (mini_is_gsharedvt_type (cfg, type)) {
719 g_assert (cfg->gsharedvt);
720 inst->type = STACK_VTYPE;
722 inst->type = STACK_OBJ;
726 g_error ("unknown type 0x%02x in eval stack type", type->type);
731 * The following tables are used to quickly validate the IL code in type_from_op ().
734 bin_num_table [STACK_MAX] [STACK_MAX] = {
735 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
736 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
737 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
738 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
739 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
740 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
741 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
742 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
747 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
750 /* reduce the size of this table */
752 bin_int_table [STACK_MAX] [STACK_MAX] = {
753 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
754 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
755 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
756 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
757 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
758 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
759 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
760 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
764 bin_comp_table [STACK_MAX] [STACK_MAX] = {
765 /* Inv i L p F & O vt */
767 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
768 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
769 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
770 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
771 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
772 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
773 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
776 /* reduce the size of this table */
778 shift_table [STACK_MAX] [STACK_MAX] = {
779 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
780 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
781 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
782 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
783 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
784 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
785 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
786 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
790 * Tables to map from the non-specific opcode to the matching
791 * type-specific opcode.
793 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
795 binops_op_map [STACK_MAX] = {
796 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
799 /* handles from CEE_NEG to CEE_CONV_U8 */
801 unops_op_map [STACK_MAX] = {
802 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
805 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
807 ovfops_op_map [STACK_MAX] = {
808 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
811 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
813 ovf2ops_op_map [STACK_MAX] = {
814 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
817 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
819 ovf3ops_op_map [STACK_MAX] = {
820 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
823 /* handles from CEE_BEQ to CEE_BLT_UN */
825 beqops_op_map [STACK_MAX] = {
826 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
829 /* handles from CEE_CEQ to CEE_CLT_UN */
831 ceqops_op_map [STACK_MAX] = {
832 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
836 * Sets ins->type (the type on the eval stack) according to the
837 * type of the opcode and the arguments to it.
838 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
840 * FIXME: this function sets ins->type unconditionally in some cases, but
841 * it should set it to invalid for some types (a conv.x on an object)
844 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
846 switch (ins->opcode) {
853 /* FIXME: check unverifiable args for STACK_MP */
854 ins->type = bin_num_table [src1->type] [src2->type];
855 ins->opcode += binops_op_map [ins->type];
862 ins->type = bin_int_table [src1->type] [src2->type];
863 ins->opcode += binops_op_map [ins->type];
868 ins->type = shift_table [src1->type] [src2->type];
869 ins->opcode += binops_op_map [ins->type];
874 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
875 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
876 ins->opcode = OP_LCOMPARE;
877 else if (src1->type == STACK_R8)
878 ins->opcode = OP_FCOMPARE;
880 ins->opcode = OP_ICOMPARE;
882 case OP_ICOMPARE_IMM:
883 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
884 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
885 ins->opcode = OP_LCOMPARE_IMM;
897 ins->opcode += beqops_op_map [src1->type];
900 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
901 ins->opcode += ceqops_op_map [src1->type];
907 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
908 ins->opcode += ceqops_op_map [src1->type];
912 ins->type = neg_table [src1->type];
913 ins->opcode += unops_op_map [ins->type];
916 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
917 ins->type = src1->type;
919 ins->type = STACK_INV;
920 ins->opcode += unops_op_map [ins->type];
926 ins->type = STACK_I4;
927 ins->opcode += unops_op_map [src1->type];
930 ins->type = STACK_R8;
931 switch (src1->type) {
934 ins->opcode = OP_ICONV_TO_R_UN;
937 ins->opcode = OP_LCONV_TO_R_UN;
941 case CEE_CONV_OVF_I1:
942 case CEE_CONV_OVF_U1:
943 case CEE_CONV_OVF_I2:
944 case CEE_CONV_OVF_U2:
945 case CEE_CONV_OVF_I4:
946 case CEE_CONV_OVF_U4:
947 ins->type = STACK_I4;
948 ins->opcode += ovf3ops_op_map [src1->type];
950 case CEE_CONV_OVF_I_UN:
951 case CEE_CONV_OVF_U_UN:
952 ins->type = STACK_PTR;
953 ins->opcode += ovf2ops_op_map [src1->type];
955 case CEE_CONV_OVF_I1_UN:
956 case CEE_CONV_OVF_I2_UN:
957 case CEE_CONV_OVF_I4_UN:
958 case CEE_CONV_OVF_U1_UN:
959 case CEE_CONV_OVF_U2_UN:
960 case CEE_CONV_OVF_U4_UN:
961 ins->type = STACK_I4;
962 ins->opcode += ovf2ops_op_map [src1->type];
965 ins->type = STACK_PTR;
966 switch (src1->type) {
968 ins->opcode = OP_ICONV_TO_U;
972 #if SIZEOF_VOID_P == 8
973 ins->opcode = OP_LCONV_TO_U;
975 ins->opcode = OP_MOVE;
979 ins->opcode = OP_LCONV_TO_U;
982 ins->opcode = OP_FCONV_TO_U;
988 ins->type = STACK_I8;
989 ins->opcode += unops_op_map [src1->type];
991 case CEE_CONV_OVF_I8:
992 case CEE_CONV_OVF_U8:
993 ins->type = STACK_I8;
994 ins->opcode += ovf3ops_op_map [src1->type];
996 case CEE_CONV_OVF_U8_UN:
997 case CEE_CONV_OVF_I8_UN:
998 ins->type = STACK_I8;
999 ins->opcode += ovf2ops_op_map [src1->type];
1003 ins->type = STACK_R8;
1004 ins->opcode += unops_op_map [src1->type];
1007 ins->type = STACK_R8;
1011 ins->type = STACK_I4;
1012 ins->opcode += ovfops_op_map [src1->type];
1015 case CEE_CONV_OVF_I:
1016 case CEE_CONV_OVF_U:
1017 ins->type = STACK_PTR;
1018 ins->opcode += ovfops_op_map [src1->type];
1021 case CEE_ADD_OVF_UN:
1023 case CEE_MUL_OVF_UN:
1025 case CEE_SUB_OVF_UN:
1026 ins->type = bin_num_table [src1->type] [src2->type];
1027 ins->opcode += ovfops_op_map [src1->type];
1028 if (ins->type == STACK_R8)
1029 ins->type = STACK_INV;
1031 case OP_LOAD_MEMBASE:
1032 ins->type = STACK_PTR;
1034 case OP_LOADI1_MEMBASE:
1035 case OP_LOADU1_MEMBASE:
1036 case OP_LOADI2_MEMBASE:
1037 case OP_LOADU2_MEMBASE:
1038 case OP_LOADI4_MEMBASE:
1039 case OP_LOADU4_MEMBASE:
1040 ins->type = STACK_PTR;
1042 case OP_LOADI8_MEMBASE:
1043 ins->type = STACK_I8;
1045 case OP_LOADR4_MEMBASE:
1046 case OP_LOADR8_MEMBASE:
1047 ins->type = STACK_R8;
1050 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
1054 if (ins->type == STACK_MP)
1055 ins->klass = mono_defaults.object_class;
1060 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1066 param_table [STACK_MAX] [STACK_MAX] = {
1071 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1075 switch (args->type) {
1085 for (i = 0; i < sig->param_count; ++i) {
1086 switch (args [i].type) {
1090 if (!sig->params [i]->byref)
1094 if (sig->params [i]->byref)
1096 switch (sig->params [i]->type) {
1097 case MONO_TYPE_CLASS:
1098 case MONO_TYPE_STRING:
1099 case MONO_TYPE_OBJECT:
1100 case MONO_TYPE_SZARRAY:
1101 case MONO_TYPE_ARRAY:
1108 if (sig->params [i]->byref)
1110 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1119 /*if (!param_table [args [i].type] [sig->params [i]->type])
1127 * When we need a pointer to the current domain many times in a method, we
1128 * call mono_domain_get() once and we store the result in a local variable.
1129 * This function returns the variable that represents the MonoDomain*.
1131 inline static MonoInst *
1132 mono_get_domainvar (MonoCompile *cfg)
1134 if (!cfg->domainvar)
1135 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1136 return cfg->domainvar;
1140 * The got_var contains the address of the Global Offset Table when AOT
1144 mono_get_got_var (MonoCompile *cfg)
1146 #ifdef MONO_ARCH_NEED_GOT_VAR
1147 if (!cfg->compile_aot)
1149 if (!cfg->got_var) {
1150 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1152 return cfg->got_var;
1159 mono_get_vtable_var (MonoCompile *cfg)
1161 g_assert (cfg->generic_sharing_context);
1163 if (!cfg->rgctx_var) {
1164 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1165 /* force the var to be stack allocated */
1166 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1169 return cfg->rgctx_var;
1173 type_from_stack_type (MonoInst *ins) {
1174 switch (ins->type) {
1175 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1176 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1177 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1178 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1180 return &ins->klass->this_arg;
1181 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1182 case STACK_VTYPE: return &ins->klass->byval_arg;
1184 g_error ("stack type %d to monotype not handled\n", ins->type);
1189 static G_GNUC_UNUSED int
1190 type_to_stack_type (MonoType *t)
1192 t = mono_type_get_underlying_type (t);
1196 case MONO_TYPE_BOOLEAN:
1199 case MONO_TYPE_CHAR:
1206 case MONO_TYPE_FNPTR:
1208 case MONO_TYPE_CLASS:
1209 case MONO_TYPE_STRING:
1210 case MONO_TYPE_OBJECT:
1211 case MONO_TYPE_SZARRAY:
1212 case MONO_TYPE_ARRAY:
1220 case MONO_TYPE_VALUETYPE:
1221 case MONO_TYPE_TYPEDBYREF:
1223 case MONO_TYPE_GENERICINST:
1224 if (mono_type_generic_inst_is_valuetype (t))
1230 g_assert_not_reached ();
/* Map an array-element access opcode (ldelem/stelem family) to the class
 * of the element it loads or stores. */
1237 array_access_to_klass (int opcode)
1241 return mono_defaults.byte_class;
1243 return mono_defaults.uint16_class;
1246 return mono_defaults.int_class;
1249 return mono_defaults.sbyte_class;
1252 return mono_defaults.int16_class;
1255 return mono_defaults.int32_class;
1257 return mono_defaults.uint32_class;
1260 return mono_defaults.int64_class;
1263 return mono_defaults.single_class;
1266 return mono_defaults.double_class;
1267 case CEE_LDELEM_REF:
1268 case CEE_STELEM_REF:
1269 return mono_defaults.object_class;
1271 g_assert_not_reached ();
1277 * We try to share variables when possible
1280 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1285 /* inlining can result in deeper stacks */
1286 if (slot >= cfg->header->max_stack)
1287 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1289 pos = ins->type - 1 + slot * STACK_MAX;
1291 switch (ins->type) {
1298 if ((vnum = cfg->intvars [pos]))
1299 return cfg->varinfo [vnum];
1300 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1301 cfg->intvars [pos] = res->inst_c0;
1304 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1310 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1313 * Don't use this if a generic_context is set, since that means AOT can't
1314 * look up the method using just the image+token.
1315 * table == 0 means this is a reference made from a wrapper.
1317 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1318 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1319 jump_info_token->image = image;
1320 jump_info_token->token = token;
1321 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1326 * This function is called to handle items that are left on the evaluation stack
1327 * at basic block boundaries. What happens is that we save the values to local variables
1328 * and we reload them later when first entering the target basic block (with the
1329 * handle_loaded_temps () function).
1330 * A single joint point will use the same variables (stored in the array bb->out_stack or
1331 * bb->in_stack, if the basic block is before or after the joint point).
1333 * This function needs to be called _before_ emitting the last instruction of
1334 * the bb (i.e. before emitting a branch).
1335 * If the stack merge fails at a join point, cfg->unverifiable is set.
/*
 * handle_stack_args:
 *
 * Spill the COUNT evaluation-stack items in SP that are live on exit from the
 * current basic block (cfg->cbb) into local variables, so successor blocks can
 * reload them (see the comment above). Successors share the variables through
 * bb->out_stack / outb->in_stack; a stack-count mismatch at a join point sets
 * cfg->unverifiable.
 */
1338 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1341 MonoBasicBlock *bb = cfg->cbb;
1342 MonoBasicBlock *outb;
1343 MonoInst *inst, **locals;
1348 if (cfg->verbose_level > 3)
1349 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bblock: choose/allocate the spill variables */
1350 if (!bb->out_scount) {
1351 bb->out_scount = count;
1352 //printf ("bblock %d has out:", bb->block_num);
1354 for (i = 0; i < bb->out_count; ++i) {
1355 outb = bb->out_bb [i];
1356 /* exception handlers are linked, but they should not be considered for stack args */
1357 if (outb->flags & BB_EXCEPTION_HANDLER)
1359 //printf (" %d", outb->block_num);
/* reuse a successor's in_stack if it already has one */
1360 if (outb->in_stack) {
1362 bb->out_stack = outb->in_stack;
1368 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1369 for (i = 0; i < count; ++i) {
1371 * try to reuse temps already allocated for this purpose, if they occupy the same
1372 * stack slot and if they are of the same type.
1373 * This won't cause conflicts since if 'local' is used to
1374 * store one of the values in the in_stack of a bblock, then
1375 * the same variable will be used for the same outgoing stack
1377 * This doesn't work when inlining methods, since the bblocks
1378 * in the inlined methods do not inherit their in_stack from
1379 * the bblock they are inlined to. See bug #58863 for an
1382 if (cfg->inlined_method)
1383 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1385 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate the spill variables to successors which have no in_stack yet */
1390 for (i = 0; i < bb->out_count; ++i) {
1391 outb = bb->out_bb [i];
1392 /* exception handlers are linked, but they should not be considered for stack args */
1393 if (outb->flags & BB_EXCEPTION_HANDLER)
1395 if (outb->in_scount) {
1396 if (outb->in_scount != bb->out_scount) {
/* stack depth mismatch at a join point: flag for the verifier */
1397 cfg->unverifiable = TRUE;
1400 continue; /* check they are the same locals */
1402 outb->in_scount = count;
1403 outb->in_stack = bb->out_stack;
1406 locals = bb->out_stack;
/* Store each stack item into its spill var, then replace it on the stack */
1408 for (i = 0; i < count; ++i) {
1409 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1410 inst->cil_code = sp [i]->cil_code;
1411 sp [i] = locals [i];
1412 if (cfg->verbose_level > 3)
1413 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1417 * It is possible that the out bblocks already have in_stack assigned, and
1418 * the in_stacks differ. In this case, we will store to all the different
1425 /* Find a bblock which has a different in_stack */
1427 while (bindex < bb->out_count) {
1428 outb = bb->out_bb [bindex];
1429 /* exception handlers are linked, but they should not be considered for stack args */
1430 if (outb->flags & BB_EXCEPTION_HANDLER) {
1434 if (outb->in_stack != locals) {
/* store again into this successor's own in_stack variables */
1435 for (i = 0; i < count; ++i) {
1436 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1437 inst->cil_code = sp [i]->cil_code;
1438 sp [i] = locals [i];
1439 if (cfg->verbose_level > 3)
1440 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1442 locals = outb->in_stack;
1451 /* Emit code which loads interface_offsets [klass->interface_id]
1452 * The array is stored in memory before vtable.
/*
 * mini_emit_load_intf_reg_vtable:
 *
 * Emit IR loading interface_offsets [klass->interface_id] into INTF_REG (the
 * array lives in memory just before the vtable, see the comment above). The
 * non-AOT path loads at a negative constant offset from VTABLE_REG; under AOT
 * the offset comes from a MONO_PATCH_INFO_ADJUSTED_IID constant (presumably
 * the same -((iid + 1) * sizeof (void*)) value, resolved at load time).
 */
1455 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1457 if (cfg->compile_aot) {
1458 int ioffset_reg = alloc_preg (cfg);
1459 int iid_reg = alloc_preg (cfg);
1461 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1462 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1463 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
1466 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_interface_bitmap_check:
 *
 * Emit IR which leaves a nonzero value in INTF_BIT_REG iff the interface
 * bitmap found at BASE_REG + OFFSET has the bit for klass->interface_id set.
 */
1471 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1473 int ibitmap_reg = alloc_preg (cfg);
1474 #ifdef COMPRESSED_INTERFACE_BITMAP
/* compressed bitmap: the bit test is done by the mono_class_interface_match icall */
1476 MonoInst *res, *ins;
1477 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1478 MONO_ADD_INS (cfg->cbb, ins);
1480 if (cfg->compile_aot)
1481 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1483 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1484 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1485 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1487 int ibitmap_byte_reg = alloc_preg (cfg);
1489 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1491 if (cfg->compile_aot) {
/* AOT: the iid is only known at load time, so compute the byte index
 * (iid >> 3) and bit mask (1 << (iid & 7)) in emitted IR */
1492 int iid_reg = alloc_preg (cfg);
1493 int shifted_iid_reg = alloc_preg (cfg);
1494 int ibitmap_byte_address_reg = alloc_preg (cfg);
1495 int masked_iid_reg = alloc_preg (cfg);
1496 int iid_one_bit_reg = alloc_preg (cfg);
1497 int iid_bit_reg = alloc_preg (cfg);
1498 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1499 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1500 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1501 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1502 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1503 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1504 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1505 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* non-AOT: byte index and bit mask are compile-time constants */
1507 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1508 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1514 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1515 * stored in "klass_reg" implements the interface "klass".
1518 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
/* delegate to the generic bitmap check, using MonoClass::interface_bitmap */
1520 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1524 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1525 * stored in "vtable_reg" implements the interface "klass".
1528 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
/* delegate to the generic bitmap check, using MonoVTable::interface_bitmap */
1530 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1534 * Emit code which checks whether the interface id of @klass is smaller than
1535 * the value given by max_iid_reg.
/*
 * mini_emit_max_iid_check:
 *
 * Emit an unsigned comparison of MAX_IID_REG against klass->interface_id.
 * On max_iid < iid, control goes to FALSE_TARGET or an InvalidCastException
 * is thrown. NOTE(review): the guard selecting the branch vs. the throw is
 * not visible in this extract; presumably it tests false_target != NULL.
 */
1538 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1539 MonoBasicBlock *false_target)
1541 if (cfg->compile_aot) {
/* AOT: the interface id is only known at load time */
1542 int iid_reg = alloc_preg (cfg);
1543 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1544 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1547 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1549 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1551 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1554 /* Same as above, but obtains max_iid from a vtable */
1556 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1557 MonoBasicBlock *false_target)
1559 int max_iid_reg = alloc_preg (cfg);
/* load MonoVTable::max_interface_id (unsigned 16-bit) and range-check it */
1561 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1562 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1565 /* Same as above, but obtains max_iid from a klass */
1567 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1568 MonoBasicBlock *false_target)
1570 int max_iid_reg = alloc_preg (cfg);
/* load MonoClass::max_interface_id (unsigned 16-bit) and range-check it */
1572 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1573 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 *
 * Emit an "is instance" subtype test of the MonoClass in KLASS_REG against
 * KLASS (or against the runtime class in KLASS_INS when given). The test
 * reads supertypes [klass->idepth - 1] and compares it with the target;
 * branches to TRUE_TARGET on a match, to FALSE_TARGET when the idepth
 * range check fails.
 */
1577 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1579 int idepth_reg = alloc_preg (cfg);
1580 int stypes_reg = alloc_preg (cfg);
1581 int stype = alloc_preg (cfg);
/* make sure klass->supertypes/idepth are initialized */
1583 mono_class_setup_supertypes (klass);
/* deep hierarchies: verify the candidate's idepth covers klass->idepth
 * before indexing its supertypes array */
1585 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1586 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1587 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1588 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1590 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1591 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1593 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1594 } else if (cfg->compile_aot) {
/* AOT: the class pointer is not a compile-time constant */
1595 int const_reg = alloc_preg (cfg);
1596 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1597 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1599 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1601 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst test against a compile-time KLASS only */
1605 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1607 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 *
 * Emit an interface check against the vtable in VTABLE_REG: range-check the
 * interface id, test the interface bitmap, then either branch to TRUE_TARGET
 * on a set bit or throw InvalidCastException on a clear one. NOTE(review):
 * the guard choosing branch vs. throw is not visible in this extract;
 * presumably it tests the target blocks for NULL.
 */
1611 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1613 int intf_reg = alloc_preg (cfg);
1615 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1616 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1617 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1619 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1621 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1625 * Variant of the above that takes a register to the class, not the vtable.
/*
 * mini_emit_iface_class_cast:
 *
 * Same interface check as mini_emit_iface_cast (), but KLASS_REG holds a
 * MonoClass rather than a MonoVTable (see the comment above).
 */
1628 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1630 int intf_bit_reg = alloc_preg (cfg);
1632 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1633 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1634 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1636 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1638 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 *
 * Emit an exact class-equality check of KLASS_REG against KLASS (or against
 * the runtime class in KLASS_INST when given); throws InvalidCastException
 * on mismatch.
 */
1642 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1645 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1646 } else if (cfg->compile_aot) {
/* AOT: the class pointer is not a compile-time constant, load via a patch */
1647 int const_reg = alloc_preg (cfg);
1648 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1649 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1651 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1653 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact class check against a compile-time KLASS only */
1657 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1659 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 *
 * Compare KLASS_REG with KLASS and branch to TARGET using BRANCH_OP
 * (e.g. OP_PBEQ / OP_PBNE_UN) instead of throwing.
 */
1663 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1665 if (cfg->compile_aot) {
/* AOT: the class pointer is not a compile-time constant, load via a patch */
1666 int const_reg = alloc_preg (cfg);
1667 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1668 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1670 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1672 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1676 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 *
 * Emit a castclass check of the MonoClass in KLASS_REG against KLASS (or
 * against the runtime class in KLASS_INST). Arrays are checked by rank plus
 * a recursive check on the element class (with special cases for enums and
 * System.Object elements); non-arrays are checked through the supertypes
 * array. Throws InvalidCastException on failure; OBJECT_IS_NULL is used as
 * the success/skip target of some element-class branches.
 */
1679 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1682 int rank_reg = alloc_preg (cfg);
1683 int eclass_reg = alloc_preg (cfg);
1685 g_assert (!klass_inst);
/* array path: the rank must match exactly */
1686 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1687 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1688 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1689 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1690 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
1691 if (klass->cast_class == mono_defaults.object_class) {
1692 int parent_reg = alloc_preg (cfg);
1693 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1694 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1695 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1696 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1697 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1698 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1699 } else if (klass->cast_class == mono_defaults.enum_class) {
1700 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1701 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1702 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1704 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1705 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1708 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1709 /* Check that the object is a vector too */
1710 int bounds_reg = alloc_preg (cfg);
1711 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1712 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1713 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* non-array path: supertype walk, same structure as mini_emit_isninst_cast_inst */
1716 int idepth_reg = alloc_preg (cfg);
1717 int stypes_reg = alloc_preg (cfg);
1718 int stype = alloc_preg (cfg);
1720 mono_class_setup_supertypes (klass);
1722 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1723 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1724 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1725 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1727 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1728 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1729 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass check against a compile-time KLASS only */
1734 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1736 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 *
 * Emit IR which fills SIZE bytes at DESTREG + OFFSET with VAL (only val == 0
 * is supported, see the assert). Small sizes within the alignment use
 * immediate stores; the general path loads VAL into a register and stores it
 * in 8/4/2/1-byte chunks, with the wide stores skipped when unaligned access
 * is not allowed.
 */
1740 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1744 g_assert (val == 0);
/* fast path: a single immediate store of 1/2/4(/8) bytes */
1749 if ((size <= 4) && (size <= align)) {
1752 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1755 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1758 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1760 #if SIZEOF_REGISTER == 8
1762 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* general path: materialize VAL into a register of native width */
1768 val_reg = alloc_preg (cfg);
1770 if (SIZEOF_REGISTER == 8)
1771 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1773 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1776 /* This could be optimized further if necessary */
/* unaligned destination: fall back to byte stores */
1778 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1785 #if !NO_UNALIGNED_ACCESS
1786 if (SIZEOF_REGISTER == 8) {
1788 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1793 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* remaining tail: 4-, 2-, then 1-byte stores */
1801 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1806 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1811 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 *
 * Emit IR which copies SIZE bytes from SRCREG + SOFFSET to DESTREG + DOFFSET
 * as load/store pairs in 8/4/2/1-byte chunks; unaligned buffers fall back to
 * byte copies. Asserts size < 10000 to bound code expansion.
 */
1818 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1825 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1826 g_assert (size < 10000);
1829 /* This could be optimized further if necessary */
/* unaligned buffers: fall back to byte-by-byte copy */
1831 cur_reg = alloc_preg (cfg);
1832 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1833 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1840 #if !NO_UNALIGNED_ACCESS
1841 if (SIZEOF_REGISTER == 8) {
/* 8-byte chunks on 64-bit targets */
1843 cur_reg = alloc_preg (cfg);
1844 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1845 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* remaining tail: 4-, 2-, then 1-byte copies */
1854 cur_reg = alloc_preg (cfg);
1855 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1856 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1862 cur_reg = alloc_preg (cfg);
1863 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1864 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1870 cur_reg = alloc_preg (cfg);
1871 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1872 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * emit_tls_set:
 *
 * Emit IR which stores SREG1 into the TLS slot identified by TLS_KEY. Under
 * AOT the offset is loaded at runtime from a TLS-offset constant
 * (OP_TLS_SET_REG); otherwise the offset is resolved now via
 * mini_get_tls_offset () (OP_TLS_SET).
 */
1880 emit_tls_set (MonoCompile *cfg, int sreg1, int tls_key)
1884 if (cfg->compile_aot) {
1885 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1886 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1888 ins->sreg2 = c->dreg;
1889 MONO_ADD_INS (cfg->cbb, ins);
1891 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1893 ins->inst_offset = mini_get_tls_offset (tls_key);
1894 MONO_ADD_INS (cfg->cbb, ins);
1901 * Emit IR to push the current LMF onto the LMF stack.
/*
 * emit_push_lmf:
 *
 * Emit IR to push cfg->lmf_var onto the LMF stack (see the pseudo code
 * below). Two strategies are visible here: when the LMF itself lives in TLS
 * (cfg->lmf_ir_mono_lmf), link the new LMF to the current one and store it
 * back into TLS; otherwise obtain lmf_addr (via jit-tls intrinsic, lmf-addr
 * intrinsic, or the mono_get_lmf_addr icall — the #ifdefs selecting between
 * them are not visible in this extract), cache it in cfg->lmf_addr_var, and
 * splice the LMF in front of *lmf_addr.
 */
1904 emit_push_lmf (MonoCompile *cfg)
1907 * Emit IR to push the LMF:
1908 * lmf_addr = <lmf_addr from tls>
1909 * lmf->lmf_addr = lmf_addr
1910 * lmf->prev_lmf = *lmf_addr
1913 int lmf_reg, prev_lmf_reg;
1914 MonoInst *ins, *lmf_ins;
/* fast path: the current LMF is stored directly in TLS */
1919 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
1920 /* Load current lmf */
1921 lmf_ins = mono_get_lmf_intrinsic (cfg);
1923 MONO_ADD_INS (cfg->cbb, lmf_ins);
1924 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1925 lmf_reg = ins->dreg;
1926 /* Save previous_lmf */
1927 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
/* make our LMF the TLS-current one */
1929 emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
1932 * Store lmf_addr in a variable, so it can be allocated to a global register.
1934 if (!cfg->lmf_addr_var)
1935 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* compute lmf_addr as &jit_tls->lmf */
1938 ins = mono_get_jit_tls_intrinsic (cfg);
1940 int jit_tls_dreg = ins->dreg;
1942 MONO_ADD_INS (cfg->cbb, ins);
1943 lmf_reg = alloc_preg (cfg);
1944 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, G_STRUCT_OFFSET (MonoJitTlsData, lmf));
1946 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
1949 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
1951 MONO_ADD_INS (cfg->cbb, lmf_ins);
1953 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
/* whatever produced lmf_addr, keep it in the dedicated variable */
1955 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
1957 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1958 lmf_reg = ins->dreg;
1960 prev_lmf_reg = alloc_preg (cfg);
1961 /* Save previous_lmf */
1962 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
1963 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* *lmf_addr = lmf: our frame is now the top of the LMF stack */
1965 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
1972 * Emit IR to pop the current LMF from the LMF stack.
/*
 * emit_pop_lmf:
 *
 * Emit IR to pop cfg->lmf_var from the LMF stack: either restore
 * previous_lmf into the TLS LMF slot (when cfg->lmf_ir_mono_lmf), or store
 * previous_lmf back through lmf_addr (*lmf_addr = lmf->previous_lmf).
 */
1975 emit_pop_lmf (MonoCompile *cfg)
1977 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
1983 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1984 lmf_reg = ins->dreg;
/* fast path: the current LMF is stored directly in TLS */
1986 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
1987 /* Load previous_lmf */
1988 prev_lmf_reg = alloc_preg (cfg);
1989 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
/* TLS LMF <- previous_lmf */
1991 emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
1994 * Emit IR to pop the LMF:
1995 * *(lmf->lmf_addr) = lmf->prev_lmf
1997 /* This could be called before emit_push_lmf () */
1998 if (!cfg->lmf_addr_var)
1999 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2000 lmf_addr_reg = cfg->lmf_addr_var->dreg;
2002 prev_lmf_reg = alloc_preg (cfg);
2003 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf))
2004 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * ret_type_to_call_opcode:
 *
 * Map a return TYPE to the matching call opcode, choosing the _REG variant
 * for CALLI, the _MEMBASE variant for VIRT calls, and the plain opcode
 * otherwise. Enums and generic instances are unwrapped to their underlying
 * type and re-dispatched.
 */
2009 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
2012 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2015 type = mini_get_basic_type_from_generic (gsctx, type);
2016 type = mini_replace_type (type);
2017 switch (type->type) {
2018 case MONO_TYPE_VOID:
2019 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2022 case MONO_TYPE_BOOLEAN:
2025 case MONO_TYPE_CHAR:
2028 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2032 case MONO_TYPE_FNPTR:
2033 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2034 case MONO_TYPE_CLASS:
2035 case MONO_TYPE_STRING:
2036 case MONO_TYPE_OBJECT:
2037 case MONO_TYPE_SZARRAY:
2038 case MONO_TYPE_ARRAY:
2039 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2042 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
2045 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2046 case MONO_TYPE_VALUETYPE:
/* enums dispatch on their underlying type */
2047 if (type->data.klass->enumtype) {
2048 type = mono_class_enum_basetype (type->data.klass);
2051 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2052 case MONO_TYPE_TYPEDBYREF:
2053 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2054 case MONO_TYPE_GENERICINST:
/* re-dispatch on the generic container's underlying type */
2055 type = &type->data.generic_class->container_class->byval_arg;
2058 case MONO_TYPE_MVAR:
2060 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2062 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
2068 * target_type_is_incompatible:
2069 * @cfg: MonoCompile context
2071 * Check that the item @arg on the evaluation stack can be stored
2072 * in the target type (can be a local, or field, etc).
2073 * The cfg arg can be used to check if we need verification or just
2076 * Returns: non-0 value if arg can't be stored on a target.
/*
 * target_type_is_incompatible: (documented in the comment above)
 *
 * Returns nonzero when the stack item ARG cannot be stored into TARGET.
 * Dispatches on the underlying type of TARGET and checks ARG's evaluation
 * stack type (STACK_I4 / STACK_I8 / STACK_R8 / STACK_OBJ / STACK_MP /
 * STACK_VTYPE ...), plus the exact klass for value types.
 */
2079 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2081 MonoType *simple_type;
2084 target = mini_replace_type (target);
/* byref targets accept managed pointers of the matching class, or
 * native pointers */
2085 if (target->byref) {
2086 /* FIXME: check that the pointed to types match */
2087 if (arg->type == STACK_MP)
2088 return arg->klass != mono_class_from_mono_type (target);
2089 if (arg->type == STACK_PTR)
2094 simple_type = mono_type_get_underlying_type (target);
2095 switch (simple_type->type) {
2096 case MONO_TYPE_VOID:
2100 case MONO_TYPE_BOOLEAN:
2103 case MONO_TYPE_CHAR:
2106 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2110 /* STACK_MP is needed when setting pinned locals */
2111 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2116 case MONO_TYPE_FNPTR:
2118 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2119 * in native int. (#688008).
2121 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2124 case MONO_TYPE_CLASS:
2125 case MONO_TYPE_STRING:
2126 case MONO_TYPE_OBJECT:
2127 case MONO_TYPE_SZARRAY:
2128 case MONO_TYPE_ARRAY:
2129 if (arg->type != STACK_OBJ)
2131 /* FIXME: check type compatibility */
2135 if (arg->type != STACK_I8)
2140 if (arg->type != STACK_R8)
/* value types must match both the stack kind and the exact klass */
2143 case MONO_TYPE_VALUETYPE:
2144 if (arg->type != STACK_VTYPE)
2146 klass = mono_class_from_mono_type (simple_type);
2147 if (klass != arg->klass)
2150 case MONO_TYPE_TYPEDBYREF:
2151 if (arg->type != STACK_VTYPE)
2153 klass = mono_class_from_mono_type (simple_type);
2154 if (klass != arg->klass)
2157 case MONO_TYPE_GENERICINST:
2158 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2159 if (arg->type != STACK_VTYPE)
2161 klass = mono_class_from_mono_type (simple_type);
2162 if (klass != arg->klass)
2166 if (arg->type != STACK_OBJ)
2168 /* FIXME: check type compatibility */
/* type variables only reach here under generic sharing */
2172 case MONO_TYPE_MVAR:
2173 g_assert (cfg->generic_sharing_context);
2174 if (mini_type_var_is_vt (cfg, simple_type)) {
2175 if (arg->type != STACK_VTYPE)
2178 if (arg->type != STACK_OBJ)
2183 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2189 * Prepare arguments for passing to a function call.
2190 * Return a non-zero value if the arguments can't be passed to the given
2192 * The type checks are not yet complete and some conversions may need
2193 * casts on 32 or 64 bit architectures.
2195 * FIXME: implement this using target_type_is_incompatible ()
/*
 * check_call_signature: (documented in the comment above)
 *
 * Returns nonzero when ARGS cannot be passed to a call with signature SIG:
 * validates the 'this' argument and each parameter's evaluation stack type
 * against the signature's declared types.
 */
2198 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2200 MonoType *simple_type;
/* 'this' must be an object, managed pointer or native pointer */
2204 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2208 for (i = 0; i < sig->param_count; ++i) {
2209 if (sig->params [i]->byref) {
2210 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2214 simple_type = sig->params [i];
2215 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
2217 switch (simple_type->type) {
2218 case MONO_TYPE_VOID:
2223 case MONO_TYPE_BOOLEAN:
2226 case MONO_TYPE_CHAR:
2229 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2235 case MONO_TYPE_FNPTR:
2236 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2239 case MONO_TYPE_CLASS:
2240 case MONO_TYPE_STRING:
2241 case MONO_TYPE_OBJECT:
2242 case MONO_TYPE_SZARRAY:
2243 case MONO_TYPE_ARRAY:
2244 if (args [i]->type != STACK_OBJ)
2249 if (args [i]->type != STACK_I8)
2254 if (args [i]->type != STACK_R8)
2257 case MONO_TYPE_VALUETYPE:
/* enums re-dispatch on their underlying type */
2258 if (simple_type->data.klass->enumtype) {
2259 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2262 if (args [i]->type != STACK_VTYPE)
2265 case MONO_TYPE_TYPEDBYREF:
2266 if (args [i]->type != STACK_VTYPE)
2269 case MONO_TYPE_GENERICINST:
/* re-dispatch on the generic container's underlying type */
2270 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2273 case MONO_TYPE_MVAR:
2275 if (args [i]->type != STACK_VTYPE)
2279 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 *
 * Map a *CALL_MEMBASE opcode to its direct-call counterpart. Aborts on any
 * other opcode. (The return statements per case are not visible in this
 * extract.)
 */
2287 callvirt_to_call (int opcode)
2290 case OP_CALL_MEMBASE:
2292 case OP_VOIDCALL_MEMBASE:
2294 case OP_FCALL_MEMBASE:
2296 case OP_VCALL_MEMBASE:
2298 case OP_LCALL_MEMBASE:
2301 g_assert_not_reached ();
2307 #ifdef MONO_ARCH_HAVE_IMT
2308 /* Either METHOD or IMT_ARG needs to be set */
/*
 * emit_imt_argument:
 *
 * Materialize the IMT/method identifier for CALL: either METHOD (as a
 * constant — AOT patch or direct pointer) or the dynamic IMT_ARG must be
 * set (see the comment above). Under LLVM the value is recorded in
 * call->imt_arg_reg; otherwise it is bound to MONO_ARCH_IMT_REG when the
 * architecture defines one, else handed to mono_arch_emit_imt_argument ().
 */
2310 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2314 if (COMPILE_LLVM (cfg)) {
2315 method_reg = alloc_preg (cfg);
2318 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2319 } else if (cfg->compile_aot) {
2320 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2323 MONO_INST_NEW (cfg, ins, OP_PCONST);
2324 ins->inst_p0 = method;
2325 ins->dreg = method_reg;
2326 MONO_ADD_INS (cfg->cbb, ins);
2330 call->imt_arg_reg = method_reg;
2332 #ifdef MONO_ARCH_IMT_REG
2333 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2335 /* Need this to keep the IMT arg alive */
2336 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
/* non-LLVM path: same constant materialization, then bind the register */
2341 #ifdef MONO_ARCH_IMT_REG
2342 method_reg = alloc_preg (cfg);
2345 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2346 } else if (cfg->compile_aot) {
2347 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2350 MONO_INST_NEW (cfg, ins, OP_PCONST);
2351 ins->inst_p0 = method;
2352 ins->dreg = method_reg;
2353 MONO_ADD_INS (cfg->cbb, ins);
2356 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2358 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/*
 * mono_patch_info_new:
 *
 * Allocate a MonoJumpInfo from mempool MP describing a patch of TYPE at IP
 * pointing at TARGET. (The ip/type field assignments are not visible in this
 * extract.)
 */
2363 static MonoJumpInfo *
2364 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2366 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2370 ji->data.target = target;
/* Return the context-used flags for KLASS; only meaningful when compiling
 * with generic sharing (guarded on cfg->generic_sharing_context). */
2376 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2378 if (cfg->generic_sharing_context)
2379 return mono_class_check_context_used (klass);
/* Return the context-used flags for METHOD; only meaningful when compiling
 * with generic sharing (guarded on cfg->generic_sharing_context). */
2385 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2387 if (cfg->generic_sharing_context)
2388 return mono_method_check_context_used (method);
2394 * check_method_sharing:
2396 * Check whether the vtable or an mrgctx needs to be passed when calling CMETHOD.
2399 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2401 gboolean pass_vtable = FALSE;
2402 gboolean pass_mrgctx = FALSE;
/* vtable candidates: static methods and valuetype methods of generic classes */
2404 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2405 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2406 gboolean sharable = FALSE;
2408 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2411 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2412 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
2413 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2415 sharable = sharing_enabled && context_sharable;
2419 * Pass vtable iff target method might
2420 * be shared, which means that sharing
2421 * is enabled for its class and its
2422 * context is sharable (and it's not a
2425 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* mrgctx candidates: methods with their own method-level generic context */
2429 if (mini_method_get_context (cmethod) &&
2430 mini_method_get_context (cmethod)->method_inst) {
2431 g_assert (!pass_vtable);
2433 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2436 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2437 MonoGenericContext *context = mini_method_get_context (cmethod);
2438 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2440 if (sharing_enabled && context_sharable)
/* gsharedvt signatures always need the mrgctx */
2442 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, mono_method_signature (cmethod)))
2447 if (out_pass_vtable)
2448 *out_pass_vtable = pass_vtable;
2449 if (out_pass_mrgctx)
2450 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 *
 * Create and emit a MonoCallInst for a call with signature SIG and arguments
 * ARGS: picks the opcode (OP_TAILCALL for tail calls, otherwise derived from
 * the return type), sets up the vtype return buffer (vret_var or
 * OP_OUTARG_VTRETADDR), converts R4 arguments via icall under soft-float,
 * and lets the backend (or LLVM) lower the argument passing.
 */
2453 inline static MonoCallInst *
2454 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2455 MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
2459 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2464 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2466 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2469 call->signature = sig;
2470 call->rgctx_reg = rgctx;
2471 sig_ret = mini_replace_type (sig->ret);
2473 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
/* vtype return: either reuse cfg->vret_addr or allocate a temp and expose
 * its address through OP_OUTARG_VTRETADDR (see comment below) */
2476 if (mini_type_is_vtype (cfg, sig_ret)) {
2477 call->vret_var = cfg->vret_addr;
2478 //g_assert_not_reached ();
2480 } else if (mini_type_is_vtype (cfg, sig_ret)) {
2481 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2484 temp->backend.is_pinvoke = sig->pinvoke;
2487 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2488 * address of return value to increase optimization opportunities.
2489 * Before vtype decomposition, the dreg of the call ins itself represents the
2490 * fact the call modifies the return value. After decomposition, the call will
2491 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2492 * will be transformed into an LDADDR.
2494 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2495 loada->dreg = alloc_preg (cfg);
2496 loada->inst_p0 = temp;
2497 /* We reference the call too since call->dreg could change during optimization */
2498 loada->inst_p1 = call;
2499 MONO_ADD_INS (cfg->cbb, loada);
2501 call->inst.dreg = temp->dreg;
2503 call->vret_var = loada;
2504 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2505 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2507 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2508 if (COMPILE_SOFT_FLOAT (cfg)) {
2510 * If the call has a float argument, we would need to do an r8->r4 conversion using
2511 * an icall, but that cannot be done during the call sequence since it would clobber
2512 * the call registers + the stack. So we do it before emitting the call.
2514 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2516 MonoInst *in = call->args [i];
2518 if (i >= sig->hasthis)
2519 t = sig->params [i - sig->hasthis];
2521 t = &mono_defaults.int_class->byval_arg;
2522 t = mono_type_get_underlying_type (t);
2524 if (!t->byref && t->type == MONO_TYPE_R4) {
2525 MonoInst *iargs [1];
2529 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2531 /* The result will be in an int vreg */
2532 call->args [i] = conv;
2538 call->need_unbox_trampoline = unbox_trampoline;
/* lower the argument passing: LLVM or the architecture backend */
2541 if (COMPILE_LLVM (cfg))
2542 mono_llvm_emit_call (cfg, call);
2544 mono_arch_emit_call (cfg, call);
2546 mono_arch_emit_call (cfg, call);
/* track the largest outgoing param area and that this method makes calls */
2549 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2550 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *
 *   Attach the rgctx argument (held in RGCTX_REG) to CALL. On architectures
 * with a dedicated rgctx register the value is passed in MONO_ARCH_RGCTX_REG;
 * otherwise only rgctx_arg_reg is recorded (the non-register path is partly
 * elided in this excerpt).
 */
2556 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2558 #ifdef MONO_ARCH_RGCTX_REG
2559 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2560 cfg->uses_rgctx_reg = TRUE;
2561 call->rgctx_reg = TRUE;
2563 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 *
 *   Emit an indirect call through ADDR with signature SIG and arguments ARGS.
 * If RGCTX_ARG is non-NULL its value is first copied to a fresh vreg so it
 * survives argument setup, then registered on the call via set_rgctx_arg.
 * IMT_ARG, when present, is forwarded as the IMT argument.
 */
2570 inline static MonoInst*
2571 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2577 rgctx_reg = mono_alloc_preg (cfg);
2578 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2581 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
/* The call target address lives in sreg1 for indirect calls. */
2583 call->inst.sreg1 = addr->dreg;
2586 emit_imt_argument (cfg, call, NULL, imt_arg);
2588 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2591 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2593 return (MonoInst*)call;
2597 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2600 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2602 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *
 *   Emit a (possibly virtual, possibly tail) call to METHOD with ARGS.
 * THIS being non-NULL marks the call as virtual. Handles: string ctor
 * signature fixup, transparent-proxy/remoting wrappers, delegate Invoke
 * fast path, devirtualization of non-virtual/final methods, interface
 * dispatch via IMT, and vtable-slot dispatch. Returns the call instruction.
 * NOTE: interior lines of this function are elided in this excerpt.
 */
2605 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2606 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2608 #ifndef DISABLE_REMOTING
2609 gboolean might_be_remote = FALSE;
2611 gboolean virtual = this != NULL;
2612 gboolean enable_for_aot = TRUE;
2616 gboolean need_unbox_trampoline;
2619 sig = mono_method_signature (method);
/* Preserve the rgctx argument across argument setup. */
2622 rgctx_reg = mono_alloc_preg (cfg);
2623 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2626 if (method->string_ctor) {
2627 /* Create the real signature */
2628 /* FIXME: Cache these */
2629 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2630 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2635 context_used = mini_method_check_context_used (cfg, method);
2637 #ifndef DISABLE_REMOTING
/* 'this' might be a transparent proxy: MarshalByRef (or object) receiver,
 * non-virtual call, and we cannot statically prove it is a plain object. */
2638 might_be_remote = this && sig->hasthis &&
2639 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2640 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
2642 if (might_be_remote && context_used) {
2645 g_assert (cfg->generic_sharing_context);
/* Under generic sharing, fetch the remoting-check wrapper address from the
 * rgctx and call indirectly. */
2647 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2649 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2653 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2655 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2657 #ifndef DISABLE_REMOTING
2658 if (might_be_remote)
2659 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2662 call->method = method;
2663 call->inst.flags |= MONO_INST_HAS_METHOD;
2664 call->inst.inst_left = this;
2665 call->tail_call = tail;
2668 int vtable_reg, slot_reg, this_reg;
2671 this_reg = this->dreg;
/* Delegate Invoke fast path: call through delegate->invoke_impl. */
2673 if (ARCH_HAVE_DELEGATE_TRAMPOLINES && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2674 MonoInst *dummy_use;
2676 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2678 /* Make a call to delegate->invoke_impl */
2679 call->inst.inst_basereg = this_reg;
2680 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2681 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2683 /* We must emit a dummy use here because the delegate trampoline will
2684 replace the 'this' argument with the delegate target making this activation
2685 no longer a root for the delegate.
2686 This is an issue for delegates that target collectible code such as dynamic
2687 methods of GC'able assemblies.
2689 For a test case look into #667921.
2691 FIXME: a dummy use is not the best way to do it as the local register allocator
2692 will put it on a caller save register and spil it around the call.
2693 Ideally, we would either put it on a callee save register or only do the store part.
2695 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2697 return (MonoInst*)call;
/* Devirtualize: non-virtual, or final and not a remoting wrapper. */
2700 if ((!cfg->compile_aot || enable_for_aot) &&
2701 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2702 (MONO_METHOD_IS_FINAL (method) &&
2703 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2704 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2706 * the method is not virtual, we just need to ensure this is not null
2707 * and then we can call the method directly.
2709 #ifndef DISABLE_REMOTING
2710 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2712 * The check above ensures method is not gshared, this is needed since
2713 * gshared methods can't have wrappers.
2715 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2719 if (!method->string_ctor)
2720 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2722 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2723 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2725 * the method is virtual, but we can statically dispatch since either
2726 * it's class or the method itself are sealed.
2727 * But first we need to ensure it's not a null reference.
2729 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2731 call->inst.opcode = callvirt_to_call (call->inst.opcode);
/* True virtual dispatch: load the vtable and pick a slot. */
2733 vtable_reg = alloc_preg (cfg);
2734 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2735 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2737 #ifdef MONO_ARCH_HAVE_IMT
/* Interface call through the IMT: slots sit at negative offsets from
 * the vtable pointer. */
2739 guint32 imt_slot = mono_method_get_imt_slot (method);
2740 emit_imt_argument (cfg, call, call->method, imt_arg);
2741 slot_reg = vtable_reg;
2742 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2745 if (slot_reg == -1) {
/* Non-IMT interface dispatch: load the interface vtable explicitly. */
2746 slot_reg = alloc_preg (cfg);
2747 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2748 offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
/* Plain virtual call: index into the vtable's method table. */
2751 slot_reg = vtable_reg;
2752 offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2753 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2754 #ifdef MONO_ARCH_HAVE_IMT
2756 g_assert (mono_method_signature (method)->generic_param_count);
2757 emit_imt_argument (cfg, call, call->method, imt_arg);
2762 call->inst.sreg1 = slot_reg;
2763 call->inst.inst_offset = offset;
2764 call->virtual = TRUE;
2768 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2771 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2773 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *
 *   Convenience wrapper: non-tail call to METHOD using its own signature,
 * with no IMT or rgctx argument.
 */
2777 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2779 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this, NULL, NULL);
/*
 * mono_emit_native_call:
 *
 *   Emit a direct call to the native function FUNC with signature SIG.
 * NOTE: lines assigning FUNC to the call are elided in this excerpt.
 */
2783 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2790 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2793 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2795 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *
 *   Emit a call to the JIT icall identified by its C function address FUNC.
 * Looks up the registered icall info and calls through its wrapper.
 */
2799 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2801 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2805 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2809 * mono_emit_abs_call:
2811 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
/*
 * mono_emit_abs_call:
 *
 *   Emit a call to the runtime function described by PATCH_TYPE/DATA. The
 * patch-info object itself is passed as the call address; it is registered
 * in cfg->abs_patches so the PATCH_INFO_ABS resolver can translate it later.
 * NOTE: the return of the call instruction is on an elided line.
 */
2813 inline static MonoInst*
2814 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2815 MonoMethodSignature *sig, MonoInst **args)
2817 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2821 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2824 if (cfg->abs_patches == NULL)
2825 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2826 g_hash_table_insert (cfg->abs_patches, ji, ji);
2827 ins = mono_emit_native_call (cfg, ji, sig, args);
2828 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *
 *   Sign/zero-extend a small-integer call result. Native code (pinvoke) and
 * LLVM-compiled code may return sub-register integers with garbage in the
 * upper bits, so an explicit widening conversion is inserted.
 * NOTE: the 'return ins' paths are on elided lines.
 */
2833 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2835 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2836 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2840 * Native code might return non register sized integers
2841 * without initializing the upper bits.
2843 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2844 case OP_LOADI1_MEMBASE:
2845 widen_op = OP_ICONV_TO_I1;
2847 case OP_LOADU1_MEMBASE:
2848 widen_op = OP_ICONV_TO_U1;
2850 case OP_LOADI2_MEMBASE:
2851 widen_op = OP_ICONV_TO_I2;
2853 case OP_LOADU2_MEMBASE:
2854 widen_op = OP_ICONV_TO_U2;
2860 if (widen_op != -1) {
2861 int dreg = alloc_preg (cfg);
2864 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2865 widen->type = ins->type;
/*
 * get_memcpy_method:
 *
 *   Return the managed String.memcpy(3-arg) helper, caching it in a static.
 * Aborts with g_error if the method is missing (old corlib).
 */
2875 get_memcpy_method (void)
2877 static MonoMethod *memcpy_method = NULL;
2878 if (!memcpy_method) {
2879 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2881 g_error ("Old corlib found. Install a new one");
2883 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 *   Recursively set a bit in *WB_BITMAP for every pointer-sized slot of KLASS
 * (at byte OFFSET from the copy destination) that holds a managed reference.
 * Static fields are skipped; nested valuetypes with references recurse.
 */
2887 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
2889 MonoClassField *field;
2890 gpointer iter = NULL;
2892 while ((field = mono_class_get_fields (klass, &iter))) {
2895 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* Valuetype field offsets include the MonoObject header; strip it. */
2897 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2898 if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
2899 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2900 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2902 MonoClass *field_class = mono_class_from_mono_type (field->type);
2903 if (field_class->has_references)
2904 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *
 *   Emit a GC write barrier for storing VALUE through PTR. Prefers, in order:
 * the architecture's inline card-table barrier opcode, an inline card-table
 * mark sequence, and finally a call to the generic managed write barrier.
 * No-op when cfg->gen_write_barriers is off.
 */
2910 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
2912 int card_table_shift_bits;
2913 gpointer card_table_mask;
2915 MonoInst *dummy_use;
2916 int nursery_shift_bits;
2917 size_t nursery_size;
2918 gboolean has_card_table_wb = FALSE;
2920 if (!cfg->gen_write_barriers)
2923 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
2925 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
2927 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
2928 has_card_table_wb = TRUE;
2931 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
/* Arch-supported inline barrier: one opcode, expanded by the backend. */
2934 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2935 wbarrier->sreg1 = ptr->dreg;
2936 wbarrier->sreg2 = value->dreg;
2937 MONO_ADD_INS (cfg->cbb, wbarrier);
2938 } else if (card_table) {
/* Inline card marking: card = card_table + (ptr >> shift); *card = 1. */
2939 int offset_reg = alloc_preg (cfg);
2940 int card_reg = alloc_preg (cfg);
2943 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
2944 if (card_table_mask)
2945 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
2947 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
2948 * IMM's larger than 32bits.
2950 if (cfg->compile_aot) {
2951 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
2953 MONO_INST_NEW (cfg, ins, OP_PCONST);
2954 ins->inst_p0 = card_table;
2955 ins->dreg = card_reg;
2956 MONO_ADD_INS (cfg->cbb, ins);
2959 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
2960 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* Fallback: call the generic managed write barrier with the store address. */
2962 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2963 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep VALUE alive across the barrier for the register allocator. */
2966 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Emit an inline valuetype copy of SIZE bytes that issues write barriers
 * only for the reference-holding slots of KLASS (per the bitmap computed by
 * create_write_barrier_bitmap). Large copies (> 5 pointers) are delegated to
 * the mono_gc_wbarrier_value_copy_bitmap icall; very large ones (> 32
 * pointers) or under-aligned types are rejected. Return value (success
 * boolean) is on elided lines.
 */
2970 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
2972 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
2973 unsigned need_wb = 0;
2978 /*types with references can't have alignment smaller than sizeof(void*) */
2979 if (align < SIZEOF_VOID_P)
2982 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
2983 if (size > 32 * SIZEOF_VOID_P)
2986 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
2988 /* We don't unroll more than 5 stores to avoid code bloat. */
2989 if (size > 5 * SIZEOF_VOID_P) {
2990 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
2991 size += (SIZEOF_VOID_P - 1);
2992 size &= ~(SIZEOF_VOID_P - 1);
2994 EMIT_NEW_ICONST (cfg, iargs [2], size);
2995 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
2996 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3000 destreg = iargs [0]->dreg;
3001 srcreg = iargs [1]->dreg;
3004 dest_ptr_reg = alloc_preg (cfg);
3005 tmp_reg = alloc_preg (cfg);
3008 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
/* Unrolled pointer-sized copy loop; barrier only where the bitmap says so
 * (the bitmap test itself is on elided lines). */
3010 while (size >= SIZEOF_VOID_P) {
3011 MonoInst *load_inst;
3012 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3013 load_inst->dreg = tmp_reg;
3014 load_inst->inst_basereg = srcreg;
3015 load_inst->inst_offset = offset;
3016 MONO_ADD_INS (cfg->cbb, load_inst);
3018 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
3021 emit_write_barrier (cfg, iargs [0], load_inst);
3023 offset += SIZEOF_VOID_P;
3024 size -= SIZEOF_VOID_P;
3027 /*tmp += sizeof (void*)*/
3028 if (size >= SIZEOF_VOID_P) {
3029 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3030 MONO_ADD_INS (cfg->cbb, iargs [0]);
3034 /* Those cannot be references since size < sizeof (void*) */
3036 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3037 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3043 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3044 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3050 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3051 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3060 * Emit code to copy a valuetype of type @klass whose address is stored in
3061 * @src->dreg to memory whose address is stored at @dest->dreg.
/*
 * mini_emit_stobj:
 *
 *   Copy a valuetype of KLASS from *SRC->dreg to *DEST->dreg. Chooses among:
 * gsharedvt size/memcpy fetched from the rgctx, a write-barrier-aware copy
 * (mono_value_copy / inline wb memcpy) when the type holds references, an
 * inline mini_emit_memcpy for small types, and a managed memcpy call
 * otherwise. NATIVE selects the native (marshalled) size.
 */
3064 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3066 MonoInst *iargs [4];
3067 int context_used, n;
3069 MonoMethod *memcpy_method;
3070 MonoInst *size_ins = NULL;
3071 MonoInst *memcpy_ins = NULL;
3075 * This check breaks with spilled vars... need to handle it during verification anyway.
3076 * g_assert (klass && klass == src->klass && klass == dest->klass);
3079 if (mini_is_gsharedvt_klass (cfg, klass)) {
/* Size/memcpy helper are only known at runtime for gsharedvt types. */
3081 context_used = mini_class_check_context_used (cfg, klass);
3082 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3083 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3087 n = mono_class_native_size (klass, &align);
3089 n = mono_class_value_size (klass, &align);
3091 /* if native is true there should be no references in the struct */
3092 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3093 /* Avoid barriers when storing to the stack */
3094 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3095 (dest->opcode == OP_LDADDR))) {
3101 context_used = mini_class_check_context_used (cfg, klass);
3103 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3104 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3106 } else if (context_used) {
3107 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3109 if (cfg->compile_aot) {
3110 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
3112 EMIT_NEW_PCONST (cfg, iargs [2], klass);
3113 mono_class_compute_gc_descriptor (klass);
3118 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3120 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
3125 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
3126 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3127 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3132 iargs [2] = size_ins;
3134 EMIT_NEW_ICONST (cfg, iargs [2], n);
3136 memcpy_method = get_memcpy_method ();
/* gsharedvt path calls the runtime-resolved memcpy indirectly. */
3138 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3140 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return the managed String.memset(3-arg) helper, caching it in a static.
 * Aborts with g_error if the method is missing (old corlib).
 */
3145 get_memset_method (void)
3147 static MonoMethod *memset_method = NULL;
3148 if (!memset_method) {
3149 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3151 g_error ("Old corlib found. Install a new one");
3153 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code zeroing the valuetype KLASS at *DEST->dreg (CIL initobj).
 * For gsharedvt types, calls a runtime-resolved bzero helper with the
 * runtime size; small types get an inline mini_emit_memset; others call
 * the managed memset helper.
 */
3157 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3159 MonoInst *iargs [3];
3160 int n, context_used;
3162 MonoMethod *memset_method;
3163 MonoInst *size_ins = NULL;
3164 MonoInst *bzero_ins = NULL;
3165 static MonoMethod *bzero_method;
3167 /* FIXME: Optimize this for the case when dest is an LDADDR */
3169 mono_class_init (klass);
3170 if (mini_is_gsharedvt_klass (cfg, klass)) {
3171 context_used = mini_class_check_context_used (cfg, klass);
3172 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3173 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
/* NOTE(review): bzero_method caching guard appears to be on an elided
 * line — confirm against the full source. */
3175 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3176 g_assert (bzero_method);
3178 iargs [1] = size_ins;
3179 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3183 n = mono_class_value_size (klass, &align);
/* Small types: inline memset is cheaper than a managed call. */
3185 if (n <= sizeof (gpointer) * 5) {
3186 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3189 memset_method = get_memset_method ();
3191 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3192 EMIT_NEW_ICONST (cfg, iargs [2], n);
3193 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR loading the runtime generic context for METHOD. Depending on
 * CONTEXT_USED and the method kind, the rgctx comes from: the hidden
 * mrgctx variable (method-inflated generics), the hidden vtable variable
 * (static or valuetype methods), or 'this'->vtable. Returns the loaded
 * context instruction (return statements are on elided lines).
 */
3198 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3200 MonoInst *this = NULL;
3202 g_assert (cfg->generic_sharing_context);
/* Non-static, non-valuetype, no method context: derive from 'this'. */
3204 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3205 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3206 !method->klass->valuetype)
3207 EMIT_NEW_ARGLOAD (cfg, this, 0);
3209 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3210 MonoInst *mrgctx_loc, *mrgctx_var;
3213 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3215 mrgctx_loc = mono_get_vtable_var (cfg);
3216 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3219 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3220 MonoInst *vtable_loc, *vtable_var;
3224 vtable_loc = mono_get_vtable_var (cfg);
3225 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3227 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
/* The hidden var holds an mrgctx; load its class vtable field. */
3228 MonoInst *mrgctx_var = vtable_var;
3231 vtable_reg = alloc_preg (cfg);
3232 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3233 vtable_var->type = STACK_PTR;
/* Default case: load the vtable out of the 'this' object. */
3241 vtable_reg = alloc_preg (cfg);
3242 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (from MP) an rgctx-entry patch descriptor for a lazy rgctx
 * fetch: METHOD plus an embedded MonoJumpInfo of PATCH_TYPE/PATCH_DATA
 * and the requested INFO_TYPE. The 'return res' is on an elided line.
 */
3247 static MonoJumpInfoRgctxEntry *
3248 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3250 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3251 res->method = method;
3252 res->in_mrgctx = in_mrgctx;
3253 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3254 res->data->type = patch_type;
3255 res->data->data.target = patch_data;
3256 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *
 *   Emit a call to the lazy rgctx-fetch trampoline for ENTRY, passing the
 * rgctx instruction as the single argument.
 */
3261 static inline MonoInst*
3262 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3264 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR loading the RGCTX_TYPE property of KLASS from the runtime
 * generic context of the current method.
 */
3268 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3269 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3271 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3272 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3274 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 *
 *   Emit IR loading the RGCTX_TYPE property of signature SIG from the
 * runtime generic context of the current method.
 */
3278 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3279 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3281 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3282 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3284 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *
 *   Emit IR loading the RGCTX_TYPE property of a gsharedvt call target
 * described by (SIG, CMETHOD) from the runtime generic context.
 */
3288 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3289 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3291 MonoJumpInfoGSharedVtCall *call_info;
3292 MonoJumpInfoRgctxEntry *entry;
3295 call_info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3296 call_info->sig = sig;
3297 call_info->method = cmethod;
3299 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3300 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3302 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 *
 *   Emit IR loading the gsharedvt per-method INFO descriptor for CMETHOD
 * from the runtime generic context.
 */
3307 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3308 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3310 MonoJumpInfoRgctxEntry *entry;
3313 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3314 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3316 return emit_rgctx_fetch (cfg, rgctx, entry);
3320 * emit_get_rgctx_method:
3322 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3323 * normal constants, else emit a load from the rgctx.
/*
 * emit_get_rgctx_method:
 *
 *   Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is
 * 0, emit a compile-time constant (METHODCONST or METHOD_RGCTX_CONST); else
 * emit a lazy fetch from the runtime generic context.
 */
3326 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3327 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3329 if (!context_used) {
3332 switch (rgctx_type) {
3333 case MONO_RGCTX_INFO_METHOD:
3334 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3336 case MONO_RGCTX_INFO_METHOD_RGCTX:
3337 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3340 g_assert_not_reached ();
3343 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3344 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3346 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR loading the RGCTX_TYPE property of FIELD from the runtime
 * generic context of the current method.
 */
3351 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3352 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3354 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3355 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3357 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 *   Return the index of the gsharedvt info entry (RGCTX_TYPE, DATA) in
 * cfg->gsharedvt_info, reusing an existing entry when possible (except for
 * LOCAL_OFFSET entries, which are never shared) and growing the entry array
 * on demand. The returned index ('return idx') is on an elided line.
 */
3361 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3363 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3364 MonoRuntimeGenericContextInfoTemplate *template;
3369 for (i = 0; i < info->num_entries; ++i) {
3370 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3372 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* Grow the entry array geometrically (start at 16, then double). */
3376 if (info->num_entries == info->count_entries) {
3377 MonoRuntimeGenericContextInfoTemplate *new_entries;
3378 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3380 new_entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3382 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3383 info->entries = new_entries;
3384 info->count_entries = new_count_entries;
3387 idx = info->num_entries;
3388 template = &info->entries [idx];
3389 template->info_type = rgctx_type;
3390 template->data = data;
3392 info->num_entries ++;
3398 * emit_get_gsharedvt_info:
3400 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
/*
 * emit_get_gsharedvt_info:
 *
 *   Like emit_get_rgctx_*, but loads the (RGCTX_TYPE, DATA) entry directly
 * out of the method's gsharedvt info variable instead of going through an
 * rgctx fetch trampoline.
 */
3403 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3408 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3409 /* Load info->entries [idx] */
3410 dreg = alloc_preg (cfg);
3411 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/*
 * emit_get_gsharedvt_info_klass:
 *
 *   Convenience wrapper: fetch the gsharedvt info entry for KLASS (keyed by
 * its byval MonoType).
 */
3417 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3419 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3423 * On return the caller must check @klass for load errors.
/*
 * emit_generic_class_init:
 *
 *   Emit a call to the generic class-init trampoline for KLASS, passing its
 * vtable either as an rgctx fetch (shared code) or as a constant. On return
 * the caller must check KLASS for load errors.
 */
3426 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
3428 MonoInst *vtable_arg;
3432 context_used = mini_class_check_context_used (cfg, klass);
3435 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3436 klass, MONO_RGCTX_INFO_VTABLE);
3438 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3442 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* LLVM uses a distinct trampoline signature. */
3445 if (COMPILE_LLVM (cfg))
3446 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3448 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3449 #ifdef MONO_ARCH_VTABLE_REG
3450 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3451 cfg->uses_vtable_reg = TRUE;
/*
 * emit_seq_point:
 *
 *   Emit a debugger sequence point at IL offset IP when sequence points are
 * enabled and METHOD is the method being compiled (not an inlinee).
 * INTR_LOC marks interruptible locations; NONEMPTY_STACK flags seq points
 * taken with values on the evaluation stack.
 */
3458 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3462 if (cfg->gen_seq_points && cfg->method == method) {
3463 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3465 ins->flags |= MONO_INST_NONEMPTY_STACK;
3466 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *
 *   Under --debug=casts, record the source and target classes of a pending
 * cast into the JIT TLS data so a failing cast can produce a detailed
 * message. NULL_CHECK guards the object load with an is-null branch. If
 * OUT_BBLOCK is non-NULL it receives the current bblock on return.
 */
3471 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check, MonoBasicBlock **out_bblock)
3473 if (mini_get_debug_options ()->better_cast_details) {
3474 int to_klass_reg = alloc_preg (cfg);
3475 int vtable_reg = alloc_preg (cfg);
3476 int klass_reg = alloc_preg (cfg);
3477 MonoBasicBlock *is_null_bb = NULL;
3481 NEW_BBLOCK (cfg, is_null_bb);
3483 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3484 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3487 tls_get = mono_get_jit_tls_intrinsic (cfg);
3489 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3493 MONO_ADD_INS (cfg->cbb, tls_get);
/* class_cast_from = obj->vtable->klass; class_cast_to = klass. */
3494 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3495 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3497 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3498 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3499 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3502 MONO_START_BB (cfg, is_null_bb);
3504 *out_bblock = cfg->cbb;
/*
 * reset_cast_details:
 *
 *   Clear the saved cast details in JIT TLS (set by save_cast_details)
 * once the cast has succeeded. Clearing class_cast_from is sufficient to
 * mark the record invalid.
 */
3510 reset_cast_details (MonoCompile *cfg)
3512 /* Reset the variables holding the cast details */
3513 if (mini_get_debug_options ()->better_cast_details) {
3514 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3516 MONO_ADD_INS (cfg->cbb, tls_get);
3517 /* It is enough to reset the from field */
3518 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3523 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit a runtime check that OBJ is an instance of ARRAY_CLASS, throwing
 * ArrayTypeMismatchException otherwise. The comparison strategy depends on
 * compilation mode: class pointers under MONO_OPT_SHARED, an rgctx-loaded
 * vtable under generic sharing, and vtable constants otherwise. On return
 * the caller must check ARRAY_CLASS for load errors.
 */
3526 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3528 int vtable_reg = alloc_preg (cfg);
3531 context_used = mini_class_check_context_used (cfg, array_class);
3533 save_cast_details (cfg, array_class, obj->dreg, FALSE, NULL);
3535 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3537 if (cfg->opt & MONO_OPT_SHARED) {
3538 int class_reg = alloc_preg (cfg);
3539 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3540 if (cfg->compile_aot) {
3541 int klass_reg = alloc_preg (cfg);
3542 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3543 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3545 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3547 } else if (context_used) {
3548 MonoInst *vtable_ins;
3550 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3551 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3553 if (cfg->compile_aot) {
3557 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3559 vt_reg = alloc_preg (cfg);
3560 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3561 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3564 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3566 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3570 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3572 reset_cast_details (cfg);
/*
 * handle_unbox_nullable:
 *
 *   Emit a call to Nullable<T>.Unbox for VAL.  In shared generic code
 * (context_used != 0) the method address is loaded from the runtime generic
 * context and invoked through an indirect call; otherwise a direct call is
 * emitted, passing the vtable as an extra argument when method sharing
 * requires it (check_method_sharing; pass_mrgctx is asserted to be FALSE).
 * Returns the call instruction whose result is the unboxed value.
 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
 * generic code is generated.
 */
3580 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3582 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3585 MonoInst *rgctx, *addr;
3587 /* FIXME: What if the class is shared? We might not
3588 have to get the address of the method from the
3590 addr = emit_get_rgctx_method (cfg, context_used, method,
3591 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3593 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3595 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3597 gboolean pass_vtable, pass_mrgctx;
3598 MonoInst *rgctx_arg = NULL;
3600 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3601 g_assert (!pass_mrgctx);
3604 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3607 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3610 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit the IL 'unbox' operation for the object on top of the stack
 * (sp [0]), checked against KLASS:
 *   - fault-load the vtable (null check), load vtable->rank and require
 *     rank == 0, else throw InvalidCastException;
 *   - check that the object's element class matches klass->element_class,
 *     either against an rgctx-loaded class (shared generic code) or via
 *     mini_emit_class_check with cast details recorded around it;
 *   - finally compute the value address: obj + sizeof (MonoObject),
 *     typed as a managed pointer (STACK_MP).
 * Returns the address instruction ('add'); the return statement is elided
 * in this excerpt.
 */
3615 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3619 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3620 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3621 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3622 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3624 obj_reg = sp [0]->dreg;
3625 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3626 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3628 /* FIXME: generics */
3629 g_assert (klass->rank == 0);
/* The target of an unbox must not be an array type. */
3632 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3633 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3635 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3636 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
3639 MonoInst *element_class;
3641 /* This assertion is from the unboxcast insn */
3642 g_assert (klass->rank == 0);
3644 element_class = emit_get_rgctx_klass (cfg, context_used,
3645 klass->element_class, MONO_RGCTX_INFO_KLASS);
3647 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3648 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3650 save_cast_details (cfg, klass->element_class, obj_reg, FALSE, NULL);
3651 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3652 reset_cast_details (cfg);
/* The unboxed value lives right after the object header. */
3655 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3656 MONO_ADD_INS (cfg->cbb, add);
3657 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 *   Emit an unbox of OBJ to KLASS in gsharedvt (generic-sharing-by-value-
 * type) code, where KLASS's concrete identity is only known at runtime.
 * First a castclass_unbox icall validates the object against the runtime
 * class, then control branches three ways on the rgctx-provided
 * CLASS_BOX_TYPE value:
 *   - value 1 -> is_ref_bb: the runtime type is a reference type; the ref
 *     is spilled to a temporary so an address can be produced;
 *   - value 2 -> is_nullable_bb: call Nullable<T>.Unbox through a
 *     hand-built one-parameter signature (the real method cannot be
 *     constructed at JIT time) and take the address of its result;
 *   - fallthrough: plain vtype, address is obj + sizeof (MonoObject).
 * (The 1/2 meanings above are inferred from the branch targets — confirm
 * against the BOX_TYPE enum.)  All paths join at end_bb, where addr_reg
 * holds the address of the unboxed data; the result is loaded from it and
 * *OUT_CBB is set to the current (join) basic block for the caller.
 */
3664 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj, MonoBasicBlock **out_cbb)
3666 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3667 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3671 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
3677 args [1] = klass_inst;
3680 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
3682 NEW_BBLOCK (cfg, is_ref_bb);
3683 NEW_BBLOCK (cfg, is_nullable_bb);
3684 NEW_BBLOCK (cfg, end_bb);
3685 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3686 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3687 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3689 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3690 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3692 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3693 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Vtype path: the data sits right after the object header. */
3697 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3698 MONO_ADD_INS (cfg->cbb, addr);
3700 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3703 MONO_START_BB (cfg, is_ref_bb);
3705 /* Save the ref to a temporary */
3706 dreg = alloc_ireg (cfg);
3707 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
3708 addr->dreg = addr_reg;
3709 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3710 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3713 MONO_START_BB (cfg, is_nullable_bb);
3716 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3717 MonoInst *unbox_call;
3718 MonoMethodSignature *unbox_sig;
3721 var = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
/* Build a (object) -> T signature by hand for the indirect Unbox call. */
3723 unbox_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3724 unbox_sig->ret = &klass->byval_arg;
3725 unbox_sig->param_count = 1;
3726 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
3727 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3729 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3730 addr->dreg = addr_reg;
3733 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3736 MONO_START_BB (cfg, end_bb);
3739 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
3741 *out_cbb = cfg->cbb;
/*
 * handle_alloc:
 *
 *   Emit IR that allocates an instance of KLASS, either for a box operation
 * (FOR_BOX) or for 'newobj'.  Allocation strategies, in order:
 *   - shared generic code (context_used): fetch the class or vtable from
 *     the runtime generic context, then call mono_object_new /
 *     mono_object_new_specific, or the GC's managed allocator when one is
 *     available and MONO_OPT_SHARED is off;
 *   - MONO_OPT_SHARED: pass the current domain plus a class constant to
 *     mono_object_new;
 *   - AOT out-of-line corlib classes: call a mscorlib-specialized helper
 *     keyed by type token, to avoid relocations (common in throw paths);
 *   - otherwise: look up the vtable at JIT time and call either the GC
 *     managed allocator or the class-specific allocation function
 *     (mono_class_get_allocation_ftn), which may request the instance size
 *     in pointer-words as a first argument (pass_lw).
 * Returns NULL and set the cfg exception on error.
 */
3750 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3752 MonoInst *iargs [2];
3758 MonoInst *iargs [2];
3760 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
3762 if (cfg->opt & MONO_OPT_SHARED)
3763 rgctx_info = MONO_RGCTX_INFO_KLASS;
3765 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3766 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3768 if (cfg->opt & MONO_OPT_SHARED) {
3769 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3771 alloc_ftn = mono_object_new;
3774 alloc_ftn = mono_object_new_specific;
3777 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED))
3778 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3780 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3783 if (cfg->opt & MONO_OPT_SHARED) {
3784 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3785 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3787 alloc_ftn = mono_object_new;
3788 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3789 /* This happens often in argument checking code, eg. throw new FooException... */
3790 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3791 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3792 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3794 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3795 MonoMethod *managed_alloc = NULL;
/* vtable creation failed: report a TypeLoadException through the cfg. */
3799 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3800 cfg->exception_ptr = klass;
3804 #ifndef MONO_CROSS_COMPILE
3805 managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
3808 if (managed_alloc) {
3809 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3810 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3812 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
3814 guint32 lw = vtable->klass->instance_size;
/* Round the instance size up to a whole number of pointer-sized words. */
3815 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3816 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3817 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3820 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3824 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_box:
 *
 *   Emit IR that boxes VAL of type KLASS and returns the resulting object
 * reference.  Three main cases:
 *   - Nullable<T>: call Nullable<T>.Box, indirectly through the rgctx in
 *     shared generic code, or directly otherwise (with a vtable argument
 *     when method sharing requires it; pass_mrgctx is asserted FALSE);
 *   - gsharedvt KLASS: branch at runtime on the rgctx CLASS_BOX_TYPE value
 *     (1 -> reference type: the "boxed" result is the value itself, loaded
 *     via a spill variable; 2 -> nullable: indirect call to Nullable<T>.Box
 *     through a hand-built (T) -> object signature; fallthrough -> plain
 *     vtype: allocate and store the value after the header).  The 1/2
 *     meanings are inferred from the branch targets — confirm against the
 *     BOX_TYPE enum.  All paths join in end_bb and *OUT_CBB is updated;
 *   - ordinary vtype: handle_alloc an instance and store VAL at offset
 *     sizeof (MonoObject).
 * *OUT_CBB always receives the basic block the caller should continue in.
 * Returns NULL and set the cfg exception on error.
 */
3831 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoBasicBlock **out_cbb)
3833 MonoInst *alloc, *ins;
3835 *out_cbb = cfg->cbb;
3837 if (mono_class_is_nullable (klass)) {
3838 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3841 /* FIXME: What if the class is shared? We might not
3842 have to get the method address from the RGCTX. */
3843 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3844 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3845 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3847 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3849 gboolean pass_vtable, pass_mrgctx;
3850 MonoInst *rgctx_arg = NULL;
3852 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3853 g_assert (!pass_mrgctx);
3856 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3859 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3862 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
3866 if (mini_is_gsharedvt_klass (cfg, klass)) {
3867 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3868 MonoInst *res, *is_ref, *src_var, *addr;
3871 dreg = alloc_ireg (cfg);
3873 NEW_BBLOCK (cfg, is_ref_bb);
3874 NEW_BBLOCK (cfg, is_nullable_bb);
3875 NEW_BBLOCK (cfg, end_bb);
3876 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3877 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3878 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3880 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3881 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Fallthrough: plain vtype — allocate and copy the value after the header. */
3884 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3887 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3888 ins->opcode = OP_STOREV_MEMBASE;
3890 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
3891 res->type = STACK_OBJ;
3893 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3896 MONO_START_BB (cfg, is_ref_bb);
3897 addr_reg = alloc_ireg (cfg);
3899 /* val is a vtype, so has to load the value manually */
3900 src_var = get_vreg_to_inst (cfg, val->dreg);
3902 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
3903 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
3904 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
3905 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3908 MONO_START_BB (cfg, is_nullable_bb);
3911 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
3912 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
3914 MonoMethodSignature *box_sig;
3917 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
3918 * construct that method at JIT time, so have to do things by hand.
3920 box_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3921 box_sig->ret = &mono_defaults.object_class->byval_arg;
3922 box_sig->param_count = 1;
3923 box_sig->params [0] = &klass->byval_arg;
3924 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
3925 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
3926 res->type = STACK_OBJ;
3930 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3932 MONO_START_BB (cfg, end_bb);
3934 *out_cbb = cfg->cbb;
/* Non-gsharedvt, non-nullable vtype: straightforward allocate + store. */
3938 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3942 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return whether KLASS is a generic instance (or, in shared code, an open
 * generic) that instantiates at least one variant (co- or contravariant)
 * type parameter with a reference type.  Such classes need the slow-path
 * cast machinery, since variance makes the simple class-hierarchy check
 * insufficient.  (The non-generic fallthrough/returns are elided in this
 * excerpt.)
 */
3949 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
3952 MonoGenericContainer *container;
3953 MonoGenericInst *ginst;
3955 if (klass->generic_class) {
3956 container = klass->generic_class->container_class->generic_container;
3957 ginst = klass->generic_class->context.class_inst;
3958 } else if (klass->generic_container && context_used) {
3959 container = klass->generic_container;
3960 ginst = container->context.class_inst;
3965 for (i = 0; i < container->type_argc; ++i) {
/* Skip invariant parameters; only variant ones affect cast semantics. */
3967 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
3969 type = ginst->type_argv [i];
3970 if (mini_type_is_reference (cfg, type))
/*
 * is_complex_isinst:
 *   TRUE when KLASS needs the cached-cast slow path (interface, array,
 * nullable, MBR, sealed, or generic type variable).  Note the leading
 * 'TRUE ||' makes the macro unconditionally TRUE, i.e. the inline fast
 * path is currently disabled — see the FIXME below.
 */
3976 // FIXME: This doesn't work yet (class libs tests fail?)
3977 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/*
 * emit_castclass_with_cache:
 *
 *   Emit a call to the castclass-with-cache marshalling wrapper for ARGS
 * (obj / klass / cache slot), recording cast details around the call so a
 * failed cast produces a detailed InvalidCastException.  Returns the call
 * result (the cast object reference).
 */
3980 emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args, MonoBasicBlock **out_bblock)
3982 MonoMethod *mono_castclass;
3985 mono_castclass = mono_marshal_get_castclass_with_cache ();
3987 save_cast_details (cfg, klass, args [0]->dreg, TRUE, out_bblock);
3988 res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
3989 reset_cast_details (cfg);
/*
 * handle_castclass:
 *
 *   Emit the IL 'castclass' operation: check that SRC is null or an
 * instance of KLASS, throwing InvalidCastException otherwise, and return
 * the (unchanged) reference.
 *   - Complex types (see is_complex_isinst — currently always true) and
 *     variant generic instances go through the cached-cast wrapper, with
 *     the cache entry's klass loaded from its second pointer-sized slot;
 *   - otherwise the check is inlined: null short-circuits to is_null_bb;
 *     interfaces use mini_emit_iface_cast; sealed non-array classes in
 *     JIT (non-AOT, non-shared) mode compare the MonoClass pointer
 *     directly (the vtable-compare variant is disabled by 'if (0)' because
 *     of broken remoting — see the FIXME); everything else falls back to
 *     mini_emit_castclass_inst with an optional rgctx-loaded class.
 * Cast details are recorded before the checks and cleared at is_null_bb.
 * Returns NULL and set the cfg exception on error.
 */
3998 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4000 MonoBasicBlock *is_null_bb;
4001 int obj_reg = src->dreg;
4002 int vtable_reg = alloc_preg (cfg);
4003 MonoInst *klass_inst = NULL;
4008 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4009 MonoInst *cache_ins;
4011 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4016 /* klass - it's the second element of the cache entry*/
4017 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4020 args [2] = cache_ins;
4022 return emit_castclass_with_cache (cfg, klass, args, NULL);
4025 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4028 NEW_BBLOCK (cfg, is_null_bb);
/* A null reference always passes castclass. */
4030 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4031 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
4033 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4035 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4036 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4037 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
4039 int klass_reg = alloc_preg (cfg);
4041 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4043 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4044 /* the remoting code is broken, access the class for now */
4045 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4046 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4048 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4049 cfg->exception_ptr = klass;
4052 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4054 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4055 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4057 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
4059 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4060 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4064 MONO_START_BB (cfg, is_null_bb);
4066 reset_cast_details (cfg);
/*
 * handle_isinst:
 *
 *   Emit the IL 'isinst' operation: evaluate to SRC when SRC is null or an
 * instance of KLASS, to NULL otherwise.  res_reg is assigned obj_reg up
 * front (so that assignment can be if-converted later); the false path
 * overwrites it with NULL, and is_null_bb simply keeps the copy.
 *   - Variant generic instances and "complex" types (is_complex_isinst is
 *     currently always true) dispatch to the cached isinst wrapper;
 *   - interfaces: mini_emit_iface_cast with explicit true/false targets;
 *   - array types (klass->rank): compare the vtable rank, then test the
 *     cast_class, with special handling when the element type is object
 *     (enums excluded via the parent check), System.ValueType
 *     (enum_class->parent), System.Enum, or an interface; SZARRAY also
 *     requires a NULL bounds field so multi-dim arrays of rank 1 do not
 *     match vectors;
 *   - sealed classes in JIT mode: direct MonoClass pointer compare (the
 *     vtable-compare variant is disabled by 'if (0)', see FIXME);
 *   - everything else: mini_emit_isninst_cast_inst walking the hierarchy.
 * Control joins at end_bb with the result reference in res_reg.
 * Returns NULL and set the cfg exception on error.
 */
4075 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4078 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
4079 int obj_reg = src->dreg;
4080 int vtable_reg = alloc_preg (cfg);
4081 int res_reg = alloc_ireg_ref (cfg);
4082 MonoInst *klass_inst = NULL;
4087 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4088 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4089 MonoInst *cache_ins;
4091 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4096 /* klass - it's the second element of the cache entry*/
4097 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4100 args [2] = cache_ins;
4102 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
4105 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4108 NEW_BBLOCK (cfg, is_null_bb);
4109 NEW_BBLOCK (cfg, false_bb);
4110 NEW_BBLOCK (cfg, end_bb);
4112 /* Do the assignment at the beginning, so the other assignment can be if converted */
4113 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4114 ins->type = STACK_OBJ;
4117 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4118 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4120 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4122 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4123 g_assert (!context_used);
4124 /* the is_null_bb target simply copies the input register to the output */
4125 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4127 int klass_reg = alloc_preg (cfg);
4130 int rank_reg = alloc_preg (cfg);
4131 int eclass_reg = alloc_preg (cfg);
4133 g_assert (!context_used);
/* Array case: rank must match before element classes are compared. */
4134 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4135 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4136 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4137 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4138 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
4139 if (klass->cast_class == mono_defaults.object_class) {
4140 int parent_reg = alloc_preg (cfg);
4141 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
4142 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4143 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4144 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4145 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
4146 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4147 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4148 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4149 } else if (klass->cast_class == mono_defaults.enum_class) {
4150 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4151 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4152 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4153 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4155 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4156 /* Check that the object is a vector too */
4157 int bounds_reg = alloc_preg (cfg);
4158 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
4159 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4160 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4163 /* the is_null_bb target simply copies the input register to the output */
4164 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4166 } else if (mono_class_is_nullable (klass)) {
4167 g_assert (!context_used);
4168 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4169 /* the is_null_bb target simply copies the input register to the output */
4170 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
4172 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4173 g_assert (!context_used);
4174 /* the remoting code is broken, access the class for now */
4175 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4176 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4178 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4179 cfg->exception_ptr = klass;
4182 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4184 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4185 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4187 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4188 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
4190 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4191 /* the is_null_bb target simply copies the input register to the output */
4192 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
4197 MONO_START_BB (cfg, false_bb);
/* Not an instance: the isinst result is NULL. */
4199 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4200 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4202 MONO_START_BB (cfg, is_null_bb);
4204 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit the remoting-aware isinst used by the OP_CISINST internal opcode.
 * Instead of an object/NULL result it produces an integer classification
 * (see the comment below): 0 = instance, 1 = not an instance, 2 = the
 * object is a transparent proxy whose type cannot be decided at JIT-
 * generated-code level (the caller must consult the remoting runtime).
 * Without DISABLE_REMOTING, extra basic blocks test for
 * transparent_proxy_class and for a non-null custom_type_info before the
 * ordinary hierarchy/interface checks; with remoting disabled the proxy
 * path aborts via g_error.  The final OP_ICONST instruction materializes
 * dreg as the STACK_I4 result.
 */
4210 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4212 /* This opcode takes as input an object reference and a class, and returns:
4213 0) if the object is an instance of the class,
4214 1) if the object is not instance of the class,
4215 2) if the object is a proxy whose type cannot be determined */
4218 #ifndef DISABLE_REMOTING
4219 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4221 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4223 int obj_reg = src->dreg;
4224 int dreg = alloc_ireg (cfg);
4226 #ifndef DISABLE_REMOTING
4227 int klass_reg = alloc_preg (cfg);
4230 NEW_BBLOCK (cfg, true_bb);
4231 NEW_BBLOCK (cfg, false_bb);
4232 NEW_BBLOCK (cfg, end_bb);
4233 #ifndef DISABLE_REMOTING
4234 NEW_BBLOCK (cfg, false2_bb);
4235 NEW_BBLOCK (cfg, no_proxy_bb);
/* A null reference is "not an instance" (result 1). */
4238 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4239 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
4241 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4242 #ifndef DISABLE_REMOTING
4243 NEW_BBLOCK (cfg, interface_fail_bb);
4246 tmp_reg = alloc_preg (cfg);
4247 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4248 #ifndef DISABLE_REMOTING
/* Interface check first; on failure, see whether this is a proxy with custom type info. */
4249 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4250 MONO_START_BB (cfg, interface_fail_bb);
4251 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4253 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
4255 tmp_reg = alloc_preg (cfg);
4256 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4257 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4258 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4260 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
4263 #ifndef DISABLE_REMOTING
4264 tmp_reg = alloc_preg (cfg);
4265 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4266 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4268 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Proxy: test against the remote class's proxy_class instead. */
4269 tmp_reg = alloc_preg (cfg);
4270 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4271 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4273 tmp_reg = alloc_preg (cfg);
4274 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4275 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4276 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4278 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4279 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4281 MONO_START_BB (cfg, no_proxy_bb);
4283 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4285 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
4289 MONO_START_BB (cfg, false_bb);
4291 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4292 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4294 #ifndef DISABLE_REMOTING
4295 MONO_START_BB (cfg, false2_bb);
4297 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4298 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4301 MONO_START_BB (cfg, true_bb);
4303 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4305 MONO_START_BB (cfg, end_bb);
/* Materialize the classification value as the opcode's result. */
4308 MONO_INST_NEW (cfg, ins, OP_ICONST);
4310 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit the remoting-aware castclass used by the OP_CCASTCLASS internal
 * opcode.  Result values (see the comment below): 0 = instance (or null),
 * 1 = transparent proxy whose type cannot be decided here; any other case
 * throws InvalidCastException.  Without DISABLE_REMOTING the code probes
 * for transparent_proxy_class and a non-null custom_type_info before
 * falling back to mini_emit_castclass; with remoting disabled the proxy
 * path aborts via g_error.  Cast details are saved up front so a thrown
 * InvalidCastException can report both types.
 */
4316 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4318 /* This opcode takes as input an object reference and a class, and returns:
4319 0) if the object is an instance of the class,
4320 1) if the object is a proxy whose type cannot be determined
4321 an InvalidCastException exception is thrown otherwhise*/
4324 #ifndef DISABLE_REMOTING
4325 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4327 MonoBasicBlock *ok_result_bb;
4329 int obj_reg = src->dreg;
4330 int dreg = alloc_ireg (cfg);
4331 int tmp_reg = alloc_preg (cfg);
4333 #ifndef DISABLE_REMOTING
4334 int klass_reg = alloc_preg (cfg);
4335 NEW_BBLOCK (cfg, end_bb);
4338 NEW_BBLOCK (cfg, ok_result_bb);
/* null always casts successfully. */
4340 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4341 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
4343 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4345 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4346 #ifndef DISABLE_REMOTING
4347 NEW_BBLOCK (cfg, interface_fail_bb);
4349 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4350 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4351 MONO_START_BB (cfg, interface_fail_bb);
4352 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Only a transparent proxy may survive a failed interface check. */
4354 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
4356 tmp_reg = alloc_preg (cfg);
4357 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4358 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4359 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
4361 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4362 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4364 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4365 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4366 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
4369 #ifndef DISABLE_REMOTING
4370 NEW_BBLOCK (cfg, no_proxy_bb);
4372 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4373 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4374 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Proxy: test against the remote class's proxy_class instead. */
4376 tmp_reg = alloc_preg (cfg);
4377 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4378 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4380 tmp_reg = alloc_preg (cfg);
4381 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4382 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4383 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4385 NEW_BBLOCK (cfg, fail_1_bb);
4387 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
4389 MONO_START_BB (cfg, fail_1_bb);
4391 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4392 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4394 MONO_START_BB (cfg, no_proxy_bb);
4396 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4398 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
4402 MONO_START_BB (cfg, ok_result_bb);
4404 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4406 #ifndef DISABLE_REMOTING
4407 MONO_START_BB (cfg, end_bb);
/* Materialize the result value as the opcode's STACK_I4 result. */
4411 MONO_INST_NEW (cfg, ins, OP_ICONST);
4413 ins->type = STACK_I4;
/*
 * handle_delegate_ctor:
 *
 *   Emit an inlined delegate construction of type KLASS over METHOD with
 * TARGET as 'this' (inlines mono_delegate_ctor): allocate the delegate,
 * store the target and method fields (with write barriers when
 * gen_write_barriers is set; a compile-time NULL target store is skipped),
 * store a per-domain method_code slot (lazily created in method_code_hash
 * under the domain lock) that will receive the compiled code address
 * later, and install the delegate invoke trampoline — an AOT constant or a
 * JIT-time trampoline pointer — into invoke_impl.  The remaining argument
 * checks are performed by the delegate trampoline itself.
 * Returns NULL and set the cfg exception on error.
 */
4421 static G_GNUC_UNUSED MonoInst*
4422 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
4426 gpointer *trampoline;
4427 MonoInst *obj, *method_ins, *tramp_ins;
4431 obj = handle_alloc (cfg, klass, FALSE, 0);
4435 /* Inline the contents of mono_delegate_ctor */
4437 /* Set target field */
4438 /* Optimize away setting of NULL target */
4439 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
4440 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
4441 if (cfg->gen_write_barriers) {
4442 dreg = alloc_preg (cfg);
4443 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target));
4444 emit_write_barrier (cfg, ptr, target);
4448 /* Set method field */
4449 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4450 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4451 if (cfg->gen_write_barriers) {
4452 dreg = alloc_preg (cfg);
4453 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method));
4454 emit_write_barrier (cfg, ptr, method_ins);
4457 * To avoid looking up the compiled code belonging to the target method
4458 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4459 * store it, and we fill it after the method has been compiled.
4461 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4462 MonoInst *code_slot_ins;
4465 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Lazily create the per-domain method -> code-slot hash under the domain lock. */
4467 domain = mono_domain_get ();
4468 mono_domain_lock (domain);
4469 if (!domain_jit_info (domain)->method_code_hash)
4470 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4471 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4473 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
4474 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4476 mono_domain_unlock (domain);
4478 if (cfg->compile_aot)
4479 EMIT_NEW_AOTCONST (cfg, code_slot_ins, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
4481 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
4483 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
4486 /* Set invoke_impl field */
4487 if (cfg->compile_aot) {
4488 MonoClassMethodPair *del_tramp;
4490 del_tramp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoClassMethodPair));
4491 del_tramp->klass = klass;
4492 del_tramp->method = context_used ? NULL : method;
4493 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
4495 trampoline = mono_create_delegate_trampoline_with_method (cfg->domain, klass, context_used ? NULL : method);
4496 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4498 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
4500 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit the allocation of a multi-dimensional array of rank RANK with the
 * dimension arguments in SP, by calling the (vararg) mono_array_new_va
 * icall through its wrapper.  Registering the icall here ensures the
 * wrapper exists; the vararg convention forces MONO_CFG_HAS_VARARGS and
 * disables LLVM for this method.
 */
4506 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4508 MonoJitICallInfo *info;
4510 /* Need to register the icall so it gets an icall wrapper */
4511 info = mono_get_array_new_va_icall (rank);
4513 cfg->flags |= MONO_CFG_HAS_VARARGS;
4515 /* mono_array_new_va () needs a vararg calling convention */
4516 cfg->disable_llvm = TRUE;
4518 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4519 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *   Materialize the GOT address into cfg->got_var by prepending an
 *   OP_LOAD_GOTADDR to the entry basic block, then keep the variable alive
 *   with a dummy use in the exit block.  No-op when there is no got_var or
 *   it was already allocated.
 */
4523 mono_emit_load_got_addr (MonoCompile *cfg)
4525 MonoInst *getaddr, *dummy_use;
/* Guard: nothing to do without a got_var, or if already done. */
4527 if (!cfg->got_var || cfg->got_var_allocated)
4530 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
4531 getaddr->cil_code = cfg->header->code;
4532 getaddr->dreg = cfg->got_var->dreg;
4534 /* Add it to the start of the first bblock */
4535 if (cfg->bb_entry->code) {
/* Entry block already has code: splice the load in front of it. */
4536 getaddr->next = cfg->bb_entry->code;
4537 cfg->bb_entry->code = getaddr;
4540 MONO_ADD_INS (cfg->bb_entry, getaddr);
4542 cfg->got_var_allocated = TRUE;
4545 * Add a dummy use to keep the got_var alive, since real uses might
4546 * only be generated by the back ends.
4547 * Add it to end_bblock, so the variable's lifetime covers the whole
4549 * It would be better to make the usage of the got var explicit in all
4550 * cases when the backend needs it (i.e. calls, throw etc.), so this
4551 * wouldn't be needed.
4553 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
4554 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/*
 * Lazily-initialized IL-size threshold for inlining; overridable via the
 * MONO_INLINELIMIT environment variable (see mono_method_check_inlining).
 */
4557 static int inline_limit;
4558 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *   Decide whether METHOD may be inlined into the method being compiled.
 *   Visible rejection criteria in this chunk: generic sharing context,
 *   inline depth > 10, unreadable header summary, NOINLINING/SYNCHRONIZED
 *   flags, MarshalByRef classes, body size over the MONO_INLINELIMIT
 *   threshold (unless AggressiveInlining), classes whose cctor has not run,
 *   declarative security, and (on soft-float targets) R4 parameters/returns.
 *   NOTE(review): several return statements are elided in this extraction;
 *   the exact return values must be confirmed against the full source.
 */
4561 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
4563 MonoMethodHeaderSummary header;
4565 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4566 MonoMethodSignature *sig = mono_method_signature (method);
/* Inlining is disabled entirely under generic sharing. */
4570 if (cfg->generic_sharing_context)
/* Bound recursion of nested inlining. */
4573 if (cfg->inline_depth > 10)
4576 #ifdef MONO_ARCH_HAVE_LMF_OPS
4577 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
4578 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
4579 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
/* Cheap header summary; fails for runtime/icall/pinvoke methods. */
4584 if (!mono_method_get_header_summary (method, &header))
4587 /*runtime, icall and pinvoke are checked by summary call*/
4588 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
4589 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
4590 (mono_class_is_marshalbyref (method->klass)) ||
4594 /* also consider num_locals? */
4595 /* Do the size check early to avoid creating vtables */
4596 if (!inline_limit_inited) {
4597 if (g_getenv ("MONO_INLINELIMIT"))
4598 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
4600 inline_limit = INLINE_LENGTH_LIMIT;
4601 inline_limit_inited = TRUE;
/* AggressiveInlining bypasses the size cap. */
4603 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
4607 * if we can initialize the class of the method right away, we do,
4608 * otherwise we don't allow inlining if the class needs initialization,
4609 * since it would mean inserting a call to mono_runtime_class_init()
4610 * inside the inlined code
4612 if (!(cfg->opt & MONO_OPT_SHARED)) {
4613 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
4614 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
4615 vtable = mono_class_vtable (cfg->domain, method->klass);
4618 if (!cfg->compile_aot)
4619 mono_runtime_class_init (vtable);
4620 } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
4621 if (cfg->run_cctors && method->klass->has_cctor) {
4622 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
4623 if (!method->klass->runtime_info)
4624 /* No vtable created yet */
4626 vtable = mono_class_vtable (cfg->domain, method->klass);
4629 /* This makes so that inline cannot trigger */
4630 /* .cctors: too many apps depend on them */
4631 /* running with a specific order... */
4632 if (! vtable->initialized)
4634 mono_runtime_class_init (vtable);
4636 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
4637 if (!method->klass->runtime_info)
4638 /* No vtable created yet */
4640 vtable = mono_class_vtable (cfg->domain, method->klass);
4643 if (!vtable->initialized)
4648 * If we're compiling for shared code
4649 * the cctor will need to be run at aot method load time, for example,
4650 * or at the end of the compilation of the inlining method.
4652 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
4657 * CAS - do not inline methods with declarative security
4658 * Note: this has to be before any possible return TRUE;
4660 if (mono_security_method_has_declsec (method))
4663 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4664 if (mono_arch_is_soft_float ()) {
/* R4 values are emulated on soft-float; do not inline methods using them. */
4666 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4668 for (i = 0; i < sig->param_count; ++i)
4669 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *   Decide whether a static-field access compiled into METHOD must trigger
 *   KLASS's class constructor at runtime.  JIT mode can skip it when the
 *   vtable is already initialized; AOT cannot rely on that.
 *   NOTE(review): the return statements are elided in this extraction —
 *   confirm polarity of each branch against the full source.
 */
4678 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
4680 if (!cfg->compile_aot) {
4682 if (vtable->initialized)
4686 if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
4687 if (cfg->method == method)
4691 if (!mono_class_needs_cctor_run (klass, method))
4694 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
4695 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *   Emit IR computing the address of ARR[INDEX] for a one-dimensional array
 *   of element class KLASS; optionally emits a bounds check (BCHECK).
 *   On x86/amd64, power-of-two element sizes use a single LEA; otherwise the
 *   address is index * size + offsetof(MonoArray, vector).  For gsharedvt
 *   variable-size classes the element size comes from the rgctx at runtime.
 */
4702 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4706 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
4709 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
4712 mono_class_init (klass);
4713 size = mono_class_array_element_size (klass);
4716 mult_reg = alloc_preg (cfg);
4717 array_reg = arr->dreg;
4718 index_reg = index->dreg;
4720 #if SIZEOF_REGISTER == 8
4721 /* The array reg is 64 bits but the index reg is only 32 */
4722 if (COMPILE_LLVM (cfg)) {
/* LLVM handles the extension itself. */
4724 index2_reg = index_reg;
4726 index2_reg = alloc_preg (cfg);
4727 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
4730 if (index->type == STACK_I8) {
/* 32-bit target with an I8 index: truncate to I4. */
4731 index2_reg = alloc_preg (cfg);
4732 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4734 index2_reg = index_reg;
4739 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
4741 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4742 if (size == 1 || size == 2 || size == 4 || size == 8) {
4743 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4745 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
4746 ins->klass = mono_class_get_element_class (klass);
4747 ins->type = STACK_MP;
4753 add_reg = alloc_ireg_mp (cfg);
4756 MonoInst *rgctx_ins;
/* gsharedvt: element size is only known at runtime, read it from the rgctx. */
4759 g_assert (cfg->generic_sharing_context);
4760 context_used = mini_class_check_context_used (cfg, klass);
4761 g_assert (context_used);
4762 rgctx_ins = emit_get_gsharedvt_info (cfg, &klass->byval_arg, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
4763 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
4765 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
4767 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4768 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4769 ins->klass = mono_class_get_element_class (klass);
4770 ins->type = STACK_MP;
4771 MONO_ADD_INS (cfg->cbb, ins);
#ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *   Emit IR computing the element address of a rank-2 array with per-
 *   dimension lower bounds: both indexes are rebased against the bounds'
 *   lower_bound, range-checked against length (unsigned compare, so a
 *   negative rebased index also faults), then combined as
 *   ((idx1 * len2) + idx2) * elem_size + offsetof(MonoArray, vector).
 *   Compiled out on targets that emulate mul/div.
 */
4778 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4780 int bounds_reg = alloc_preg (cfg);
4781 int add_reg = alloc_ireg_mp (cfg);
4782 int mult_reg = alloc_preg (cfg);
4783 int mult2_reg = alloc_preg (cfg);
4784 int low1_reg = alloc_preg (cfg);
4785 int low2_reg = alloc_preg (cfg);
4786 int high1_reg = alloc_preg (cfg);
4787 int high2_reg = alloc_preg (cfg);
4788 int realidx1_reg = alloc_preg (cfg);
4789 int realidx2_reg = alloc_preg (cfg);
4790 int sum_reg = alloc_preg (cfg);
4791 int index1, index2, tmpreg;
4795 mono_class_init (klass);
4796 size = mono_class_array_element_size (klass);
4798 index1 = index_ins1->dreg;
4799 index2 = index_ins2->dreg;
4801 #if SIZEOF_REGISTER == 8
4802 /* The array reg is 64 bits but the index reg is only 32 */
4803 if (COMPILE_LLVM (cfg)) {
4806 tmpreg = alloc_preg (cfg);
4807 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
4809 tmpreg = alloc_preg (cfg);
4810 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
4814 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
4818 /* range checking */
4819 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4820 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: rebase against lower_bound, unsigned-compare against length. */
4822 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4823 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4824 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4825 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4826 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4827 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4828 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: same check, bounds entry at offset sizeof(MonoArrayBounds). */
4830 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4831 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4832 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4833 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4834 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
4835 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4836 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Address: ((realidx1 * len2) + realidx2) * size + vector offset. */
4838 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4839 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4840 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4841 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4842 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4844 ins->type = STACK_MP;
4846 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *   Dispatch element-address computation for CMETHOD's array class: rank 1
 *   uses the inline fast path, rank 2 uses the mul/div fast path when
 *   MONO_OPT_INTRINS is on, and anything else falls back to a call to the
 *   marshal-generated Address() helper.  IS_SET drops the trailing value
 *   parameter when computing the rank from the signature.
 */
4853 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4857 MonoMethod *addr_method;
4860 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4863 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
4865 #ifndef MONO_ARCH_EMULATE_MUL_DIV
4866 /* emit_ldelema_2 depends on OP_LMUL */
4867 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
4868 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* General case: call the generated array-address wrapper. */
4872 element_size = mono_class_array_element_size (cmethod->klass->element_class);
4873 addr_method = mono_marshal_get_array_address (rank, element_size);
4874 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/*
 * always_insert_breakpoint:
 *   Default break policy: honor every break instruction / Debugger.Break ().
 */
static MonoBreakPolicy
4880 always_insert_breakpoint (MonoMethod *method)
4882 return MONO_BREAK_POLICY_ALWAYS;
/* Current break policy; defaults to inserting every breakpoint. */
static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4888 * mono_set_break_policy:
4889 * policy_callback: the new callback function
4891 * Allow embedders to decide wherther to actually obey breakpoint instructions
4892 * (both break IL instructions and Debugger.Break () method calls), for example
4893 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4894 * untrusted or semi-trusted code.
4896 * @policy_callback will be called every time a break point instruction needs to
4897 * be inserted with the method argument being the method that calls Debugger.Break()
4898 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4899 * if it wants the breakpoint to not be effective in the given method.
4900 * #MONO_BREAK_POLICY_ALWAYS is the default.
4903 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
4905 if (policy_callback)
4906 break_policy_func = policy_callback;
/* NULL restores the default always-insert policy. */
4908 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:
 *   Query the installed break policy for METHOD.  (The misspelled name is
 *   the actual historical symbol; it is referenced elsewhere in this file
 *   and must not be renamed.)  Unknown policy values are warned about.
 */
4912 should_insert_brekpoint (MonoMethod *method) {
4913 switch (break_policy_func (method)) {
4914 case MONO_BREAK_POLICY_ALWAYS:
4916 case MONO_BREAK_POLICY_NEVER:
4918 case MONO_BREAK_POLICY_ON_DBG:
4919 g_warning ("mdb no longer supported");
4922 g_warning ("Incorrect value returned from break policy callback");
/* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *   Inline Array.Get/SetGenericValueImpl: compute the element address
 *   (no bounds check — callers have already done it), then copy between
 *   the element slot and the by-ref value argument.  Stores of reference
 *   types also get a write barrier.
 */
4929 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4931 MonoInst *addr, *store, *load;
4932 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4934 /* the bounds check is already done by the callers */
4935 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* set path: value -> array element (plus write barrier for references) */
4937 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4938 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
4939 if (mini_type_is_reference (cfg, fsig->params [2]))
4940 emit_write_barrier (cfg, addr, load);
/* get path: array element -> value */
4942 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4943 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/*
 * generic_class_is_reference_type:
 *   TRUE if KLASS is treated as a reference type by the JIT (delegates to
 *   mini_type_is_reference on the class's byval type).
 */
4950 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4952 return mini_type_is_reference (cfg, &klass->byval_arg);
/*
 * emit_array_store:
 *   Emit IR for a stelem-style store of sp[2] into sp[0][sp[1]].
 *   Reference-type stores with safety checks (except storing a known null)
 *   go through the virtual-stelemref helper, which performs the array
 *   covariance check.  Value types / unchecked stores compute the element
 *   address inline; constant indexes fold the offset.  Reference stores on
 *   the inline path still get a write barrier.
 */
4956 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
4958 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
4959 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
4960 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
4961 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
4962 MonoInst *iargs [3];
4965 mono_class_setup_vtable (obj_array);
4966 g_assert (helper->slot);
4968 if (sp [0]->type != STACK_OBJ)
4970 if (sp [2]->type != STACK_OBJ)
4977 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
4981 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
4984 // FIXME-VT: OP_ICONST optimization
4985 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
4986 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
4987 ins->opcode = OP_STOREV_MEMBASE;
4988 } else if (sp [1]->opcode == OP_ICONST) {
/* Constant index: fold size * index into the store offset. */
4989 int array_reg = sp [0]->dreg;
4990 int index_reg = sp [1]->dreg;
4991 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
4994 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
4995 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
4997 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
4998 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
4999 if (generic_class_is_reference_type (cfg, klass))
5000 emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 *   Inline Array.UnsafeStore/UnsafeLoad: element class comes from the
 *   signature (param for store, return for load); no bounds or covariance
 *   checks are emitted — that is the "unsafe" contract.
 */
5007 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5012 eklass = mono_class_from_mono_type (fsig->params [2]);
5014 eklass = mono_class_from_mono_type (fsig->ret);
5018 return emit_array_store (cfg, eklass, args, FALSE);
5020 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5021 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
/*
 * mini_emit_inst_for_ctor:
 *   Intrinsic expansion hook for constructor calls: tries SIMD intrinsics
 *   first (when MONO_OPT_SIMD is on), then falls through to the native-types
 *   intrinsics.  Returns NULL-or-instruction per the usual intrinsics
 *   convention.
 */
5027 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5029 #ifdef MONO_ARCH_SIMD_INTRINSICS
5030 MonoInst *ins = NULL;
5032 if (cfg->opt & MONO_OPT_SIMD) {
5033 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5039 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/*
 * emit_memory_barrier:
 *   Append an OP_MEMORY_BARRIER of the given KIND (e.g. FullBarrier) to the
 *   current basic block and return it.
 */
5043 emit_memory_barrier (MonoCompile *cfg, int kind)
5045 MonoInst *ins = NULL;
5046 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5047 MONO_ADD_INS (cfg->cbb, ins);
5048 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *   Intrinsics used only when compiling with the LLVM backend:
 *   Math.Sin/Cos/Sqrt/Abs(double) map to single FP opcodes, and with
 *   MONO_OPT_CMOV, Math.Min/Max map to the I4/U4/I8/U8 min/max opcodes.
 *   Returns the emitted instruction, or (per convention) no intrinsic.
 */
5054 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5056 MonoInst *ins = NULL;
5059 /* The LLVM backend supports these intrinsics */
5060 if (cmethod->klass == mono_defaults.math_class) {
5061 if (strcmp (cmethod->name, "Sin") == 0) {
5063 } else if (strcmp (cmethod->name, "Cos") == 0) {
5065 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
5067 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
/* Unary FP intrinsic: one R8 source, fresh FP dest. */
5072 MONO_INST_NEW (cfg, ins, opcode);
5073 ins->type = STACK_R8;
5074 ins->dreg = mono_alloc_freg (cfg);
5075 ins->sreg1 = args [0]->dreg;
5076 MONO_ADD_INS (cfg->cbb, ins);
5080 if (cfg->opt & MONO_OPT_CMOV) {
5081 if (strcmp (cmethod->name, "Min") == 0) {
5082 if (fsig->params [0]->type == MONO_TYPE_I4)
5084 if (fsig->params [0]->type == MONO_TYPE_U4)
5085 opcode = OP_IMIN_UN;
5086 else if (fsig->params [0]->type == MONO_TYPE_I8)
5088 else if (fsig->params [0]->type == MONO_TYPE_U8)
5089 opcode = OP_LMIN_UN;
5090 } else if (strcmp (cmethod->name, "Max") == 0) {
5091 if (fsig->params [0]->type == MONO_TYPE_I4)
5093 if (fsig->params [0]->type == MONO_TYPE_U4)
5094 opcode = OP_IMAX_UN;
5095 else if (fsig->params [0]->type == MONO_TYPE_I8)
5097 else if (fsig->params [0]->type == MONO_TYPE_U8)
5098 opcode = OP_LMAX_UN;
/* Binary min/max: stack type follows the first parameter's width. */
5103 MONO_INST_NEW (cfg, ins, opcode);
5104 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5105 ins->dreg = mono_alloc_ireg (cfg);
5106 ins->sreg1 = args [0]->dreg;
5107 ins->sreg2 = args [1]->dreg;
5108 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *   Intrinsics that are safe under generic sharing: currently only
 *   Array.UnsafeStore/UnsafeLoad, expanded via emit_array_unsafe_access.
 */
5116 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5118 if (cmethod->klass == mono_defaults.array_class) {
5119 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5120 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5121 if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5122 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
/*
 * mini_emit_inst_for_method:
 *   Central intrinsics dispatcher: given a call to CMETHOD, try to replace
 *   it with inline IR.  Recognized classes in this chunk: String,
 *   Object, Array, RuntimeHelpers, Thread, Monitor, Interlocked,
 *   Debugger/Environment, Math, and MonoMac/monotouch Selector.GetHandle.
 *   Falls through to SIMD, native-types, LLVM-specific and finally
 *   architecture-specific intrinsics.
 */
5129 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5131 MonoInst *ins = NULL;
/* Cached lookup of System.Runtime.CompilerServices.RuntimeHelpers. */
5133 static MonoClass *runtime_helpers_class = NULL;
5134 if (! runtime_helpers_class)
5135 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
5136 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* ---- System.String intrinsics ---- */
5138 if (cmethod->klass == mono_defaults.string_class) {
5139 if (strcmp (cmethod->name, "get_Chars") == 0) {
5140 int dreg = alloc_ireg (cfg);
5141 int index_reg = alloc_preg (cfg);
5142 int mult_reg = alloc_preg (cfg);
5143 int add_reg = alloc_preg (cfg);
5145 #if SIZEOF_REGISTER == 8
5146 /* The array reg is 64 bits but the index reg is only 32 */
5147 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5149 index_reg = args [1]->dreg;
5151 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5153 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5154 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
5155 add_reg = ins->dreg;
5156 /* Avoid a warning */
5158 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
/* Generic path: address = str + index*2 + offsetof(chars), 16-bit load. */
5161 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5162 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5163 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5164 add_reg, G_STRUCT_OFFSET (MonoString, chars));
5166 type_from_op (ins, NULL, NULL);
5168 } else if (strcmp (cmethod->name, "get_Length") == 0) {
5169 int dreg = alloc_ireg (cfg);
5170 /* Decompose later to allow more optimizations */
5171 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5172 ins->type = STACK_I4;
5173 ins->flags |= MONO_INST_FAULT;
5174 cfg->cbb->has_array_access = TRUE;
5175 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5178 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
5179 int mult_reg = alloc_preg (cfg);
5180 int add_reg = alloc_preg (cfg);
5182 /* The corlib functions check for oob already. */
5183 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
5184 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5185 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
5186 return cfg->cbb->last_ins;
/* ---- System.Object intrinsics ---- */
5189 } else if (cmethod->klass == mono_defaults.object_class) {
5191 if (strcmp (cmethod->name, "GetType") == 0) {
/* obj->vtable->type: two dependent loads, first one faults on null. */
5192 int dreg = alloc_ireg_ref (cfg);
5193 int vt_reg = alloc_preg (cfg);
5194 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
5195 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
5196 type_from_op (ins, NULL, NULL);
5199 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
5200 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
/* Hash from the (non-moving) object address, Knuth multiplicative constant. */
5201 int dreg = alloc_ireg (cfg);
5202 int t1 = alloc_ireg (cfg);
5204 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5205 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5206 ins->type = STACK_I4;
5210 } else if (strcmp (cmethod->name, ".ctor") == 0) {
/* Object..ctor () does nothing. */
5211 MONO_INST_NEW (cfg, ins, OP_NOP);
5212 MONO_ADD_INS (cfg->cbb, ins);
/* ---- System.Array intrinsics ---- */
5216 } else if (cmethod->klass == mono_defaults.array_class) {
/* Matches both GetGenericValueImpl and SetGenericValueImpl by suffix. */
5217 if (!cfg->gsharedvt && strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
5218 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
5220 #ifndef MONO_BIG_ARRAYS
5222 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5225 if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5226 int dreg = alloc_ireg (cfg);
5227 int bounds_reg = alloc_ireg_mp (cfg);
5228 MonoBasicBlock *end_bb, *szarray_bb;
5229 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5231 NEW_BBLOCK (cfg, end_bb);
5232 NEW_BBLOCK (cfg, szarray_bb);
/* bounds == NULL distinguishes szarrays from bounded arrays. */
5234 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5235 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
5236 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5237 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5238 /* Non-szarray case */
5240 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5241 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
5243 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5244 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5245 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5246 MONO_START_BB (cfg, szarray_bb);
/* szarray: GetLength(0) is max_length, GetLowerBound(0) is 0. */
5249 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5250 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
5252 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5253 MONO_START_BB (cfg, end_bb);
5255 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5256 ins->type = STACK_I4;
/* Remaining Array intrinsics all start with 'g' (getters). */
5262 if (cmethod->name [0] != 'g')
5265 if (strcmp (cmethod->name, "get_Rank") == 0) {
5266 int dreg = alloc_ireg (cfg);
5267 int vtable_reg = alloc_preg (cfg);
5268 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5269 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
5270 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5271 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
5272 type_from_op (ins, NULL, NULL);
5275 } else if (strcmp (cmethod->name, "get_Length") == 0) {
5276 int dreg = alloc_ireg (cfg);
5278 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5279 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
5280 type_from_op (ins, NULL, NULL);
/* ---- RuntimeHelpers ---- */
5285 } else if (cmethod->klass == runtime_helpers_class) {
5287 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
5288 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
/* ---- System.Threading.Thread ---- */
5292 } else if (cmethod->klass == mono_defaults.thread_class) {
5293 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
5294 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5295 MONO_ADD_INS (cfg->cbb, ins);
5297 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
5298 return emit_memory_barrier (cfg, FullBarrier);
/* ---- System.Threading.Monitor fast paths ---- */
5300 } else if (cmethod->klass == mono_defaults.monitor_class) {
5302 /* FIXME this should be integrated to the check below once we support the trampoline version */
5303 #if defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
5304 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) {
5305 MonoMethod *fast_method = NULL;
5307 /* Avoid infinite recursion */
5308 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN && !strcmp (cfg->method->name, "FastMonitorEnterV4"))
5311 fast_method = mono_monitor_get_fast_path (cmethod);
5315 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
5319 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
5320 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
5323 if (COMPILE_LLVM (cfg)) {
5325 * Pass the argument normally, the LLVM backend will handle the
5326 * calling convention problems.
5328 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
/* Non-LLVM: object is passed in a fixed register to the trampoline. */
5330 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
5331 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5332 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5333 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5336 return (MonoInst*)call;
5337 } else if (strcmp (cmethod->name, "Exit") == 0) {
5340 if (COMPILE_LLVM (cfg)) {
5341 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5343 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
5344 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5345 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5346 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5349 return (MonoInst*)call;
5351 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
5353 MonoMethod *fast_method = NULL;
5355 /* Avoid infinite recursion */
5356 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
5357 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
5358 strcmp (cfg->method->name, "FastMonitorExit") == 0))
5361 if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) ||
5362 strcmp (cmethod->name, "Exit") == 0)
5363 fast_method = mono_monitor_get_fast_path (cmethod);
5367 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
/* ---- System.Threading.Interlocked ---- */
5370 } else if (cmethod->klass->image == mono_defaults.corlib &&
5371 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5372 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
5375 #if SIZEOF_REGISTER == 8
5376 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
5377 /* 64 bit reads are already atomic */
5378 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
5379 ins->dreg = mono_alloc_preg (cfg);
5380 ins->inst_basereg = args [0]->dreg;
5381 ins->inst_offset = 0;
5382 MONO_ADD_INS (cfg->cbb, ins);
5386 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
5387 if (strcmp (cmethod->name, "Increment") == 0) {
/* Increment == atomic add of constant +1. */
5388 MonoInst *ins_iconst;
5391 if (fsig->params [0]->type == MONO_TYPE_I4) {
5392 opcode = OP_ATOMIC_ADD_NEW_I4;
5393 cfg->has_atomic_add_new_i4 = TRUE;
5395 #if SIZEOF_REGISTER == 8
5396 else if (fsig->params [0]->type == MONO_TYPE_I8)
5397 opcode = OP_ATOMIC_ADD_NEW_I8;
5400 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5401 ins_iconst->inst_c0 = 1;
5402 ins_iconst->dreg = mono_alloc_ireg (cfg);
5403 MONO_ADD_INS (cfg->cbb, ins_iconst);
5405 MONO_INST_NEW (cfg, ins, opcode);
5406 ins->dreg = mono_alloc_ireg (cfg);
5407 ins->inst_basereg = args [0]->dreg;
5408 ins->inst_offset = 0;
5409 ins->sreg2 = ins_iconst->dreg;
5410 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5411 MONO_ADD_INS (cfg->cbb, ins);
5413 } else if (strcmp (cmethod->name, "Decrement") == 0) {
/* Decrement == atomic add of constant -1. */
5414 MonoInst *ins_iconst;
5417 if (fsig->params [0]->type == MONO_TYPE_I4) {
5418 opcode = OP_ATOMIC_ADD_NEW_I4;
5419 cfg->has_atomic_add_new_i4 = TRUE;
5421 #if SIZEOF_REGISTER == 8
5422 else if (fsig->params [0]->type == MONO_TYPE_I8)
5423 opcode = OP_ATOMIC_ADD_NEW_I8;
5426 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5427 ins_iconst->inst_c0 = -1;
5428 ins_iconst->dreg = mono_alloc_ireg (cfg);
5429 MONO_ADD_INS (cfg->cbb, ins_iconst);
5431 MONO_INST_NEW (cfg, ins, opcode);
5432 ins->dreg = mono_alloc_ireg (cfg);
5433 ins->inst_basereg = args [0]->dreg;
5434 ins->inst_offset = 0;
5435 ins->sreg2 = ins_iconst->dreg;
5436 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5437 MONO_ADD_INS (cfg->cbb, ins);
5439 } else if (strcmp (cmethod->name, "Add") == 0) {
5442 if (fsig->params [0]->type == MONO_TYPE_I4) {
5443 opcode = OP_ATOMIC_ADD_NEW_I4;
5444 cfg->has_atomic_add_new_i4 = TRUE;
5446 #if SIZEOF_REGISTER == 8
5447 else if (fsig->params [0]->type == MONO_TYPE_I8)
5448 opcode = OP_ATOMIC_ADD_NEW_I8;
5452 MONO_INST_NEW (cfg, ins, opcode);
5453 ins->dreg = mono_alloc_ireg (cfg);
5454 ins->inst_basereg = args [0]->dreg;
5455 ins->inst_offset = 0;
5456 ins->sreg2 = args [1]->dreg;
5457 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5458 MONO_ADD_INS (cfg->cbb, ins);
5461 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
5463 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
5464 if (strcmp (cmethod->name, "Exchange") == 0) {
5466 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
5468 if (fsig->params [0]->type == MONO_TYPE_I4) {
5469 opcode = OP_ATOMIC_EXCHANGE_I4;
5470 cfg->has_atomic_exchange_i4 = TRUE;
5472 #if SIZEOF_REGISTER == 8
5473 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
5474 (fsig->params [0]->type == MONO_TYPE_I))
5475 opcode = OP_ATOMIC_EXCHANGE_I8;
5477 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I)) {
5478 opcode = OP_ATOMIC_EXCHANGE_I4;
5479 cfg->has_atomic_exchange_i4 = TRUE;
5485 MONO_INST_NEW (cfg, ins, opcode);
5486 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
5487 ins->inst_basereg = args [0]->dreg;
5488 ins->inst_offset = 0;
5489 ins->sreg2 = args [1]->dreg;
5490 MONO_ADD_INS (cfg->cbb, ins);
5492 switch (fsig->params [0]->type) {
5494 ins->type = STACK_I4;
5498 ins->type = STACK_I8;
5500 case MONO_TYPE_OBJECT:
5501 ins->type = STACK_OBJ;
5504 g_assert_not_reached ();
/* Storing a reference through Exchange needs a GC write barrier. */
5507 if (cfg->gen_write_barriers && is_ref)
5508 emit_write_barrier (cfg, args [0], args [1]);
5510 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
5512 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
5513 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
5515 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
5516 if (fsig->params [1]->type == MONO_TYPE_I4)
5518 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
5519 size = sizeof (gpointer);
5520 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
5523 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
5524 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5525 ins->sreg1 = args [0]->dreg;
5526 ins->sreg2 = args [1]->dreg;
5527 ins->sreg3 = args [2]->dreg;
5528 ins->type = STACK_I4;
5529 MONO_ADD_INS (cfg->cbb, ins);
5530 } else if (size == 8) {
5531 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
5532 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5533 ins->sreg1 = args [0]->dreg;
5534 ins->sreg2 = args [1]->dreg;
5535 ins->sreg3 = args [2]->dreg;
5536 ins->type = STACK_I8;
5537 MONO_ADD_INS (cfg->cbb, ins);
5539 /* g_assert_not_reached (); */
5541 if (cfg->gen_write_barriers && is_ref)
5542 emit_write_barrier (cfg, args [0], args [1]);
5544 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
5546 if (strcmp (cmethod->name, "MemoryBarrier") == 0)
5547 ins = emit_memory_barrier (cfg, FullBarrier);
/* ---- Misc corlib: Debugger.Break, Environment.get_IsRunningOnWindows ---- */
5551 } else if (cmethod->klass->image == mono_defaults.corlib) {
5552 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
5553 && strcmp (cmethod->klass->name, "Debugger") == 0) {
5554 if (should_insert_brekpoint (cfg->method)) {
5555 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
5557 MONO_INST_NEW (cfg, ins, OP_NOP);
5558 MONO_ADD_INS (cfg->cbb, ins);
5562 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
5563 && strcmp (cmethod->klass->name, "Environment") == 0) {
5565 EMIT_NEW_ICONST (cfg, ins, 1);
5567 EMIT_NEW_ICONST (cfg, ins, 0);
5571 } else if (cmethod->klass == mono_defaults.math_class) {
5573 * There is general branches code for Min/Max, but it does not work for
5575 * http://everything2.com/?node_id=1051618
/* ---- MonoMac/monotouch Selector.GetHandle AOT optimization ---- */
5577 } else if ((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") || !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) && !strcmp (cmethod->klass->name, "Selector") && !strcmp (cmethod->name, "GetHandle") && cfg->compile_aot && (args [0]->opcode == OP_GOT_ENTRY || args[0]->opcode == OP_AOTCONST)) {
5578 #ifdef MONO_ARCH_HAVE_OBJC_GET_SELECTOR
5580 MonoJumpInfoToken *ji;
5583 cfg->disable_llvm = TRUE;
/* Recover the ldstr token from the AOT const / GOT entry argument. */
5585 if (args [0]->opcode == OP_GOT_ENTRY) {
5586 pi = args [0]->inst_p1;
5587 g_assert (pi->opcode == OP_PATCH_INFO);
5588 g_assert ((int)pi->inst_p1 == MONO_PATCH_INFO_LDSTR);
5591 g_assert ((int)args [0]->inst_p1 == MONO_PATCH_INFO_LDSTR);
5592 ji = args [0]->inst_p0;
5595 NULLIFY_INS (args [0]);
/* Replace the call with a direct objc selector lookup on the literal. */
5598 s = mono_ldstr (cfg->domain, ji->image, mono_metadata_token_index (ji->token));
5599 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
5600 ins->dreg = mono_alloc_ireg (cfg);
5602 ins->inst_p0 = mono_string_to_utf8 (s);
5603 MONO_ADD_INS (cfg->cbb, ins);
/* ---- Fallbacks: SIMD, native types, LLVM, then arch-specific ---- */
5608 #ifdef MONO_ARCH_SIMD_INTRINSICS
5609 if (cfg->opt & MONO_OPT_SIMD) {
5610 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5616 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
5620 if (COMPILE_LLVM (cfg)) {
5621 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
5626 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
5630  * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect calls to well-known runtime-internal methods to faster
 * replacements.  Currently only String.InternalAllocateStr is handled:
 * when allocation profiling and domain-shared (MONO_OPT_SHARED) code are
 * both off, the call is rewritten into a call to the GC's managed string
 * allocator, passing the String vtable as the first argument.
 * Returns the redirected call instruction; the not-handled paths
 * (NOTE(review): not visible in this excerpt) presumably return NULL.
 */
5633 inline static MonoInst*
5634 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
5635 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
5637 if (method->klass == mono_defaults.string_class) {
5638 /* managed string allocation support */
5639 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
5640 MonoInst *iargs [2];
5641 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
5642 MonoMethod *managed_alloc = NULL;
5644 g_assert (vtable); /* Should not fail since it is System.String */
/* Cross compilers cannot call into the target GC to obtain the allocator. */
5645 #ifndef MONO_CROSS_COMPILE
5646 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE);
5650 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
5651 iargs [1] = args [0];
5652 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   Create an OP_LOCAL variable for every argument of SIG (including the
 * implicit 'this' when sig->hasthis) and store the corresponding evaluation
 * stack value SP[i] into it, recording them in cfg->args.  Used when setting
 * up the argument variables for an inlined method body.
 */
5659 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
5661 MonoInst *store, *temp;
5664 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* 'this' has no entry in sig->params, so derive its type from the stack value. */
5665 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
5668 * FIXME: We should use *args++ = sp [0], but that would mean the arg
5669 * would be different than the MonoInst's used to represent arguments, and
5670 * the ldelema implementation can't deal with that.
5671 * Solution: When ldelema is used on an inline argument, create a var for
5672 * it, emit ldelema on that var, and emit the saving code below in
5673 * inline_method () if needed.
5675 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
5676 cfg->args [i] = temp;
5677 /* This uses cfg->args [i] which is set by the preceding line */
5678 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
/* Preserve the IL location for debugging/sequence-point purposes. */
5679 store->cil_code = sp [0]->cil_code;
/*
 * Debugging aids: when enabled, inlining is restricted to call sites whose
 * callee/caller full name matches a prefix supplied via an environment
 * variable.  Useful for bisecting inlining-related miscompilations.
 */
5684 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
5685 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
5687 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Return TRUE if CALLED_METHOD's full name starts with the prefix given in
 * the MONO_INLINE_CALLED_METHOD_NAME_LIMIT environment variable.  The env
 * value is looked up once and cached in a function-static.
 * NOTE(review): the g_getenv result is cached without copying — safe only as
 * long as the environment is not mutated afterwards.
 */
5689 check_inline_called_method_name_limit (MonoMethod *called_method)
5692 static const char *limit = NULL;
5694 if (limit == NULL) {
5695 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
5697 if (limit_string != NULL)
5698 limit = limit_string;
/* An empty limit means "no restriction". */
5703 if (limit [0] != '\0') {
5704 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* Prefix comparison: only the first strlen (limit) chars must match. */
5706 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
5707 g_free (called_method_name);
5709 //return (strncmp_result <= 0);
5710 return (strncmp_result == 0);
5717 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Counterpart of check_inline_called_method_name_limit () for the caller:
 * return TRUE if CALLER_METHOD's full name starts with the prefix given in
 * the MONO_INLINE_CALLER_METHOD_NAME_LIMIT environment variable (cached in
 * a function-static on first use).
 */
5719 check_inline_caller_method_name_limit (MonoMethod *caller_method)
5722 static const char *limit = NULL;
5724 if (limit == NULL) {
5725 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
5726 if (limit_string != NULL) {
5727 limit = limit_string;
/* An empty limit means "no restriction". */
5733 if (limit [0] != '\0') {
5734 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
5736 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
5737 g_free (caller_method_name);
5739 //return (strncmp_result <= 0);
5740 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *
 *   Emit IR that initializes vreg DREG of type RTYPE to its zero value:
 * NULL for reference/pointer types, 0 for 32/64-bit integers, 0.0 for
 * R4/R8, and VZERO for value types — including generic instances and
 * VAR/MVAR type variables known to be valuetypes.
 */
5748 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
/* Static so its address can be embedded into the OP_R8CONST below. */
5750 static double r8_0 = 0.0;
5754 rtype = mini_replace_type (rtype);
5758 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
5759 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
5760 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5761 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
5762 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
5763 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
5764 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5765 ins->type = STACK_R8;
5766 ins->inst_p0 = (void*)&r8_0;
5768 MONO_ADD_INS (cfg->cbb, ins);
5769 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
5770 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
5771 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
5772 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
5773 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* Everything else is treated as a pointer-sized NULL. */
5775 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *
 *   Like emit_init_rvar (), but emits OP_DUMMY_* placeholder initializations
 * instead of real ones.  These keep the IR valid (every vreg has a def)
 * without generating actual code.  Mirrors the type dispatch of
 * emit_init_rvar () and falls back to it for types with no dummy opcode.
 */
5780 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
5784 rtype = mini_replace_type (rtype);
5788 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
5789 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
5790 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
5791 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
5792 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
5793 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
5794 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
5795 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
5796 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
5797 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
5798 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
5799 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
/* No dummy opcode for this type: emit a real zero initialization. */
5801 emit_init_rvar (cfg, dreg, rtype);
5805 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
/*
 * emit_init_local:
 *
 *   Initialize IL local LOCAL of type TYPE.  With soft-float, the zero value
 * is materialized into a fresh vreg first and then stored to the local, so
 * the store goes through the normal (decomposable) LOCSTORE path.
 */
5807 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
5809 MonoInst *var = cfg->locals [local];
5810 if (COMPILE_SOFT_FLOAT (cfg)) {
5812 int reg = alloc_dreg (cfg, var->type);
5813 emit_init_rvar (cfg, reg, type);
/* The last emitted instruction is the init whose result we store. */
5814 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
5817 emit_init_rvar (cfg, var->dreg, type);
5819 emit_dummy_init_rvar (cfg, var->dreg, type);
/*
 * inline_method:
 *
 *   Try to inline CMETHOD at the current emission point.  Saves the parts of
 * CFG that describe the method being compiled, retargets them at CMETHOD,
 * recursively runs mono_method_to_ir () over its body between a fresh start
 * bblock (sbblock) and end bblock (ebblock), then restores CFG.  The inline
 * is committed only when the reported cost is below the threshold or
 * INLINE_ALWAYS is set; otherwise the newly created bblocks are abandoned.
 * RVAR, when the callee is non-void, receives the return value and is
 * reloaded onto the stack on success.
 */
5824 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
5825 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_always)
5827 MonoInst *ins, *rvar = NULL;
5828 MonoMethodHeader *cheader;
5829 MonoBasicBlock *ebblock, *sbblock;
5831 MonoMethod *prev_inlined_method;
5832 MonoInst **prev_locals, **prev_args;
5833 MonoType **prev_arg_types;
5834 guint prev_real_offset;
5835 GHashTable *prev_cbb_hash;
5836 MonoBasicBlock **prev_cil_offset_to_bb;
5837 MonoBasicBlock *prev_cbb;
5838 unsigned char* prev_cil_start;
5839 guint32 prev_cil_offset_to_bb_len;
5840 MonoMethod *prev_current_method;
5841 MonoGenericContext *prev_generic_context;
5842 gboolean ret_var_set, prev_ret_var_set, virtual = FALSE;
5844 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional debugging restrictions on which methods may be inlined. */
5846 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
5847 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
5850 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
5851 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
5855 if (cfg->verbose_level > 2)
5856 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
/* Count each distinct inlineable method only once. */
5858 if (!cmethod->inline_info) {
5859 cfg->stat_inlineable_methods++;
5860 cmethod->inline_info = 1;
5863 /* allocate local variables */
5864 cheader = mono_method_get_header (cmethod);
5866 if (cheader == NULL || mono_loader_get_last_error ()) {
5867 MonoLoaderError *error = mono_loader_get_last_error ();
5870 mono_metadata_free_mh (cheader);
/* When inlining is mandatory, a loader failure must abort the compile. */
5871 if (inline_always && error)
5872 mono_cfg_set_exception (cfg, error->exception_type);
5874 mono_loader_clear_error ();
5878 /*Must verify before creating locals as it can cause the JIT to assert.*/
5879 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
5880 mono_metadata_free_mh (cheader);
5884 /* allocate space to store the return value */
5885 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
5886 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
5889 prev_locals = cfg->locals;
5890 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
5891 for (i = 0; i < cheader->num_locals; ++i)
5892 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
5894 /* allocate start and end blocks */
5895 /* This is needed so if the inline is aborted, we can clean up */
5896 NEW_BBLOCK (cfg, sbblock);
5897 sbblock->real_offset = real_offset;
5899 NEW_BBLOCK (cfg, ebblock);
5900 ebblock->block_num = cfg->num_bblocks++;
5901 ebblock->real_offset = real_offset;
/* Save the per-method compilation state before retargeting CFG at the callee. */
5903 prev_args = cfg->args;
5904 prev_arg_types = cfg->arg_types;
5905 prev_inlined_method = cfg->inlined_method;
5906 cfg->inlined_method = cmethod;
5907 cfg->ret_var_set = FALSE;
5908 cfg->inline_depth ++;
5909 prev_real_offset = cfg->real_offset;
5910 prev_cbb_hash = cfg->cbb_hash;
5911 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
5912 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
5913 prev_cil_start = cfg->cil_start;
5914 prev_cbb = cfg->cbb;
5915 prev_current_method = cfg->current_method;
5916 prev_generic_context = cfg->generic_context;
5917 prev_ret_var_set = cfg->ret_var_set;
/* A devirtualized callvirt still needs the null check on 'this'. */
5919 if (*ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
5922 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, virtual);
5924 ret_var_set = cfg->ret_var_set;
/* Restore the saved compilation state. */
5926 cfg->inlined_method = prev_inlined_method;
5927 cfg->real_offset = prev_real_offset;
5928 cfg->cbb_hash = prev_cbb_hash;
5929 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
5930 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
5931 cfg->cil_start = prev_cil_start;
5932 cfg->locals = prev_locals;
5933 cfg->args = prev_args;
5934 cfg->arg_types = prev_arg_types;
5935 cfg->current_method = prev_current_method;
5936 cfg->generic_context = prev_generic_context;
5937 cfg->ret_var_set = prev_ret_var_set;
5938 cfg->inline_depth --;
/* Success path: cost below threshold (60) or inlining was mandatory. */
5940 if ((costs >= 0 && costs < 60) || inline_always) {
5941 if (cfg->verbose_level > 2)
5942 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
5944 cfg->stat_inlined_methods++;
5946 /* always add some code to avoid block split failures */
5947 MONO_INST_NEW (cfg, ins, OP_NOP);
5948 MONO_ADD_INS (prev_cbb, ins);
5950 prev_cbb->next_bb = sbblock;
5951 link_bblock (cfg, prev_cbb, sbblock);
5954 * Get rid of the begin and end bblocks if possible to aid local
5957 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
5959 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
5960 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
5962 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
5963 MonoBasicBlock *prev = ebblock->in_bb [0];
5964 mono_merge_basic_blocks (cfg, prev, ebblock);
5966 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
5967 mono_merge_basic_blocks (cfg, prev_cbb, prev);
5968 cfg->cbb = prev_cbb;
5972 * It's possible that the rvar is set in some prev bblock, but not in others.
5978 for (i = 0; i < ebblock->in_count; ++i) {
5979 bb = ebblock->in_bb [i];
/* Unreached predecessors never set rvar; give it a dummy definition. */
5981 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
5984 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
5994 * If the inlined method contains only a throw, then the ret var is not
5995 * set, so set it to a dummy value.
5998 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
/* Push the callee's return value back onto the evaluation stack. */
6000 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
6003 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Abort path: too expensive — discard the partial inline and recover. */
6006 if (cfg->verbose_level > 2)
6007 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
6008 cfg->exception_type = MONO_EXCEPTION_NONE;
6009 mono_loader_clear_error ();
6011 /* This gets rid of the newly added bblocks */
6012 cfg->cbb = prev_cbb;
6014 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6019 * Some of these comments may well be out-of-date.
6020 * Design decisions: we do a single pass over the IL code (and we do bblock
6021 * splitting/merging in the few cases when it's required: a back jump to an IL
6022 * address that was not already seen as bblock starting point).
6023 * Code is validated as we go (full verification is still better left to metadata/verify.c).
6024 * Complex operations are decomposed in simpler ones right away. We need to let the
6025 * arch-specific code peek and poke inside this process somehow (except when the
6026 * optimizations can take advantage of the full semantic info of coarse opcodes).
6027 * All the opcodes of the form opcode.s are 'normalized' to opcode.
6028 * MonoInst->opcode initially is the IL opcode or some simplification of that
6029 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
6030 * opcode with value bigger than OP_LAST.
6031 * At this point the IR can be handed over to an interpreter, a dumb code generator
6032 * or to the optimizing code generator that will translate it to SSA form.
6034 * Profiling directed optimizations.
6035 * We may compile by default with few or no optimizations and instrument the code
6036 * or the user may indicate what methods to optimize the most either in a config file
6037 * or through repeated runs where the compiler applies offline the optimizations to
6038 * each method and then decides if it was worth it.
/*
 * Verification helper macros used inside mono_method_to_ir ().  They rely on
 * locals of that function being in scope (sp, stack_start, header, num_args,
 * ip, end, cfg) and bail out via UNVERIFIED / LOAD_ERROR on failure.
 */
6041 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
6042 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
6043 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
6044 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
6045 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
6046 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
6047 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
6048 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; LOAD_ERROR;}
6050 /* offset from br.s -> br like opcodes */
6051 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return whether the IL address IP still belongs to bblock BB, i.e. no
 * other bblock starts at that offset in cfg->cil_offset_to_bb.
 */
6054 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
6056 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
6058 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   First pass over the IL stream [START, END): decode each opcode and create
 * basic blocks (via GET_BBLOCK) at every branch target and at the
 * instruction following each branch/switch, so the main translation pass can
 * assume bblock boundaries are known.  Bblocks ending in CEE_THROW are
 * marked out_of_line so they can be moved out of the hot path.
 */
6062 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
6064 unsigned char *ip = start;
6065 unsigned char *target;
6068 MonoBasicBlock *bblock;
6069 const MonoOpcode *opcode;
6072 cli_addr = ip - start;
6073 i = mono_opcode_value ((const guint8 **)&ip, end);
6076 opcode = &mono_opcodes [i];
/* Advance ip past the operand; only branch operands create bblocks. */
6077 switch (opcode->argument) {
6078 case MonoInlineNone:
6081 case MonoInlineString:
6082 case MonoInlineType:
6083 case MonoInlineField:
6084 case MonoInlineMethod:
6087 case MonoShortInlineR:
6094 case MonoShortInlineVar:
6095 case MonoShortInlineI:
6098 case MonoShortInlineBrTarget:
/* 8-bit signed displacement, relative to the end of the 2-byte opcode. */
6099 target = start + cli_addr + 2 + (signed char)ip [1];
6100 GET_BBLOCK (cfg, bblock, target);
6103 GET_BBLOCK (cfg, bblock, ip);
6105 case MonoInlineBrTarget:
/* 32-bit signed displacement, relative to the end of the 5-byte opcode. */
6106 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
6107 GET_BBLOCK (cfg, bblock, target);
6110 GET_BBLOCK (cfg, bblock, ip);
6112 case MonoInlineSwitch: {
6113 guint32 n = read32 (ip + 1);
/* Switch targets are relative to the end of the whole switch instruction. */
6116 cli_addr += 5 + 4 * n;
6117 target = start + cli_addr;
6118 GET_BBLOCK (cfg, bblock, target);
6120 for (j = 0; j < n; ++j) {
6121 target = start + cli_addr + (gint32)read32 (ip);
6122 GET_BBLOCK (cfg, bblock, target);
6132 g_assert_not_reached ();
6135 if (i == CEE_THROW) {
6136 unsigned char *bb_start = ip - 1;
6138 /* Find the start of the bblock containing the throw */
6140 while ((bb_start >= start) && !bblock) {
6141 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
6145 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN to a MonoMethod in the context of M.  For wrapper methods
 * the token indexes the wrapper's own data and the result is inflated with
 * CONTEXT if given; otherwise the token is resolved through the metadata.
 * Open constructed types are allowed (see mini_get_method () for the
 * checked variant).
 */
6155 static inline MonoMethod *
6156 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6160 if (m->wrapper_type != MONO_WRAPPER_NONE) {
6161 method = mono_method_get_wrapper_data (m, token);
6163 method = mono_class_inflate_generic_method (method, context);
6165 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when not compiling with generic
 * sharing, a method on an open constructed type is rejected (the branch body
 * is outside this excerpt — presumably it NULLs/flags the result; confirm
 * against the full source).
 */
6171 static inline MonoMethod *
6172 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6174 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
6176 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass in the context of METHOD: wrapper data for
 * wrapper methods (inflated with CONTEXT if given), otherwise a metadata
 * lookup.  The class is initialized before being returned.
 */
6182 static inline MonoClass*
6183 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
6187 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6188 klass = mono_method_get_wrapper_data (method, token);
6190 klass = mono_class_inflate_generic_class (klass, context);
6192 klass = mono_class_get_full (method->klass->image, token, context);
6195 mono_class_init (klass);
/*
 * mini_get_signature:
 *
 *   Resolve TOKEN to a MonoMethodSignature: wrapper data (inflated with
 * CONTEXT, asserting success) for wrapper methods, otherwise parsed from
 * the method's image metadata.
 */
6199 static inline MonoMethodSignature*
6200 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
6202 MonoMethodSignature *fsig;
6204 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6207 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6209 fsig = mono_inflate_generic_signature (fsig, context, &error);
6211 g_assert (mono_error_ok (&error));
6214 fsig = mono_metadata_parse_signature (method->klass->image, token);
6220  * Returns TRUE if the JIT should abort inlining because "callee"
6221  * is influenced by security attributes.
/*
 * check_linkdemand:
 *
 *   Evaluate CAS link demands for a CALLER -> CALLEE call.  For an ECMA
 * link demand, code throwing a SecurityException is emitted before the
 * call; for other failures the compile is flagged with
 * MONO_EXCEPTION_SECURITY_LINKDEMAND (without clobbering an earlier
 * exception).  MONO_JIT_SECURITY_OK means no action is needed.
 */
6224 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
/* Declsec only matters when compiling inlined code (cfg->method != caller). */
6228 if ((cfg->method != caller) && mono_security_method_has_declsec (callee)) {
6232 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
6233 if (result == MONO_JIT_SECURITY_OK)
6236 if (result == MONO_JIT_LINKDEMAND_ECMA) {
6237 /* Generate code to throw a SecurityException before the actual call/link */
6238 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6241 NEW_ICONST (cfg, args [0], 4);
6242 NEW_METHODCONST (cfg, args [1], caller);
6243 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
6244 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
6245 /* don't hide previous results */
6246 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
6247 cfg->exception_data = result;
/*
 * throw_exception:
 *
 *   Return (lazily resolving and caching in a function-static) the
 * SecurityManager.ThrowException (1 arg) helper method used to raise
 * CoreCLR security exceptions from JITted code.
 */
6255 throw_exception (void)
6257 static MonoMethod *method = NULL;
6260 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6261 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit a call to SecurityManager.ThrowException (EX), raising the given
 * (pre-created) exception object at runtime when this code path executes.
 */
6268 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
6270 MonoMethod *thrower = throw_exception ();
6273 EMIT_NEW_PCONST (cfg, args [0], ex);
6274 mono_emit_method_call (cfg, thrower, args, NULL);
6278  * Return the original method if a wrapper is specified. We can only access
6279  * the custom attributes from the original method.
6282 get_original_method (MonoMethod *method)
6284 if (method->wrapper_type == MONO_WRAPPER_NONE)
6287 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
6288 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
6291 /* in other cases we need to find the original method */
6292 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check for field access: if CALLER (unwrapped via
 * get_original_method ()) may not access FIELD, emit code that throws the
 * returned security exception at runtime.
 */
6296 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
6297 MonoBasicBlock *bblock, unsigned char *ip)
6299 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6300 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
6302 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check for method calls: if CALLER (unwrapped via
 * get_original_method ()) may not call CALLEE, emit code that throws the
 * returned security exception at runtime.
 */
6306 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
6307 MonoBasicBlock *bblock, unsigned char *ip)
6309 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6310 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
6312 emit_throw_exception (cfg, ex);
6316  * Check that the IL instructions at ip are the array initialization
6317  * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *
 *   Pattern-match the "dup; ldtoken <field>; call RuntimeHelpers::
 * InitializeArray" sequence following a newarr and, when it matches and the
 * element type is safely copyable, return a pointer to the field's RVA data
 * (or, for AOT, the RVA itself as a pointer-sized integer) so the array can
 * be initialized with a direct memory copy.
 */
6320 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
6323 * newarr[System.Int32]
6325 * ldtoken field valuetype ...
6326 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip [5] == 0x4 checks the ldtoken token's table byte (a Field token). */
6328 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
6329 guint32 token = read32 (ip + 7);
6330 guint32 field_token = read32 (ip + 2);
6331 guint32 field_index = field_token & 0xffffff;
6333 const char *data_ptr;
6335 MonoMethod *cmethod;
6336 MonoClass *dummy_class;
6337 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
6343 *out_field_token = field_token;
6345 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Only the real RuntimeHelpers.InitializeArray from corlib qualifies. */
6348 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
6350 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
6351 case MONO_TYPE_BOOLEAN:
6355 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
6356 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
6357 case MONO_TYPE_CHAR:
/* The blob must be at least as large as the data we intend to copy. */
6374 if (size > mono_type_size (field->type, &dummy_align))
6377 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
6378 if (!method->klass->image->dynamic) {
6379 field_index = read32 (ip + 2) & 0xffffff;
6380 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
6381 data_ptr = mono_image_rva_map (method->klass->image, rva);
6382 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
6383 /* for aot code we do the lookup on load */
6384 if (aot && data_ptr)
6385 return GUINT_TO_POINTER (rva);
6387 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* Dynamic (SRE) images have no RVA mapping; read the data directly. */
6389 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Flag the compile with MONO_EXCEPTION_INVALID_PROGRAM and build a
 * diagnostic message naming METHOD and disassembling the offending IL
 * instruction at IP (or noting an empty body).
 */
6397 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
6399 char *method_fname = mono_method_full_name (method, TRUE);
6401 MonoMethodHeader *header = mono_method_get_header (method);
6403 if (header->code_size == 0)
6404 method_code = g_strdup ("method body is empty.");
6406 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
6407 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
6408 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
6409 g_free (method_fname);
6410 g_free (method_code);
/* The header is owned by the cfg now and freed with it. */
6411 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *
 *   Abort the compile with a pre-built managed exception object.  The
 * object is registered as a GC root since cfg->exception_ptr is not
 * otherwise scanned.
 */
6415 set_exception_object (MonoCompile *cfg, MonoException *exception)
6417 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
6418 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
6419 cfg->exception_ptr = exception;
/*
 * emit_stloc_ir:
 *
 *   Emit a store of stack value *SP into IL local N.  When the store would
 * be a plain register move and *SP is the just-emitted constant load, the
 * move is elided by retargeting the constant's dreg at the local directly.
 */
6423 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
6426 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
6427 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
6428 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
6429 /* Optimize reg-reg moves away */
6431 * Can't optimize other opcodes, since sp[0] might point to
6432 * the last ins of a decomposed opcode.
6434 sp [0]->dreg = (cfg)->locals [n]->dreg;
6436 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
6441  * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *
 *   Recognize "ldloca <n>; initobj <type>" (within the same bblock) and
 * replace it with a direct zero-initialization of the local, avoiding the
 * address-taken local that would block later optimizations.  Returns the
 * IP past the consumed sequence on success (tail not shown in this
 * excerpt; NOTE(review): confirm the fall-through return against the full
 * source).
 */
6444 static inline unsigned char *
6445 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
6455 local = read16 (ip + 2);
/* CEE_PREFIX1 + CEE_INITOBJ is the two-byte encoding of 'initobj'. */
6459 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
6460 /* From the INITOBJ case */
6461 token = read32 (ip + 2);
6462 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
6463 CHECK_TYPELOAD (klass);
6464 type = mini_replace_type (&klass->byval_arg);
6465 emit_init_local (cfg, local, type, TRUE);
/*
 * is_exception_class:
 *
 *   Walk up the parent chain of CLASS to determine whether it derives from
 * (or is) System.Exception.
 */
6473 is_exception_class (MonoClass *class)
6476 if (class == mono_defaults.exception_class)
6478 class = class->parent;
6484  * is_jit_optimizer_disabled:
 *
 * Determine whether M's assembly has a DebuggableAttribute with the
6488  * IsJITOptimizerDisabled flag set.
6490 is_jit_optimizer_disabled (MonoMethod *m)
6492 MonoAssembly *ass = m->klass->image->assembly;
6493 MonoCustomAttrInfo* attrs;
6494 static MonoClass *klass;
6496 gboolean val = FALSE;
/* Fast path: the result is computed once and cached on the assembly. */
6499 if (ass->jit_optimizer_disabled_inited)
6500 return ass->jit_optimizer_disabled;
6503 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
6506 ass->jit_optimizer_disabled = FALSE;
/* Publish the value before the inited flag so racing readers never see
 * inited==TRUE with a stale value. */
6507 mono_memory_barrier ();
6508 ass->jit_optimizer_disabled_inited = TRUE;
6512 attrs = mono_custom_attrs_from_assembly (ass);
6514 for (i = 0; i < attrs->num_attrs; ++i) {
6515 MonoCustomAttrEntry *attr = &attrs->attrs [i];
6518 MonoMethodSignature *sig;
6520 if (!attr->ctor || attr->ctor->klass != klass)
6522 /* Decode the attribute. See reflection.c */
6523 len = attr->data_size;
6524 p = (const char*)attr->data;
/* Custom attribute blobs start with the 0x0001 prolog. */
6525 g_assert (read16 (p) == 0x0001);
6528 // FIXME: Support named parameters
6529 sig = mono_method_signature (attr->ctor);
/* Only the DebuggableAttribute(bool, bool) constructor is handled. */
6530 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
6532 /* Two boolean arguments */
6536 mono_custom_attrs_free (attrs);
6539 ass->jit_optimizer_disabled = val;
6540 mono_memory_barrier ();
6541 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Decide whether a CIL tail. call from METHOD to CMETHOD can actually be
 * compiled as a tail call.  Starts from the architecture's capability (or a
 * signature-equality check when there is no OP_TAIL_CALL support) and then
 * vetoes cases where the callee could observe the caller's dying stack
 * frame or where the calling convention prevents frame reuse.
 */
6547 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
6549 gboolean supported_tail_call;
6552 #ifdef MONO_ARCH_HAVE_OP_TAIL_CALL
6553 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
6555 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6558 for (i = 0; i < fsig->param_count; ++i) {
6559 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
6560 /* These can point to the current method's stack */
6561 supported_tail_call = FALSE;
6563 if (fsig->hasthis && cmethod->klass->valuetype)
6564 /* this might point to the current method's stack */
6565 supported_tail_call = FALSE;
6566 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
6567 supported_tail_call = FALSE;
/* An LMF frame must stay live for the duration of the call. */
6568 if (cfg->method->save_lmf)
6569 supported_tail_call = FALSE;
6570 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
6571 supported_tail_call = FALSE;
/* Only plain 'call' sites are supported (no callvirt/calli). */
6572 if (call_opcode != CEE_CALL)
6573 supported_tail_call = FALSE;
6575 /* Debugging support */
6577 if (supported_tail_call) {
/* mono_debug_count () lets tail calls be bisected at runtime. */
6578 if (!mono_debug_count ())
6579 supported_tail_call = FALSE;
6583 return supported_tail_call;
6586 /* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
6587  * it to the thread local value based on the tls_offset field. Every other kind of access to
6588  * the field causes an assert.
/*
 * is_magic_tls_access:
 *
 *   Return whether FIELD is corlib's ThreadLocal`1.tlsdata, i.e. the field
 * whose ldflda is rewritten by create_magic_tls_access ().
 * Note strcmp () returns non-zero on mismatch, so each 'if' rejects early.
 */
6591 is_magic_tls_access (MonoClassField *field)
6593 if (strcmp (field->name, "tlsdata"))
6595 if (strcmp (field->parent->name, "ThreadLocal`1"))
6597 return field->parent->image == mono_defaults.corlib;
6600 /* emits the code needed to access a managed tls var (like ThreadStatic)
6601  * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
6602  * pointer for the current thread.
6603  * Returns the MonoInst* representing the address of the tls var.
/*
 * emit_managed_static_data_access:
 *
 *   Inline the thread-static slot lookup: index thread->static_data with the
 * chunk index packed into the top byte of the offset, then add the low
 * 24 bits as the byte offset within that chunk.
 */
6606 emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
6609 int static_data_reg, array_reg, dreg;
6610 int offset2_reg, idx_reg;
6611 // inlined access to the tls data
6612 // idx = (offset >> 24) - 1;
6613 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
6614 static_data_reg = alloc_ireg (cfg);
6615 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
6616 idx_reg = alloc_ireg (cfg);
6617 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
6618 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
/* Scale the index by sizeof (gpointer) to address the pointer array. */
6619 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
6620 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
6621 array_reg = alloc_ireg (cfg);
6622 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
6623 offset2_reg = alloc_ireg (cfg);
6624 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
6625 dreg = alloc_ireg (cfg);
6626 EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
6631  * redirect access to the tlsdata field to the tls var given by the tls_offset field.
6632  * this address is cached per-method in cached_tls_addr.
/*
 * create_magic_tls_access:
 *
 *   Compute (and cache in *CACHED_TLS_ADDR for reuse within the method) the
 * address of a ThreadLocal<T>'s storage: load tls_offset from the
 * ThreadLocal instance, obtain the current MonoInternalThread (via the
 * intrinsic when available, otherwise an icall), and index its static data
 * with emit_managed_static_data_access ().
 */
6635 create_magic_tls_access (MonoCompile *cfg, MonoClassField *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
6637 MonoInst *load, *addr, *temp, *store, *thread_ins;
6638 MonoClassField *offset_field;
/* Reuse the per-method cached address when it was computed already. */
6640 if (*cached_tls_addr) {
6641 EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
6644 thread_ins = mono_get_thread_intrinsic (cfg);
6645 offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");
6647 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
6649 MONO_ADD_INS (cfg->cbb, thread_ins);
/* No thread intrinsic on this target: fall back to the runtime icall. */
6651 MonoMethod *thread_method;
6652 thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
6653 thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
6655 addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
6656 addr->klass = mono_class_from_mono_type (tls_field->type);
6657 addr->type = STACK_MP;
/* Cache the computed address in a temp for later accesses. */
6658 *cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
6659 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);
6661 EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
6666 * mono_method_to_ir:
6668 * Translate the .net IL into linear IR.
6671 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
6672 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
6673 guint inline_offset, gboolean is_virtual_call)
6676 MonoInst *ins, **sp, **stack_start;
6677 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
6678 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
6679 MonoMethod *cmethod, *method_definition;
6680 MonoInst **arg_array;
6681 MonoMethodHeader *header;
6683 guint32 token, ins_flag;
6685 MonoClass *constrained_call = NULL;
6686 unsigned char *ip, *end, *target, *err_pos;
6687 MonoMethodSignature *sig;
6688 MonoGenericContext *generic_context = NULL;
6689 MonoGenericContainer *generic_container = NULL;
6690 MonoType **param_types;
6691 int i, n, start_new_bblock, dreg;
6692 int num_calls = 0, inline_costs = 0;
6693 int breakpoint_id = 0;
6695 MonoBoolean security, pinvoke;
6696 MonoSecurityManager* secman = NULL;
6697 MonoDeclSecurityActions actions;
6698 GSList *class_inits = NULL;
6699 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
6701 gboolean init_locals, seq_points, skip_dead_blocks;
6702 gboolean disable_inline, sym_seq_points = FALSE;
6703 MonoInst *cached_tls_addr = NULL;
6704 MonoDebugMethodInfo *minfo;
6705 MonoBitSet *seq_point_locs = NULL;
6706 MonoBitSet *seq_point_set_locs = NULL;
6708 disable_inline = is_jit_optimizer_disabled (method);
6710 /* serialization and xdomain stuff may need access to private fields and methods */
6711 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
6712 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
6713 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
6714 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
6715 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
6716 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
6718 dont_verify |= mono_security_smcs_hack_enabled ();
6720 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
6721 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
6722 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
6723 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
6724 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
6726 image = method->klass->image;
6727 header = mono_method_get_header (method);
6729 MonoLoaderError *error;
6731 if ((error = mono_loader_get_last_error ())) {
6732 mono_cfg_set_exception (cfg, error->exception_type);
6734 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
6735 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
6737 goto exception_exit;
6739 generic_container = mono_method_get_generic_container (method);
6740 sig = mono_method_signature (method);
6741 num_args = sig->hasthis + sig->param_count;
6742 ip = (unsigned char*)header->code;
6743 cfg->cil_start = ip;
6744 end = ip + header->code_size;
6745 cfg->stat_cil_code_size += header->code_size;
6747 seq_points = cfg->gen_seq_points && cfg->method == method;
6748 #ifdef PLATFORM_ANDROID
6749 seq_points &= cfg->method->wrapper_type == MONO_WRAPPER_NONE;
6752 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
6753 /* We could hit a seq point before attaching to the JIT (#8338) */
6757 if (cfg->gen_seq_points && cfg->method == method) {
6758 minfo = mono_debug_lookup_method (method);
6760 int i, n_il_offsets;
6764 mono_debug_symfile_get_line_numbers_full (minfo, NULL, NULL, &n_il_offsets, &il_offsets, &line_numbers, NULL, NULL);
6765 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6766 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6767 sym_seq_points = TRUE;
6768 for (i = 0; i < n_il_offsets; ++i) {
6769 if (il_offsets [i] < header->code_size)
6770 mono_bitset_set_fast (seq_point_locs, il_offsets [i]);
6772 g_free (il_offsets);
6773 g_free (line_numbers);
6778 * Methods without init_locals set could cause asserts in various passes
6779 * (#497220). To work around this, we emit dummy initialization opcodes
6780 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
6781 * on some platforms.
6783 if ((cfg->opt & MONO_OPT_UNSAFE) && ARCH_HAVE_DUMMY_INIT)
6784 init_locals = header->init_locals;
6788 method_definition = method;
6789 while (method_definition->is_inflated) {
6790 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
6791 method_definition = imethod->declaring;
6794 /* SkipVerification is not allowed if core-clr is enabled */
6795 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
6797 dont_verify_stloc = TRUE;
6800 if (sig->is_inflated)
6801 generic_context = mono_method_get_context (method);
6802 else if (generic_container)
6803 generic_context = &generic_container->context;
6804 cfg->generic_context = generic_context;
6806 if (!cfg->generic_sharing_context)
6807 g_assert (!sig->has_type_parameters);
6809 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
6810 g_assert (method->is_inflated);
6811 g_assert (mono_method_get_context (method)->method_inst);
6813 if (method->is_inflated && mono_method_get_context (method)->method_inst)
6814 g_assert (sig->generic_param_count);
6816 if (cfg->method == method) {
6817 cfg->real_offset = 0;
6819 cfg->real_offset = inline_offset;
6822 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
6823 cfg->cil_offset_to_bb_len = header->code_size;
6825 cfg->current_method = method;
6827 if (cfg->verbose_level > 2)
6828 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
6830 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
6832 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
6833 for (n = 0; n < sig->param_count; ++n)
6834 param_types [n + sig->hasthis] = sig->params [n];
6835 cfg->arg_types = param_types;
6837 dont_inline = g_list_prepend (dont_inline, method);
6838 if (cfg->method == method) {
6840 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
6841 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
6844 NEW_BBLOCK (cfg, start_bblock);
6845 cfg->bb_entry = start_bblock;
6846 start_bblock->cil_code = NULL;
6847 start_bblock->cil_length = 0;
6848 #if defined(__native_client_codegen__)
6849 MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
6850 ins->dreg = alloc_dreg (cfg, STACK_I4);
6851 MONO_ADD_INS (start_bblock, ins);
6855 NEW_BBLOCK (cfg, end_bblock);
6856 cfg->bb_exit = end_bblock;
6857 end_bblock->cil_code = NULL;
6858 end_bblock->cil_length = 0;
6859 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
6860 g_assert (cfg->num_bblocks == 2);
6862 arg_array = cfg->args;
6864 if (header->num_clauses) {
6865 cfg->spvars = g_hash_table_new (NULL, NULL);
6866 cfg->exvars = g_hash_table_new (NULL, NULL);
6868 /* handle exception clauses */
6869 for (i = 0; i < header->num_clauses; ++i) {
6870 MonoBasicBlock *try_bb;
6871 MonoExceptionClause *clause = &header->clauses [i];
6872 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
6873 try_bb->real_offset = clause->try_offset;
6874 try_bb->try_start = TRUE;
6875 try_bb->region = ((i + 1) << 8) | clause->flags;
6876 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
6877 tblock->real_offset = clause->handler_offset;
6878 tblock->flags |= BB_EXCEPTION_HANDLER;
6881 * Linking the try block with the EH block hinders inlining as we won't be able to
6882 * merge the bblocks from inlining and produce an artificial hole for no good reason.
6884 if (COMPILE_LLVM (cfg))
6885 link_bblock (cfg, try_bb, tblock);
6887 if (*(ip + clause->handler_offset) == CEE_POP)
6888 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
6890 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
6891 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
6892 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
6893 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
6894 MONO_ADD_INS (tblock, ins);
6896 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) {
6897 /* finally clauses already have a seq point */
6898 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
6899 MONO_ADD_INS (tblock, ins);
6902 /* todo: is a fault block unsafe to optimize? */
6903 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
6904 tblock->flags |= BB_EXCEPTION_UNSAFE;
6908 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
6910 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
6912 /* catch and filter blocks get the exception object on the stack */
6913 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
6914 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
6915 MonoInst *dummy_use;
6917 /* mostly like handle_stack_args (), but just sets the input args */
6918 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
6919 tblock->in_scount = 1;
6920 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
6921 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
6924 * Add a dummy use for the exvar so its liveness info will be
6928 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
6930 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
6931 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
6932 tblock->flags |= BB_EXCEPTION_HANDLER;
6933 tblock->real_offset = clause->data.filter_offset;
6934 tblock->in_scount = 1;
6935 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
6936 /* The filter block shares the exvar with the handler block */
6937 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
6938 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
6939 MONO_ADD_INS (tblock, ins);
6943 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
6944 clause->data.catch_class &&
6945 cfg->generic_sharing_context &&
6946 mono_class_check_context_used (clause->data.catch_class)) {
6948 * In shared generic code with catch
6949 * clauses containing type variables
6950 * the exception handling code has to
6951 * be able to get to the rgctx.
6952 * Therefore we have to make sure that
6953 * the vtable/mrgctx argument (for
6954 * static or generic methods) or the
6955 * "this" argument (for non-static
6956 * methods) are live.
6958 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
6959 mini_method_get_context (method)->method_inst ||
6960 method->klass->valuetype) {
6961 mono_get_vtable_var (cfg);
6963 MonoInst *dummy_use;
6965 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
6970 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
6971 cfg->cbb = start_bblock;
6972 cfg->args = arg_array;
6973 mono_save_args (cfg, sig, inline_args);
6976 /* FIRST CODE BLOCK */
6977 NEW_BBLOCK (cfg, bblock);
6978 bblock->cil_code = ip;
6982 ADD_BBLOCK (cfg, bblock);
6984 if (cfg->method == method) {
6985 breakpoint_id = mono_debugger_method_has_breakpoint (method);
6986 if (breakpoint_id) {
6987 MONO_INST_NEW (cfg, ins, OP_BREAK);
6988 MONO_ADD_INS (bblock, ins);
6992 if (mono_security_cas_enabled ())
6993 secman = mono_security_manager_get_methods ();
6995 security = (secman && mono_security_method_has_declsec (method));
6996 /* at this point having security doesn't mean we have any code to generate */
6997 if (security && (cfg->method == method)) {
6998 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
6999 * And we do not want to enter the next section (with allocation) if we
7000 * have nothing to generate */
7001 security = mono_declsec_get_demands (method, &actions);
7004 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
7005 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
7007 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7008 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
7009 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
7011 /* unless the method or its class has the [SuppressUnmanagedCodeSecurity] attribute */
7012 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
7016 mono_custom_attrs_free (custom);
7019 custom = mono_custom_attrs_from_class (wrapped->klass);
7020 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
7024 mono_custom_attrs_free (custom);
7027 /* not a P/Invoke after all */
7032 /* we use a separate basic block for the initialization code */
7033 NEW_BBLOCK (cfg, init_localsbb);
7034 cfg->bb_init = init_localsbb;
7035 init_localsbb->real_offset = cfg->real_offset;
7036 start_bblock->next_bb = init_localsbb;
7037 init_localsbb->next_bb = bblock;
7038 link_bblock (cfg, start_bblock, init_localsbb);
7039 link_bblock (cfg, init_localsbb, bblock);
7041 cfg->cbb = init_localsbb;
7043 if (cfg->gsharedvt && cfg->method == method) {
7044 MonoGSharedVtMethodInfo *info;
7045 MonoInst *var, *locals_var;
7048 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
7049 info->method = cfg->method;
7050 info->count_entries = 16;
7051 info->entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
7052 cfg->gsharedvt_info = info;
7054 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7055 /* prevent it from being register allocated */
7056 //var->flags |= MONO_INST_VOLATILE;
7057 cfg->gsharedvt_info_var = var;
7059 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
7060 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
7062 /* Allocate locals */
7063 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7064 /* prevent it from being register allocated */
7065 //locals_var->flags |= MONO_INST_VOLATILE;
7066 cfg->gsharedvt_locals_var = locals_var;
7068 dreg = alloc_ireg (cfg);
7069 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
7071 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
7072 ins->dreg = locals_var->dreg;
7074 MONO_ADD_INS (cfg->cbb, ins);
7075 cfg->gsharedvt_locals_var_ins = ins;
7077 cfg->flags |= MONO_CFG_HAS_ALLOCA;
7080 ins->flags |= MONO_INST_INIT;
7084 /* at this point we know, if security is TRUE, that some code needs to be generated */
7085 if (security && (cfg->method == method)) {
7088 cfg->stat_cas_demand_generation++;
7090 if (actions.demand.blob) {
7091 /* Add code for SecurityAction.Demand */
7092 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
7093 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
7094 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
7095 mono_emit_method_call (cfg, secman->demand, args, NULL);
7097 if (actions.noncasdemand.blob) {
7098 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
7099 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
7100 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
7101 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
7102 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
7103 mono_emit_method_call (cfg, secman->demand, args, NULL);
7105 if (actions.demandchoice.blob) {
7106 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
7107 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
7108 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
7109 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
7110 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
7114 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
7116 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
7119 if (mono_security_core_clr_enabled ()) {
7120 /* check if this is native code, e.g. an icall or a p/invoke */
7121 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
7122 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7124 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
7125 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
7127 /* if this is a native call then it can only be JITted from platform code */
7128 if ((icall || pinvk) && method->klass && method->klass->image) {
7129 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
7130 MonoException *ex = icall ? mono_get_exception_security () :
7131 mono_get_exception_method_access ();
7132 emit_throw_exception (cfg, ex);
7139 CHECK_CFG_EXCEPTION;
7141 if (header->code_size == 0)
7144 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
7149 if (cfg->method == method)
7150 mono_debug_init_method (cfg, bblock, breakpoint_id);
7152 for (n = 0; n < header->num_locals; ++n) {
7153 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
7158 /* We force the vtable variable here for all shared methods
7159 for the possibility that they might show up in a stack
7160 trace where their exact instantiation is needed. */
7161 if (cfg->generic_sharing_context && method == cfg->method) {
7162 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7163 mini_method_get_context (method)->method_inst ||
7164 method->klass->valuetype) {
7165 mono_get_vtable_var (cfg);
7167 /* FIXME: Is there a better way to do this?
7168 We need the variable live for the duration
7169 of the whole method. */
7170 cfg->args [0]->flags |= MONO_INST_VOLATILE;
7174 /* add a check for this != NULL to inlined methods */
7175 if (is_virtual_call) {
7178 NEW_ARGLOAD (cfg, arg_ins, 0);
7179 MONO_ADD_INS (cfg->cbb, arg_ins);
7180 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
7183 skip_dead_blocks = !dont_verify;
7184 if (skip_dead_blocks) {
7185 original_bb = bb = mono_basic_block_split (method, &error);
7186 if (!mono_error_ok (&error)) {
7187 mono_error_cleanup (&error);
7193 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
7194 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
7197 start_new_bblock = 0;
7200 if (cfg->method == method)
7201 cfg->real_offset = ip - header->code;
7203 cfg->real_offset = inline_offset;
7208 if (start_new_bblock) {
7209 bblock->cil_length = ip - bblock->cil_code;
7210 if (start_new_bblock == 2) {
7211 g_assert (ip == tblock->cil_code);
7213 GET_BBLOCK (cfg, tblock, ip);
7215 bblock->next_bb = tblock;
7218 start_new_bblock = 0;
7219 for (i = 0; i < bblock->in_scount; ++i) {
7220 if (cfg->verbose_level > 3)
7221 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
7222 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
7226 g_slist_free (class_inits);
7229 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
7230 link_bblock (cfg, bblock, tblock);
7231 if (sp != stack_start) {
7232 handle_stack_args (cfg, stack_start, sp - stack_start);
7234 CHECK_UNVERIFIABLE (cfg);
7236 bblock->next_bb = tblock;
7239 for (i = 0; i < bblock->in_scount; ++i) {
7240 if (cfg->verbose_level > 3)
7241 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
7242 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
7245 g_slist_free (class_inits);
7250 if (skip_dead_blocks) {
7251 int ip_offset = ip - header->code;
7253 if (ip_offset == bb->end)
7257 int op_size = mono_opcode_size (ip, end);
7258 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
7260 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
7262 if (ip_offset + op_size == bb->end) {
7263 MONO_INST_NEW (cfg, ins, OP_NOP);
7264 MONO_ADD_INS (bblock, ins);
7265 start_new_bblock = 1;
7273 * Sequence points are points where the debugger can place a breakpoint.
7274 * Currently, we generate these automatically at points where the IL
7277 if (seq_points && ((sp == stack_start) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
7279 * Make methods interruptable at the beginning, and at the targets of
7280 * backward branches.
7281 * Also, do this at the start of every bblock in methods with clauses too,
7282 * to be able to handle instructions with imprecise control flow like
7284 * Backward branches are handled at the end of method-to-ir ().
7286 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
7288 /* Avoid sequence points on empty IL like .volatile */
7289 // FIXME: Enable this
7290 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
7291 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
7292 if (sp != stack_start)
7293 ins->flags |= MONO_INST_NONEMPTY_STACK;
7294 MONO_ADD_INS (cfg->cbb, ins);
7297 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
7300 bblock->real_offset = cfg->real_offset;
7302 if ((cfg->method == method) && cfg->coverage_info) {
7303 guint32 cil_offset = ip - header->code;
7304 cfg->coverage_info->data [cil_offset].cil_code = ip;
7306 /* TODO: Use an increment here */
7307 #if defined(TARGET_X86)
7308 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
7309 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
7311 MONO_ADD_INS (cfg->cbb, ins);
7313 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
7314 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
7318 if (cfg->verbose_level > 3)
7319 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
7323 if (seq_points && !sym_seq_points && sp != stack_start) {
7325 * The C# compiler uses these nops to notify the JIT that it should
7326 * insert seq points.
7328 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
7329 MONO_ADD_INS (cfg->cbb, ins);
7331 if (cfg->keep_cil_nops)
7332 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
7334 MONO_INST_NEW (cfg, ins, OP_NOP);
7336 MONO_ADD_INS (bblock, ins);
7339 if (should_insert_brekpoint (cfg->method)) {
7340 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
7342 MONO_INST_NEW (cfg, ins, OP_NOP);
7345 MONO_ADD_INS (bblock, ins);
7351 CHECK_STACK_OVF (1);
7352 n = (*ip)-CEE_LDARG_0;
7354 EMIT_NEW_ARGLOAD (cfg, ins, n);
7362 CHECK_STACK_OVF (1);
7363 n = (*ip)-CEE_LDLOC_0;
7365 EMIT_NEW_LOCLOAD (cfg, ins, n);
7374 n = (*ip)-CEE_STLOC_0;
7377 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
7379 emit_stloc_ir (cfg, sp, header, n);
7386 CHECK_STACK_OVF (1);
7389 EMIT_NEW_ARGLOAD (cfg, ins, n);
7395 CHECK_STACK_OVF (1);
7398 NEW_ARGLOADA (cfg, ins, n);
7399 MONO_ADD_INS (cfg->cbb, ins);
7409 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
7411 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
7416 CHECK_STACK_OVF (1);
7419 EMIT_NEW_LOCLOAD (cfg, ins, n);
7423 case CEE_LDLOCA_S: {
7424 unsigned char *tmp_ip;
7426 CHECK_STACK_OVF (1);
7427 CHECK_LOCAL (ip [1]);
7429 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
7435 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
7444 CHECK_LOCAL (ip [1]);
7445 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
7447 emit_stloc_ir (cfg, sp, header, ip [1]);
7452 CHECK_STACK_OVF (1);
7453 EMIT_NEW_PCONST (cfg, ins, NULL);
7454 ins->type = STACK_OBJ;
7459 CHECK_STACK_OVF (1);
7460 EMIT_NEW_ICONST (cfg, ins, -1);
7473 CHECK_STACK_OVF (1);
7474 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
7480 CHECK_STACK_OVF (1);
7482 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
7488 CHECK_STACK_OVF (1);
7489 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
7495 CHECK_STACK_OVF (1);
7496 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7497 ins->type = STACK_I8;
7498 ins->dreg = alloc_dreg (cfg, STACK_I8);
7500 ins->inst_l = (gint64)read64 (ip);
7501 MONO_ADD_INS (bblock, ins);
7507 gboolean use_aotconst = FALSE;
7509 #ifdef TARGET_POWERPC
7510 /* FIXME: Clean this up */
7511 if (cfg->compile_aot)
7512 use_aotconst = TRUE;
7515 /* FIXME: we should really allocate this only late in the compilation process */
7516 f = mono_domain_alloc (cfg->domain, sizeof (float));
7518 CHECK_STACK_OVF (1);
7524 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
7526 dreg = alloc_freg (cfg);
7527 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
7528 ins->type = STACK_R8;
7530 MONO_INST_NEW (cfg, ins, OP_R4CONST);
7531 ins->type = STACK_R8;
7532 ins->dreg = alloc_dreg (cfg, STACK_R8);
7534 MONO_ADD_INS (bblock, ins);
7544 gboolean use_aotconst = FALSE;
7546 #ifdef TARGET_POWERPC
7547 /* FIXME: Clean this up */
7548 if (cfg->compile_aot)
7549 use_aotconst = TRUE;
7552 /* FIXME: we should really allocate this only late in the compilation process */
7553 d = mono_domain_alloc (cfg->domain, sizeof (double));
7555 CHECK_STACK_OVF (1);
7561 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
7563 dreg = alloc_freg (cfg);
7564 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
7565 ins->type = STACK_R8;
7567 MONO_INST_NEW (cfg, ins, OP_R8CONST);
7568 ins->type = STACK_R8;
7569 ins->dreg = alloc_dreg (cfg, STACK_R8);
7571 MONO_ADD_INS (bblock, ins);
7580 MonoInst *temp, *store;
7582 CHECK_STACK_OVF (1);
7586 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
7587 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
7589 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
7592 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
7605 if (sp [0]->type == STACK_R8)
7606 /* we need to pop the value from the x86 FP stack */
7607 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
7613 INLINE_FAILURE ("jmp");
7614 GSHAREDVT_FAILURE (*ip);
7617 if (stack_start != sp)
7619 token = read32 (ip + 1);
7620 /* FIXME: check the signature matches */
7621 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7623 if (!cmethod || mono_loader_get_last_error ())
7626 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
7627 GENERIC_SHARING_FAILURE (CEE_JMP);
7629 if (mono_security_cas_enabled ())
7630 CHECK_CFG_EXCEPTION;
7632 if (ARCH_HAVE_OP_TAIL_CALL) {
7633 MonoMethodSignature *fsig = mono_method_signature (cmethod);
7636 /* Handle tail calls similarly to calls */
7637 n = fsig->param_count + fsig->hasthis;
7641 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
7642 call->method = cmethod;
7643 call->tail_call = TRUE;
7644 call->signature = mono_method_signature (cmethod);
7645 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
7646 call->inst.inst_p0 = cmethod;
7647 for (i = 0; i < n; ++i)
7648 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
7650 mono_arch_emit_call (cfg, call);
7651 MONO_ADD_INS (bblock, (MonoInst*)call);
7653 for (i = 0; i < num_args; ++i)
7654 /* Prevent arguments from being optimized away */
7655 arg_array [i]->flags |= MONO_INST_VOLATILE;
7657 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
7658 ins = (MonoInst*)call;
7659 ins->inst_p0 = cmethod;
7660 MONO_ADD_INS (bblock, ins);
7664 start_new_bblock = 1;
7669 case CEE_CALLVIRT: {
7670 MonoInst *addr = NULL;
7671 MonoMethodSignature *fsig = NULL;
7673 int virtual = *ip == CEE_CALLVIRT;
7674 int calli = *ip == CEE_CALLI;
7675 gboolean pass_imt_from_rgctx = FALSE;
7676 MonoInst *imt_arg = NULL;
7677 MonoInst *keep_this_alive = NULL;
7678 gboolean pass_vtable = FALSE;
7679 gboolean pass_mrgctx = FALSE;
7680 MonoInst *vtable_arg = NULL;
7681 gboolean check_this = FALSE;
7682 gboolean supported_tail_call = FALSE;
7683 gboolean tail_call = FALSE;
7684 gboolean need_seq_point = FALSE;
7685 guint32 call_opcode = *ip;
7686 gboolean emit_widen = TRUE;
7687 gboolean push_res = TRUE;
7688 gboolean skip_ret = FALSE;
7689 gboolean delegate_invoke = FALSE;
7692 token = read32 (ip + 1);
7697 //GSHAREDVT_FAILURE (*ip);
7702 fsig = mini_get_signature (method, token, generic_context);
7703 n = fsig->param_count + fsig->hasthis;
7705 if (method->dynamic && fsig->pinvoke) {
7709 * This is a call through a function pointer using a pinvoke
7710 * signature. Have to create a wrapper and call that instead.
7711 * FIXME: This is very slow, need to create a wrapper at JIT time
7712 * instead based on the signature.
7714 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
7715 EMIT_NEW_PCONST (cfg, args [1], fsig);
7717 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
7720 MonoMethod *cil_method;
7722 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7723 cil_method = cmethod;
7725 if (constrained_call) {
7726 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7727 if (cfg->verbose_level > 2)
7728 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_call));
7729 if (!((constrained_call->byval_arg.type == MONO_TYPE_VAR ||
7730 constrained_call->byval_arg.type == MONO_TYPE_MVAR) &&
7731 cfg->generic_sharing_context)) {
7732 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_call, generic_context);
7735 if (cfg->verbose_level > 2)
7736 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_call));
7738 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
7740 * This is needed since get_method_constrained can't find
7741 * the method in klass representing a type var.
7742 * The type var is guaranteed to be a reference type in this
7745 if (!mini_is_gsharedvt_klass (cfg, constrained_call))
7746 g_assert (!cmethod->klass->valuetype);
7748 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
7753 if (!cmethod || mono_loader_get_last_error ())
7755 if (!dont_verify && !cfg->skip_visibility) {
7756 MonoMethod *target_method = cil_method;
7757 if (method->is_inflated) {
7758 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
7760 if (!mono_method_can_access_method (method_definition, target_method) &&
7761 !mono_method_can_access_method (method, cil_method))
7762 METHOD_ACCESS_FAILURE;
7765 if (mono_security_core_clr_enabled ())
7766 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
7768 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
7769 /* MS.NET seems to silently convert this to a callvirt */
7774 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
7775 * converts to a callvirt.
7777 * tests/bug-515884.il is an example of this behavior
7779 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
7780 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
7781 if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
7785 if (!cmethod->klass->inited)
7786 if (!mono_class_init (cmethod->klass))
7787 TYPE_LOAD_ERROR (cmethod->klass);
7789 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
7790 mini_class_is_system_array (cmethod->klass)) {
7791 array_rank = cmethod->klass->rank;
7792 fsig = mono_method_signature (cmethod);
7794 fsig = mono_method_signature (cmethod);
7799 if (fsig->pinvoke) {
7800 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
7801 check_for_pending_exc, cfg->compile_aot);
7802 fsig = mono_method_signature (wrapper);
7803 } else if (constrained_call) {
7804 fsig = mono_method_signature (cmethod);
7806 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
7810 mono_save_token_info (cfg, image, token, cil_method);
7812 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7814 * Need to emit an implicit seq point after every non-void call so single stepping through nested calls like
7815 * foo (bar (), baz ())
7816 * works correctly. MS does this also:
7817 * http://stackoverflow.com/questions/6937198/making-your-net-language-step-correctly-in-the-debugger
7818 * The problem with this approach is that the debugger will stop after all calls returning a value,
7819 * even for simple cases, like:
7822 /* Special case a few common successor opcodes */
7823 if (!(ip + 5 < end && (ip [5] == CEE_POP || ip [5] == CEE_NOP)) && !(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
7824 need_seq_point = TRUE;
7827 n = fsig->param_count + fsig->hasthis;
7829 /* Don't support calls made using type arguments for now */
7831 if (cfg->gsharedvt) {
7832 if (mini_is_gsharedvt_signature (cfg, fsig))
7833 GSHAREDVT_FAILURE (*ip);
7837 if (mono_security_cas_enabled ()) {
7838 if (check_linkdemand (cfg, method, cmethod))
7839 INLINE_FAILURE ("linkdemand");
7840 CHECK_CFG_EXCEPTION;
7843 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
7844 g_assert_not_reached ();
7847 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
7850 if (!cfg->generic_sharing_context && cmethod)
7851 g_assert (!mono_method_check_context_used (cmethod));
7855 //g_assert (!virtual || fsig->hasthis);
7859 if (constrained_call) {
7860 if (mini_is_gsharedvt_klass (cfg, constrained_call)) {
7862			 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as ref type or as a vtype.
7864 if ((cmethod->klass != mono_defaults.object_class) && constrained_call->valuetype && cmethod->klass->valuetype) {
7865 /* The 'Own method' case below */
7866 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
7867 /* 'The type parameter is instantiated as a reference type' case below. */
7868 } else if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
7869 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || mini_is_gsharedvt_type (cfg, fsig->ret)) &&
7870 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || mini_is_gsharedvt_type (cfg, fsig->params [0]))))) {
7871 MonoInst *args [16];
7874 * This case handles calls to
7875 * - object:ToString()/Equals()/GetHashCode(),
7876 * - System.IComparable<T>:CompareTo()
7877 * - System.IEquatable<T>:Equals ()
7878 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
7882 if (mono_method_check_context_used (cmethod))
7883 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
7885 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
7886 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_call), constrained_call, MONO_RGCTX_INFO_KLASS);
7888 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
7889 if (fsig->hasthis && fsig->param_count) {
7890 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
7891 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
7892 ins->dreg = alloc_preg (cfg);
7893 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
7894 MONO_ADD_INS (cfg->cbb, ins);
7897 if (mini_is_gsharedvt_type (cfg, fsig->params [0])) {
7900 args [3] = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
7902 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
7903 addr_reg = ins->dreg;
7904 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
7906 EMIT_NEW_ICONST (cfg, args [3], 0);
7907 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
7910 EMIT_NEW_ICONST (cfg, args [3], 0);
7911 EMIT_NEW_ICONST (cfg, args [4], 0);
7913 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
7916 if (mini_is_gsharedvt_type (cfg, fsig->ret)) {
7917 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins, &bblock);
7918 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret)) {
7922 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
7923 MONO_ADD_INS (cfg->cbb, add);
7925 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
7926 MONO_ADD_INS (cfg->cbb, ins);
7927 /* ins represents the call result */
7932 GSHAREDVT_FAILURE (*ip);
7936 * We have the `constrained.' prefix opcode.
7938 if (constrained_call->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
7940 * The type parameter is instantiated as a valuetype,
7941 * but that type doesn't override the method we're
7942 * calling, so we need to box `this'.
7944 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
7945 ins->klass = constrained_call;
7946 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
7947 CHECK_CFG_EXCEPTION;
7948 } else if (!constrained_call->valuetype) {
7949 int dreg = alloc_ireg_ref (cfg);
7952 * The type parameter is instantiated as a reference
7953 * type. We have a managed pointer on the stack, so
7954 * we need to dereference it here.
7956 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
7957 ins->type = STACK_OBJ;
7960 if (cmethod->klass->valuetype) {
7963 /* Interface method */
7966 mono_class_setup_vtable (constrained_call);
7967 CHECK_TYPELOAD (constrained_call);
7968 ioffset = mono_class_interface_offset (constrained_call, cmethod->klass);
7970 TYPE_LOAD_ERROR (constrained_call);
7971 slot = mono_method_get_vtable_slot (cmethod);
7973 TYPE_LOAD_ERROR (cmethod->klass);
7974 cmethod = constrained_call->vtable [ioffset + slot];
7976 if (cmethod->klass == mono_defaults.enum_class) {
7977 /* Enum implements some interfaces, so treat this as the first case */
7978 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
7979 ins->klass = constrained_call;
7980 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
7981 CHECK_CFG_EXCEPTION;
7986 constrained_call = NULL;
7989 if (!calli && check_call_signature (cfg, fsig, sp))
7992 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
7993 if (cmethod && (cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
7994 delegate_invoke = TRUE;
7997 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
7999 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8000 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8008 * If the callee is a shared method, then its static cctor
8009 * might not get called after the call was patched.
8011 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8012 emit_generic_class_init (cfg, cmethod->klass);
8013 CHECK_TYPELOAD (cmethod->klass);
8017 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
8019 if (cfg->generic_sharing_context && cmethod) {
8020 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
8022 context_used = mini_method_check_context_used (cfg, cmethod);
8024 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8025 /* Generic method interface
8026 calls are resolved via a
8027 helper function and don't
8029 if (!cmethod_context || !cmethod_context->method_inst)
8030 pass_imt_from_rgctx = TRUE;
8034 * If a shared method calls another
8035 * shared method then the caller must
8036 * have a generic sharing context
8037 * because the magic trampoline
8038 * requires it. FIXME: We shouldn't
8039 * have to force the vtable/mrgctx
8040 * variable here. Instead there
8041 * should be a flag in the cfg to
8042 * request a generic sharing context.
8045 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
8046 mono_get_vtable_var (cfg);
8051 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8053 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8055 CHECK_TYPELOAD (cmethod->klass);
8056 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8061 g_assert (!vtable_arg);
8063 if (!cfg->compile_aot) {
8065 * emit_get_rgctx_method () calls mono_class_vtable () so check
8066 * for type load errors before.
8068 mono_class_setup_vtable (cmethod->klass);
8069 CHECK_TYPELOAD (cmethod->klass);
8072 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8074 /* !marshalbyref is needed to properly handle generic methods + remoting */
8075 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
8076 MONO_METHOD_IS_FINAL (cmethod)) &&
8077 !mono_class_is_marshalbyref (cmethod->klass)) {
8084 if (pass_imt_from_rgctx) {
8085 g_assert (!pass_vtable);
8088 imt_arg = emit_get_rgctx_method (cfg, context_used,
8089 cmethod, MONO_RGCTX_INFO_METHOD);
8093 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8095 /* Calling virtual generic methods */
8096 if (cmethod && virtual &&
8097 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
8098 !(MONO_METHOD_IS_FINAL (cmethod) &&
8099 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
8100 fsig->generic_param_count &&
8101 !(cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))) {
8102 MonoInst *this_temp, *this_arg_temp, *store;
8103 MonoInst *iargs [4];
8104 gboolean use_imt = FALSE;
8106 g_assert (fsig->is_inflated);
8108 /* Prevent inlining of methods that contain indirect calls */
8109 INLINE_FAILURE ("virtual generic call");
8111 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
8112 GSHAREDVT_FAILURE (*ip);
8114 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
8115 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt)
8120 g_assert (!imt_arg);
8122 g_assert (cmethod->is_inflated);
8123 imt_arg = emit_get_rgctx_method (cfg, context_used,
8124 cmethod, MONO_RGCTX_INFO_METHOD);
8125 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
8127 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
8128 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
8129 MONO_ADD_INS (bblock, store);
8131 /* FIXME: This should be a managed pointer */
8132 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8134 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
8135 iargs [1] = emit_get_rgctx_method (cfg, context_used,
8136 cmethod, MONO_RGCTX_INFO_METHOD);
8137 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
8138 addr = mono_emit_jit_icall (cfg,
8139 mono_helper_compile_generic_method, iargs);
8141 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
8143 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8150 * Implement a workaround for the inherent races involved in locking:
8156 * If a thread abort happens between the call to Monitor.Enter () and the start of the
8157 * try block, the Exit () won't be executed, see:
8158 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
8159 * To work around this, we extend such try blocks to include the last x bytes
8160 * of the Monitor.Enter () call.
8162 if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8163 MonoBasicBlock *tbb;
8165 GET_BBLOCK (cfg, tbb, ip + 5);
8167 * Only extend try blocks with a finally, to avoid catching exceptions thrown
8168 * from Monitor.Enter like ArgumentNullException.
8170 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
8171 /* Mark this bblock as needing to be extended */
8172 tbb->extend_try_block = TRUE;
8176 /* Conversion to a JIT intrinsic */
8177 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
8179 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8180 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8187 if (cmethod && (cfg->opt & MONO_OPT_INLINE) &&
8188 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
8189 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
8190 !g_list_find (dont_inline, cmethod)) {
8192 gboolean always = FALSE;
8194 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
8195 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
8196 /* Prevent inlining of methods that call wrappers */
8197 INLINE_FAILURE ("wrapper call");
8198 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
8202 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, always);
8204 cfg->real_offset += 5;
8207 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8208 /* *sp is already set by inline_method */
8213 inline_costs += costs;
8219 /* Tail recursion elimination */
8220 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
8221 gboolean has_vtargs = FALSE;
8224 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8225 INLINE_FAILURE ("tail call");
8227 /* keep it simple */
8228 for (i = fsig->param_count - 1; i >= 0; i--) {
8229 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
8234 for (i = 0; i < n; ++i)
8235 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8236 MONO_INST_NEW (cfg, ins, OP_BR);
8237 MONO_ADD_INS (bblock, ins);
8238 tblock = start_bblock->out_bb [0];
8239 link_bblock (cfg, bblock, tblock);
8240 ins->inst_target_bb = tblock;
8241 start_new_bblock = 1;
8243 /* skip the CEE_RET, too */
8244 if (ip_in_bb (cfg, bblock, ip + 5))
8251 inline_costs += 10 * num_calls++;
8254 * Making generic calls out of gsharedvt methods.
8256 if (cmethod && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
8257 MonoRgctxInfoType info_type;
8260 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
8261 //GSHAREDVT_FAILURE (*ip);
8262 // disable for possible remoting calls
8263 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
8264 GSHAREDVT_FAILURE (*ip);
8265 if (fsig->generic_param_count) {
8266 /* virtual generic call */
8267 g_assert (mono_use_imt);
8268 g_assert (!imt_arg);
8269 /* Same as the virtual generic case above */
8270 imt_arg = emit_get_rgctx_method (cfg, context_used,
8271 cmethod, MONO_RGCTX_INFO_METHOD);
8272 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
8277 if (cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)
8278 /* test_0_multi_dim_arrays () in gshared.cs */
8279 GSHAREDVT_FAILURE (*ip);
8281 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
8282 keep_this_alive = sp [0];
8284 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
8285 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
8287 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
8288 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
8290 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8292 } else if (calli && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
8294 * We pass the address to the gsharedvt trampoline in the rgctx reg
8296 MonoInst *callee = addr;
8298 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8300 GSHAREDVT_FAILURE (*ip);
8302 addr = emit_get_rgctx_sig (cfg, context_used,
8303 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8304 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8308 /* Generic sharing */
8309 /* FIXME: only do this for generic methods if
8310 they are not shared! */
8311 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
8312 (!mono_method_is_generic_sharable (cmethod, TRUE) ||
8313 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
8314 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
8315 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
8316 INLINE_FAILURE ("gshared");
8318 g_assert (cfg->generic_sharing_context && cmethod);
8322 * We are compiling a call to a
8323 * generic method from shared code,
8324 * which means that we have to look up
8325 * the method in the rgctx and do an
8329 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8331 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8332 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8336 /* Indirect calls */
8338 if (call_opcode == CEE_CALL)
8339 g_assert (context_used);
8340 else if (call_opcode == CEE_CALLI)
8341 g_assert (!vtable_arg);
8343 /* FIXME: what the hell is this??? */
8344 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
8345 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
8347 /* Prevent inlining of methods with indirect calls */
8348 INLINE_FAILURE ("indirect call");
8350 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8355 * Instead of emitting an indirect call, emit a direct call
8356 * with the contents of the aotconst as the patch info.
8358 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8359 info_type = addr->inst_c1;
8360 info_data = addr->inst_p0;
8362 info_type = addr->inst_right->inst_c1;
8363 info_data = addr->inst_right->inst_left;
8366 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8367 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8372 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8380 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
8381 MonoInst *val = sp [fsig->param_count];
8383 if (val->type == STACK_OBJ) {
8384 MonoInst *iargs [2];
8389 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
8392 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
8393 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
8394 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
8395 emit_write_barrier (cfg, addr, val);
8396 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
8397 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8399 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
8400 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
8401 if (!cmethod->klass->element_class->valuetype && !readonly)
8402 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
8403 CHECK_TYPELOAD (cmethod->klass);
8406 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8409 g_assert_not_reached ();
8416 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
8420 /* Tail prefix / tail call optimization */
8422 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
8423 /* FIXME: runtime generic context pointer for jumps? */
8424 /* FIXME: handle this for generic sharing eventually */
8425 if (cmethod && (ins_flag & MONO_INST_TAILCALL) &&
8426 !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
8427 supported_tail_call = TRUE;
8429 if (supported_tail_call) {
8432 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8433 INLINE_FAILURE ("tail call");
8435 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
8437 if (ARCH_HAVE_OP_TAIL_CALL) {
8438 /* Handle tail calls similarly to normal calls */
8441 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8442 call->tail_call = TRUE;
8443 call->method = cmethod;
8444 call->signature = mono_method_signature (cmethod);
8447 * We implement tail calls by storing the actual arguments into the
8448 * argument variables, then emitting a CEE_JMP.
8450 for (i = 0; i < n; ++i) {
8451 /* Prevent argument from being register allocated */
8452 arg_array [i]->flags |= MONO_INST_VOLATILE;
8453 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8455 ins = (MonoInst*)call;
8456 ins->inst_p0 = cmethod;
8457 ins->inst_p1 = arg_array [0];
8458 MONO_ADD_INS (bblock, ins);
8459 link_bblock (cfg, bblock, end_bblock);
8460 start_new_bblock = 1;
8462 // FIXME: Eliminate unreachable epilogs
8465 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
8466 * only reachable from this call.
8468 GET_BBLOCK (cfg, tblock, ip + 5);
8469 if (tblock == bblock || tblock->in_count == 0)
8478 * Synchronized wrappers.
8479		 * It's hard to determine where to replace a method with its synchronized
8480 * wrapper without causing an infinite recursion. The current solution is
8481 * to add the synchronized wrapper in the trampolines, and to
8482 * change the called method to a dummy wrapper, and resolve that wrapper
8483 * to the real method in mono_jit_compile_method ().
8485 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8486 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
8487 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
8488 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
8492 INLINE_FAILURE ("call");
8493 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual ? sp [0] : NULL,
8494 imt_arg, vtable_arg);
8497 link_bblock (cfg, bblock, end_bblock);
8498 start_new_bblock = 1;
8500 // FIXME: Eliminate unreachable epilogs
8503 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
8504 * only reachable from this call.
8506 GET_BBLOCK (cfg, tblock, ip + 5);
8507 if (tblock == bblock || tblock->in_count == 0)
8514 /* End of call, INS should contain the result of the call, if any */
8516 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
8519 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8524 if (keep_this_alive) {
8525 MonoInst *dummy_use;
8527 /* See mono_emit_method_call_full () */
8528 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
8531 CHECK_CFG_EXCEPTION;
8535 g_assert (*ip == CEE_RET);
8539 constrained_call = NULL;
8541 emit_seq_point (cfg, method, ip, FALSE, TRUE);
8545 if (cfg->method != method) {
8546 /* return from inlined method */
8548 * If in_count == 0, that means the ret is unreachable due to
8549			 * being preceded by a throw. In that case, inline_method () will
8550 * handle setting the return value
8551 * (test case: test_0_inline_throw ()).
8553 if (return_var && cfg->cbb->in_count) {
8554 MonoType *ret_type = mono_method_signature (method)->ret;
8560 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
8563 //g_assert (returnvar != -1);
8564 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
8565 cfg->ret_var_set = TRUE;
8568 if (cfg->lmf_var && cfg->cbb->in_count)
8572 MonoType *ret_type = mini_replace_type (mono_method_signature (method)->ret);
8574 if (seq_points && !sym_seq_points) {
8576				 * Place a seq point here too even though the IL stack is not
8577 * empty, so a step over on
8580 * will work correctly.
8582 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
8583 MONO_ADD_INS (cfg->cbb, ins);
8586 g_assert (!return_var);
8590 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
8593 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
8596 if (!cfg->vret_addr) {
8599 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
8601 EMIT_NEW_RETLOADA (cfg, ret_addr);
8603 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
8604 ins->klass = mono_class_from_mono_type (ret_type);
8607 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
8608 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
8609 MonoInst *iargs [1];
8613 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
8614 mono_arch_emit_setret (cfg, method, conv);
8616 mono_arch_emit_setret (cfg, method, *sp);
8619 mono_arch_emit_setret (cfg, method, *sp);
8624 if (sp != stack_start)
8626 MONO_INST_NEW (cfg, ins, OP_BR);
8628 ins->inst_target_bb = end_bblock;
8629 MONO_ADD_INS (bblock, ins);
8630 link_bblock (cfg, bblock, end_bblock);
8631 start_new_bblock = 1;
8635 MONO_INST_NEW (cfg, ins, OP_BR);
8637 target = ip + 1 + (signed char)(*ip);
8639 GET_BBLOCK (cfg, tblock, target);
8640 link_bblock (cfg, bblock, tblock);
8641 ins->inst_target_bb = tblock;
8642 if (sp != stack_start) {
8643 handle_stack_args (cfg, stack_start, sp - stack_start);
8645 CHECK_UNVERIFIABLE (cfg);
8647 MONO_ADD_INS (bblock, ins);
8648 start_new_bblock = 1;
8649 inline_costs += BRANCH_COST;
8663 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
8665 target = ip + 1 + *(signed char*)ip;
8671 inline_costs += BRANCH_COST;
8675 MONO_INST_NEW (cfg, ins, OP_BR);
8678 target = ip + 4 + (gint32)read32(ip);
8680 GET_BBLOCK (cfg, tblock, target);
8681 link_bblock (cfg, bblock, tblock);
8682 ins->inst_target_bb = tblock;
8683 if (sp != stack_start) {
8684 handle_stack_args (cfg, stack_start, sp - stack_start);
8686 CHECK_UNVERIFIABLE (cfg);
8689 MONO_ADD_INS (bblock, ins);
8691 start_new_bblock = 1;
8692 inline_costs += BRANCH_COST;
8699 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
8700 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
8701 guint32 opsize = is_short ? 1 : 4;
8703 CHECK_OPSIZE (opsize);
8705 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
8708 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
8713 GET_BBLOCK (cfg, tblock, target);
8714 link_bblock (cfg, bblock, tblock);
8715 GET_BBLOCK (cfg, tblock, ip);
8716 link_bblock (cfg, bblock, tblock);
8718 if (sp != stack_start) {
8719 handle_stack_args (cfg, stack_start, sp - stack_start);
8720 CHECK_UNVERIFIABLE (cfg);
8723 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
8724 cmp->sreg1 = sp [0]->dreg;
8725 type_from_op (cmp, sp [0], NULL);
8728 #if SIZEOF_REGISTER == 4
8729 if (cmp->opcode == OP_LCOMPARE_IMM) {
8730 /* Convert it to OP_LCOMPARE */
8731 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8732 ins->type = STACK_I8;
8733 ins->dreg = alloc_dreg (cfg, STACK_I8);
8735 MONO_ADD_INS (bblock, ins);
8736 cmp->opcode = OP_LCOMPARE;
8737 cmp->sreg2 = ins->dreg;
8740 MONO_ADD_INS (bblock, cmp);
8742 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
8743 type_from_op (ins, sp [0], NULL);
8744 MONO_ADD_INS (bblock, ins);
8745 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
8746 GET_BBLOCK (cfg, tblock, target);
8747 ins->inst_true_bb = tblock;
8748 GET_BBLOCK (cfg, tblock, ip);
8749 ins->inst_false_bb = tblock;
8750 start_new_bblock = 2;
8753 inline_costs += BRANCH_COST;
8768 MONO_INST_NEW (cfg, ins, *ip);
8770 target = ip + 4 + (gint32)read32(ip);
8776 inline_costs += BRANCH_COST;
8780 MonoBasicBlock **targets;
8781 MonoBasicBlock *default_bblock;
8782 MonoJumpInfoBBTable *table;
8783 int offset_reg = alloc_preg (cfg);
8784 int target_reg = alloc_preg (cfg);
8785 int table_reg = alloc_preg (cfg);
8786 int sum_reg = alloc_preg (cfg);
8787 gboolean use_op_switch;
8791 n = read32 (ip + 1);
8794 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
8798 CHECK_OPSIZE (n * sizeof (guint32));
8799 target = ip + n * sizeof (guint32);
8801 GET_BBLOCK (cfg, default_bblock, target);
8802 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
8804 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
8805 for (i = 0; i < n; ++i) {
8806 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
8807 targets [i] = tblock;
8808 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
8812 if (sp != stack_start) {
8814 * Link the current bb with the targets as well, so handle_stack_args
8815 * will set their in_stack correctly.
8817 link_bblock (cfg, bblock, default_bblock);
8818 for (i = 0; i < n; ++i)
8819 link_bblock (cfg, bblock, targets [i]);
8821 handle_stack_args (cfg, stack_start, sp - stack_start);
8823 CHECK_UNVERIFIABLE (cfg);
8826 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
8827 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
8830 for (i = 0; i < n; ++i)
8831 link_bblock (cfg, bblock, targets [i]);
8833 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
8834 table->table = targets;
8835 table->table_size = n;
8837 use_op_switch = FALSE;
8839 /* ARM implements SWITCH statements differently */
8840 /* FIXME: Make it use the generic implementation */
8841 if (!cfg->compile_aot)
8842 use_op_switch = TRUE;
8845 if (COMPILE_LLVM (cfg))
8846 use_op_switch = TRUE;
8848 cfg->cbb->has_jump_table = 1;
8850 if (use_op_switch) {
8851 MONO_INST_NEW (cfg, ins, OP_SWITCH);
8852 ins->sreg1 = src1->dreg;
8853 ins->inst_p0 = table;
8854 ins->inst_many_bb = targets;
8855 ins->klass = GUINT_TO_POINTER (n);
8856 MONO_ADD_INS (cfg->cbb, ins);
8858 if (sizeof (gpointer) == 8)
8859 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
8861 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
8863 #if SIZEOF_REGISTER == 8
8864 /* The upper word might not be zero, and we add it to a 64 bit address later */
8865 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
8868 if (cfg->compile_aot) {
8869 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
8871 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
8872 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
8873 ins->inst_p0 = table;
8874 ins->dreg = table_reg;
8875 MONO_ADD_INS (cfg->cbb, ins);
8878 /* FIXME: Use load_memindex */
8879 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
8880 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
8881 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
8883 start_new_bblock = 1;
8884 inline_costs += (BRANCH_COST * 2);
8904 dreg = alloc_freg (cfg);
8907 dreg = alloc_lreg (cfg);
8910 dreg = alloc_ireg_ref (cfg);
8913 dreg = alloc_preg (cfg);
8916 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
8917 ins->type = ldind_type [*ip - CEE_LDIND_I1];
8918 ins->flags |= ins_flag;
8920 MONO_ADD_INS (bblock, ins);
8922 if (ins->flags & MONO_INST_VOLATILE) {
8923 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
8924 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
8925 emit_memory_barrier (cfg, FullBarrier);
8940 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
8941 ins->flags |= ins_flag;
8944 if (ins->flags & MONO_INST_VOLATILE) {
8945 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
8946 /* FIXME it's questionable if release semantics require full barrier or just StoreStore*/
8947 emit_memory_barrier (cfg, FullBarrier);
8950 MONO_ADD_INS (bblock, ins);
8952 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
8953 emit_write_barrier (cfg, sp [0], sp [1]);
8962 MONO_INST_NEW (cfg, ins, (*ip));
8964 ins->sreg1 = sp [0]->dreg;
8965 ins->sreg2 = sp [1]->dreg;
8966 type_from_op (ins, sp [0], sp [1]);
8968 ins->dreg = alloc_dreg ((cfg), (ins)->type);
8970 /* Use the immediate opcodes if possible */
8971 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
8972 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
8973 if (imm_opcode != -1) {
8974 ins->opcode = imm_opcode;
8975 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
8978 sp [1]->opcode = OP_NOP;
8982 MONO_ADD_INS ((cfg)->cbb, (ins));
8984 *sp++ = mono_decompose_opcode (cfg, ins);
9001 MONO_INST_NEW (cfg, ins, (*ip));
9003 ins->sreg1 = sp [0]->dreg;
9004 ins->sreg2 = sp [1]->dreg;
9005 type_from_op (ins, sp [0], sp [1]);
9007 ADD_WIDEN_OP (ins, sp [0], sp [1]);
9008 ins->dreg = alloc_dreg ((cfg), (ins)->type);
9010 /* FIXME: Pass opcode to is_inst_imm */
9012 /* Use the immediate opcodes if possible */
9013 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
9016 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9017 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
9018 /* Keep emulated opcodes which are optimized away later */
9019 if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
9020 imm_opcode = mono_op_to_op_imm (ins->opcode);
9023 if (imm_opcode != -1) {
9024 ins->opcode = imm_opcode;
9025 if (sp [1]->opcode == OP_I8CONST) {
9026 #if SIZEOF_REGISTER == 8
9027 ins->inst_imm = sp [1]->inst_l;
9029 ins->inst_ls_word = sp [1]->inst_ls_word;
9030 ins->inst_ms_word = sp [1]->inst_ms_word;
9034 ins->inst_imm = (gssize)(sp [1]->inst_c0);
9037 /* Might be followed by an instruction added by ADD_WIDEN_OP */
9038 if (sp [1]->next == NULL)
9039 sp [1]->opcode = OP_NOP;
9042 MONO_ADD_INS ((cfg)->cbb, (ins));
9044 *sp++ = mono_decompose_opcode (cfg, ins);
9057 case CEE_CONV_OVF_I8:
9058 case CEE_CONV_OVF_U8:
9062 /* Special case this earlier so we have long constants in the IR */
9063 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
9064 int data = sp [-1]->inst_c0;
9065 sp [-1]->opcode = OP_I8CONST;
9066 sp [-1]->type = STACK_I8;
9067 #if SIZEOF_REGISTER == 8
9068 if ((*ip) == CEE_CONV_U8)
9069 sp [-1]->inst_c0 = (guint32)data;
9071 sp [-1]->inst_c0 = data;
9073 sp [-1]->inst_ls_word = data;
9074 if ((*ip) == CEE_CONV_U8)
9075 sp [-1]->inst_ms_word = 0;
9077 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
9079 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
9086 case CEE_CONV_OVF_I4:
9087 case CEE_CONV_OVF_I1:
9088 case CEE_CONV_OVF_I2:
9089 case CEE_CONV_OVF_I:
9090 case CEE_CONV_OVF_U:
9093 if (sp [-1]->type == STACK_R8) {
9094 ADD_UNOP (CEE_CONV_OVF_I8);
9101 case CEE_CONV_OVF_U1:
9102 case CEE_CONV_OVF_U2:
9103 case CEE_CONV_OVF_U4:
9106 if (sp [-1]->type == STACK_R8) {
9107 ADD_UNOP (CEE_CONV_OVF_U8);
9114 case CEE_CONV_OVF_I1_UN:
9115 case CEE_CONV_OVF_I2_UN:
9116 case CEE_CONV_OVF_I4_UN:
9117 case CEE_CONV_OVF_I8_UN:
9118 case CEE_CONV_OVF_U1_UN:
9119 case CEE_CONV_OVF_U2_UN:
9120 case CEE_CONV_OVF_U4_UN:
9121 case CEE_CONV_OVF_U8_UN:
9122 case CEE_CONV_OVF_I_UN:
9123 case CEE_CONV_OVF_U_UN:
9130 CHECK_CFG_EXCEPTION;
9134 case CEE_ADD_OVF_UN:
9136 case CEE_MUL_OVF_UN:
9138 case CEE_SUB_OVF_UN:
9144 GSHAREDVT_FAILURE (*ip);
9147 token = read32 (ip + 1);
9148 klass = mini_get_class (method, token, generic_context);
9149 CHECK_TYPELOAD (klass);
9151 if (generic_class_is_reference_type (cfg, klass)) {
9152 MonoInst *store, *load;
9153 int dreg = alloc_ireg_ref (cfg);
9155 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
9156 load->flags |= ins_flag;
9157 MONO_ADD_INS (cfg->cbb, load);
9159 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
9160 store->flags |= ins_flag;
9161 MONO_ADD_INS (cfg->cbb, store);
9163 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
9164 emit_write_barrier (cfg, sp [0], sp [1]);
9166 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
9178 token = read32 (ip + 1);
9179 klass = mini_get_class (method, token, generic_context);
9180 CHECK_TYPELOAD (klass);
9182 /* Optimize the common ldobj+stloc combination */
9192 loc_index = ip [5] - CEE_STLOC_0;
9199 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
9200 CHECK_LOCAL (loc_index);
9202 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9203 ins->dreg = cfg->locals [loc_index]->dreg;
9209 /* Optimize the ldobj+stobj combination */
9210 /* The reference case ends up being a load+store anyway */
9211 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
9216 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
9223 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9232 CHECK_STACK_OVF (1);
9234 n = read32 (ip + 1);
9236 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
9237 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
9238 ins->type = STACK_OBJ;
9241 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
9242 MonoInst *iargs [1];
9244 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
9245 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
9247 if (cfg->opt & MONO_OPT_SHARED) {
9248 MonoInst *iargs [3];
9250 if (cfg->compile_aot) {
9251 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
9253 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9254 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
9255 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
9256 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
9257 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
9259 if (bblock->out_of_line) {
9260 MonoInst *iargs [2];
9262 if (image == mono_defaults.corlib) {
9264 * Avoid relocations in AOT and save some space by using a
9265 * version of helper_ldstr specialized to mscorlib.
9267 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
9268 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
9270 /* Avoid creating the string object */
9271 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9272 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
9273 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
9277 if (cfg->compile_aot) {
9278 NEW_LDSTRCONST (cfg, ins, image, n);
9280 MONO_ADD_INS (bblock, ins);
9283 NEW_PCONST (cfg, ins, NULL);
9284 ins->type = STACK_OBJ;
9285 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
9287 OUT_OF_MEMORY_FAILURE;
9290 MONO_ADD_INS (bblock, ins);
9299 MonoInst *iargs [2];
9300 MonoMethodSignature *fsig;
9303 MonoInst *vtable_arg = NULL;
9306 token = read32 (ip + 1);
9307 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
9308 if (!cmethod || mono_loader_get_last_error ())
9310 fsig = mono_method_get_signature (cmethod, image, token);
9314 mono_save_token_info (cfg, image, token, cmethod);
9316 if (!mono_class_init (cmethod->klass))
9317 TYPE_LOAD_ERROR (cmethod->klass);
9319 context_used = mini_method_check_context_used (cfg, cmethod);
9321 if (mono_security_cas_enabled ()) {
9322 if (check_linkdemand (cfg, method, cmethod))
9323 INLINE_FAILURE ("linkdemand");
9324 CHECK_CFG_EXCEPTION;
9325 } else if (mono_security_core_clr_enabled ()) {
9326 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9329 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9330 emit_generic_class_init (cfg, cmethod->klass);
9331 CHECK_TYPELOAD (cmethod->klass);
9335 if (cfg->gsharedvt) {
9336 if (mini_is_gsharedvt_variable_signature (sig))
9337 GSHAREDVT_FAILURE (*ip);
9341 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
9342 mono_method_is_generic_sharable (cmethod, TRUE)) {
9343 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
9344 mono_class_vtable (cfg->domain, cmethod->klass);
9345 CHECK_TYPELOAD (cmethod->klass);
9347 vtable_arg = emit_get_rgctx_method (cfg, context_used,
9348 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9351 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
9352 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9354 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9356 CHECK_TYPELOAD (cmethod->klass);
9357 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9362 n = fsig->param_count;
9366 * Generate smaller code for the common newobj <exception> instruction in
9367 * argument checking code.
9369 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
9370 is_exception_class (cmethod->klass) && n <= 2 &&
9371 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
9372 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
9373 MonoInst *iargs [3];
9375 g_assert (!vtable_arg);
9379 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
9382 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
9386 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
9391 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
9394 g_assert_not_reached ();
9402 /* move the args to allow room for 'this' in the first position */
9408 /* check_call_signature () requires sp[0] to be set */
9409 this_ins.type = STACK_OBJ;
9411 if (check_call_signature (cfg, fsig, sp))
9416 if (mini_class_is_system_array (cmethod->klass)) {
9417 g_assert (!vtable_arg);
9419 *sp = emit_get_rgctx_method (cfg, context_used,
9420 cmethod, MONO_RGCTX_INFO_METHOD);
9422 /* Avoid varargs in the common case */
9423 if (fsig->param_count == 1)
9424 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
9425 else if (fsig->param_count == 2)
9426 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
9427 else if (fsig->param_count == 3)
9428 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
9429 else if (fsig->param_count == 4)
9430 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
9432 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
9433 } else if (cmethod->string_ctor) {
9434 g_assert (!context_used);
9435 g_assert (!vtable_arg);
9436 /* we simply pass a null pointer */
9437 EMIT_NEW_PCONST (cfg, *sp, NULL);
9438 /* now call the string ctor */
9439 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
9441 MonoInst* callvirt_this_arg = NULL;
9443 if (cmethod->klass->valuetype) {
9444 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
9445 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
9446 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
9451 * The code generated by mini_emit_virtual_call () expects
9452 * iargs [0] to be a boxed instance, but luckily the vcall
9453 * will be transformed into a normal call there.
9455 } else if (context_used) {
9456 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
9459 MonoVTable *vtable = NULL;
9461 if (!cfg->compile_aot)
9462 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9463 CHECK_TYPELOAD (cmethod->klass);
9466 * TypeInitializationExceptions thrown from the mono_runtime_class_init
9467 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
9468 * As a workaround, we call class cctors before allocating objects.
9470 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
9471 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, cmethod->klass, helper_sig_class_init_trampoline, NULL);
9472 if (cfg->verbose_level > 2)
9473 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
9474 class_inits = g_slist_prepend (class_inits, cmethod->klass);
9477 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
9480 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
9483 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
9485 /* Now call the actual ctor */
9486 /* Avoid virtual calls to ctors if possible */
9487 if (mono_class_is_marshalbyref (cmethod->klass))
9488 callvirt_this_arg = sp [0];
9491 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
9492 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9493 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9498 CHECK_CFG_EXCEPTION;
9499 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
9500 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
9501 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
9502 !g_list_find (dont_inline, cmethod)) {
9505 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
9506 cfg->real_offset += 5;
9509 inline_costs += costs - 5;
9511 INLINE_FAILURE ("inline failure");
9512 // FIXME-VT: Clean this up
9513 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
9514 GSHAREDVT_FAILURE(*ip);
9515 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
9517 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
9520 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
9521 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
9522 } else if (context_used &&
9523 (!mono_method_is_generic_sharable (cmethod, TRUE) ||
9524 !mono_class_generic_sharing_enabled (cmethod->klass))) {
9525 MonoInst *cmethod_addr;
9527 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
9528 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9530 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
9532 INLINE_FAILURE ("ctor call");
9533 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
9534 callvirt_this_arg, NULL, vtable_arg);
9538 if (alloc == NULL) {
9540 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
9541 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
9555 token = read32 (ip + 1);
9556 klass = mini_get_class (method, token, generic_context);
9557 CHECK_TYPELOAD (klass);
9558 if (sp [0]->type != STACK_OBJ)
9561 context_used = mini_class_check_context_used (cfg, klass);
9563 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9570 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9573 if (cfg->compile_aot)
9574 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9576 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9578 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
9580 *sp++ = emit_castclass_with_cache (cfg, klass, args, &bblock);
9583 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9584 MonoMethod *mono_castclass;
9585 MonoInst *iargs [1];
9588 mono_castclass = mono_marshal_get_castclass (klass);
9591 save_cast_details (cfg, klass, sp [0]->dreg, TRUE, &bblock);
9592 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
9593 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9594 reset_cast_details (cfg);
9595 CHECK_CFG_EXCEPTION;
9596 g_assert (costs > 0);
9599 cfg->real_offset += 5;
9604 inline_costs += costs;
9607 ins = handle_castclass (cfg, klass, *sp, context_used);
9608 CHECK_CFG_EXCEPTION;
9618 token = read32 (ip + 1);
9619 klass = mini_get_class (method, token, generic_context);
9620 CHECK_TYPELOAD (klass);
9621 if (sp [0]->type != STACK_OBJ)
9624 context_used = mini_class_check_context_used (cfg, klass);
9626 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9627 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
9634 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9637 if (cfg->compile_aot)
9638 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9640 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9642 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
9645 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9646 MonoMethod *mono_isinst;
9647 MonoInst *iargs [1];
9650 mono_isinst = mono_marshal_get_isinst (klass);
9653 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
9654 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9655 CHECK_CFG_EXCEPTION;
9656 g_assert (costs > 0);
9659 cfg->real_offset += 5;
9664 inline_costs += costs;
9667 ins = handle_isinst (cfg, klass, *sp, context_used);
9668 CHECK_CFG_EXCEPTION;
9675 case CEE_UNBOX_ANY: {
9679 token = read32 (ip + 1);
9680 klass = mini_get_class (method, token, generic_context);
9681 CHECK_TYPELOAD (klass);
9683 mono_save_token_info (cfg, image, token, klass);
9685 context_used = mini_class_check_context_used (cfg, klass);
9687 if (mini_is_gsharedvt_klass (cfg, klass)) {
9688 *sp = handle_unbox_gsharedvt (cfg, klass, *sp, &bblock);
9696 if (generic_class_is_reference_type (cfg, klass)) {
9697 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
9698 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9705 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9708 /*FIXME AOT support*/
9709 if (cfg->compile_aot)
9710 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9712 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9714 /* The wrapper doesn't inline well so the bloat of inlining doesn't pay off. */
9715 *sp++ = emit_castclass_with_cache (cfg, klass, args, &bblock);
9718 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9719 MonoMethod *mono_castclass;
9720 MonoInst *iargs [1];
9723 mono_castclass = mono_marshal_get_castclass (klass);
9726 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
9727 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9728 CHECK_CFG_EXCEPTION;
9729 g_assert (costs > 0);
9732 cfg->real_offset += 5;
9736 inline_costs += costs;
9738 ins = handle_castclass (cfg, klass, *sp, context_used);
9739 CHECK_CFG_EXCEPTION;
9747 if (mono_class_is_nullable (klass)) {
9748 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
9755 ins = handle_unbox (cfg, klass, sp, context_used);
9761 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9774 token = read32 (ip + 1);
9775 klass = mini_get_class (method, token, generic_context);
9776 CHECK_TYPELOAD (klass);
9778 mono_save_token_info (cfg, image, token, klass);
9780 context_used = mini_class_check_context_used (cfg, klass);
9782 if (generic_class_is_reference_type (cfg, klass)) {
9788 if (klass == mono_defaults.void_class)
9790 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
9792 /* frequent check in generic code: box (struct), brtrue */
9794 // FIXME: LLVM can't handle the inconsistent bb linking
9795 if (!mono_class_is_nullable (klass) &&
9796 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
9797 (ip [5] == CEE_BRTRUE ||
9798 ip [5] == CEE_BRTRUE_S ||
9799 ip [5] == CEE_BRFALSE ||
9800 ip [5] == CEE_BRFALSE_S)) {
9801 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
9803 MonoBasicBlock *true_bb, *false_bb;
9807 if (cfg->verbose_level > 3) {
9808 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9809 printf ("<box+brtrue opt>\n");
9817 target = ip + 1 + (signed char)(*ip);
9824 target = ip + 4 + (gint)(read32 (ip));
9828 g_assert_not_reached ();
9832 * We need to link both bblocks, since it is needed for handling stack
9833 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
9834 * Branching to only one of them would lead to inconsistencies, so
9835 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
9837 GET_BBLOCK (cfg, true_bb, target);
9838 GET_BBLOCK (cfg, false_bb, ip);
9840 mono_link_bblock (cfg, cfg->cbb, true_bb);
9841 mono_link_bblock (cfg, cfg->cbb, false_bb);
9843 if (sp != stack_start) {
9844 handle_stack_args (cfg, stack_start, sp - stack_start);
9846 CHECK_UNVERIFIABLE (cfg);
9849 if (COMPILE_LLVM (cfg)) {
9850 dreg = alloc_ireg (cfg);
9851 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
9852 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
9854 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
9856 /* The JIT can't eliminate the iconst+compare */
9857 MONO_INST_NEW (cfg, ins, OP_BR);
9858 ins->inst_target_bb = is_true ? true_bb : false_bb;
9859 MONO_ADD_INS (cfg->cbb, ins);
9862 start_new_bblock = 1;
9866 *sp++ = handle_box (cfg, val, klass, context_used, &bblock);
9868 CHECK_CFG_EXCEPTION;
9877 token = read32 (ip + 1);
9878 klass = mini_get_class (method, token, generic_context);
9879 CHECK_TYPELOAD (klass);
9881 mono_save_token_info (cfg, image, token, klass);
9883 context_used = mini_class_check_context_used (cfg, klass);
9885 if (mono_class_is_nullable (klass)) {
9888 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
9889 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
9893 ins = handle_unbox (cfg, klass, sp, context_used);
9906 MonoClassField *field;
9907 #ifndef DISABLE_REMOTING
9911 gboolean is_instance;
9913 gpointer addr = NULL;
9914 gboolean is_special_static;
9916 MonoInst *store_val = NULL;
9917 MonoInst *thread_ins;
9920 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
9922 if (op == CEE_STFLD) {
9930 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
9932 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
9935 if (op == CEE_STSFLD) {
9943 token = read32 (ip + 1);
9944 if (method->wrapper_type != MONO_WRAPPER_NONE) {
9945 field = mono_method_get_wrapper_data (method, token);
9946 klass = field->parent;
9949 field = mono_field_from_token (image, token, &klass, generic_context);
9953 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
9954 FIELD_ACCESS_FAILURE;
9955 mono_class_init (klass);
9957 if (is_instance && *ip != CEE_LDFLDA && is_magic_tls_access (field))
9960 /* if the class is Critical then transparent code cannot access its fields */
9961 if (!is_instance && mono_security_core_clr_enabled ())
9962 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
9964 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
9965 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
9966 if (mono_security_core_clr_enabled ())
9967 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
9971 * LDFLD etc. is usable on static fields as well, so convert those cases to
9974 if (is_instance && field->type->attrs & FIELD_ATTRIBUTE_STATIC) {
9986 g_assert_not_reached ();
9988 is_instance = FALSE;
9991 context_used = mini_class_check_context_used (cfg, klass);
9995 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
9996 if (op == CEE_STFLD) {
9997 if (target_type_is_incompatible (cfg, field->type, sp [1]))
9999 #ifndef DISABLE_REMOTING
10000 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
10001 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
10002 MonoInst *iargs [5];
10004 GSHAREDVT_FAILURE (op);
10006 iargs [0] = sp [0];
10007 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10008 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10009 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
10011 iargs [4] = sp [1];
10013 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10014 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
10015 iargs, ip, cfg->real_offset, dont_inline, TRUE);
10016 CHECK_CFG_EXCEPTION;
10017 g_assert (costs > 0);
10019 cfg->real_offset += 5;
10022 inline_costs += costs;
10024 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
10031 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10033 if (mini_is_gsharedvt_klass (cfg, klass)) {
10034 MonoInst *offset_ins;
10036 context_used = mini_class_check_context_used (cfg, klass);
10038 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10039 dreg = alloc_ireg_mp (cfg);
10040 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10041 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
10042 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
10044 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
10046 if (sp [0]->opcode != OP_LDADDR)
10047 store->flags |= MONO_INST_FAULT;
10049 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
10050 /* insert call to write barrier */
10054 dreg = alloc_ireg_mp (cfg);
10055 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10056 emit_write_barrier (cfg, ptr, sp [1]);
10059 store->flags |= ins_flag;
10066 #ifndef DISABLE_REMOTING
10067 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
10068 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
10069 MonoInst *iargs [4];
10071 GSHAREDVT_FAILURE (op);
10073 iargs [0] = sp [0];
10074 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10075 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10076 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
10077 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10078 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
10079 iargs, ip, cfg->real_offset, dont_inline, TRUE);
10080 CHECK_CFG_EXCEPTION;
10082 g_assert (costs > 0);
10084 cfg->real_offset += 5;
10088 inline_costs += costs;
10090 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
10096 if (sp [0]->type == STACK_VTYPE) {
10099 /* Have to compute the address of the variable */
10101 var = get_vreg_to_inst (cfg, sp [0]->dreg);
10103 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
10105 g_assert (var->klass == klass);
10107 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
10111 if (op == CEE_LDFLDA) {
10112 if (is_magic_tls_access (field)) {
10113 GSHAREDVT_FAILURE (*ip);
10115 *sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
10117 if (sp [0]->type == STACK_OBJ) {
10118 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10119 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
10122 dreg = alloc_ireg_mp (cfg);
10124 if (mini_is_gsharedvt_klass (cfg, klass)) {
10125 MonoInst *offset_ins;
10127 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10128 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10130 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10132 ins->klass = mono_class_from_mono_type (field->type);
10133 ins->type = STACK_MP;
10139 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10141 if (mini_is_gsharedvt_klass (cfg, klass)) {
10142 MonoInst *offset_ins;
10144 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10145 dreg = alloc_ireg_mp (cfg);
10146 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10147 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
10149 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
10151 load->flags |= ins_flag;
10152 if (sp [0]->opcode != OP_LDADDR)
10153 load->flags |= MONO_INST_FAULT;
10167 * We can only support shared generic static
10168 * field access on architectures where the
10169 * trampoline code has been extended to handle
10170 * the generic class init.
10172 #ifndef MONO_ARCH_VTABLE_REG
10173 GENERIC_SHARING_FAILURE (op);
10176 context_used = mini_class_check_context_used (cfg, klass);
10178 ftype = mono_field_get_type (field);
10180 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
10183 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
10184 * to be called here.
10186 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
10187 mono_class_vtable (cfg->domain, klass);
10188 CHECK_TYPELOAD (klass);
10190 mono_domain_lock (cfg->domain);
10191 if (cfg->domain->special_static_fields)
10192 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
10193 mono_domain_unlock (cfg->domain);
10195 is_special_static = mono_class_field_is_special_static (field);
10197 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
10198 thread_ins = mono_get_thread_intrinsic (cfg);
10202 /* Generate IR to compute the field address */
10203 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
10205 * Fast access to TLS data
10206 * Inline version of get_thread_static_data () in
10210 int idx, static_data_reg, array_reg, dreg;
10212 GSHAREDVT_FAILURE (op);
10214 // offset &= 0x7fffffff;
10215 // idx = (offset >> 24) - 1;
10216 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
10217 MONO_ADD_INS (cfg->cbb, thread_ins);
10218 static_data_reg = alloc_ireg (cfg);
10219 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
10221 if (cfg->compile_aot) {
10222 int offset_reg, offset2_reg, idx_reg;
10224 /* For TLS variables, this will return the TLS offset */
10225 EMIT_NEW_SFLDACONST (cfg, ins, field);
10226 offset_reg = ins->dreg;
10227 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
10228 idx_reg = alloc_ireg (cfg);
10229 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
10230 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
10231 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
10232 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
10233 array_reg = alloc_ireg (cfg);
10234 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
10235 offset2_reg = alloc_ireg (cfg);
10236 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
10237 dreg = alloc_ireg (cfg);
10238 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
10240 offset = (gsize)addr & 0x7fffffff;
10241 idx = (offset >> 24) - 1;
10243 array_reg = alloc_ireg (cfg);
10244 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
10245 dreg = alloc_ireg (cfg);
10246 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
10248 } else if ((cfg->opt & MONO_OPT_SHARED) ||
10249 (cfg->compile_aot && is_special_static) ||
10250 (context_used && is_special_static)) {
10251 MonoInst *iargs [2];
10253 g_assert (field->parent);
10254 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10255 if (context_used) {
10256 iargs [1] = emit_get_rgctx_field (cfg, context_used,
10257 field, MONO_RGCTX_INFO_CLASS_FIELD);
10259 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10261 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10262 } else if (context_used) {
10263 MonoInst *static_data;
10266 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
10267 method->klass->name_space, method->klass->name, method->name,
10268 depth, field->offset);
10271 if (mono_class_needs_cctor_run (klass, method))
10272 emit_generic_class_init (cfg, klass);
10275 * The pointer we're computing here is
10277 * super_info.static_data + field->offset
10279 static_data = emit_get_rgctx_klass (cfg, context_used,
10280 klass, MONO_RGCTX_INFO_STATIC_DATA);
10282 if (mini_is_gsharedvt_klass (cfg, klass)) {
10283 MonoInst *offset_ins;
10285 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10286 dreg = alloc_ireg_mp (cfg);
10287 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
10288 } else if (field->offset == 0) {
10291 int addr_reg = mono_alloc_preg (cfg);
10292 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
10294 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
10295 MonoInst *iargs [2];
10297 g_assert (field->parent);
10298 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10299 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10300 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10302 MonoVTable *vtable = NULL;
10304 if (!cfg->compile_aot)
10305 vtable = mono_class_vtable (cfg->domain, klass);
10306 CHECK_TYPELOAD (klass);
10309 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
10310 if (!(g_slist_find (class_inits, klass))) {
10311 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, klass, helper_sig_class_init_trampoline, NULL);
10312 if (cfg->verbose_level > 2)
10313 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
10314 class_inits = g_slist_prepend (class_inits, klass);
10317 if (cfg->run_cctors) {
10319 /* This makes so that inline cannot trigger */
10320 /* .cctors: too many apps depend on them */
10321 /* running with a specific order... */
10323 if (! vtable->initialized)
10324 INLINE_FAILURE ("class init");
10325 ex = mono_runtime_class_init_full (vtable, FALSE);
10327 set_exception_object (cfg, ex);
10328 goto exception_exit;
10332 if (cfg->compile_aot)
10333 EMIT_NEW_SFLDACONST (cfg, ins, field);
10336 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10338 EMIT_NEW_PCONST (cfg, ins, addr);
10341 MonoInst *iargs [1];
10342 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
10343 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
10347 /* Generate IR to do the actual load/store operation */
10349 if (op == CEE_LDSFLDA) {
10350 ins->klass = mono_class_from_mono_type (ftype);
10351 ins->type = STACK_PTR;
10353 } else if (op == CEE_STSFLD) {
10356 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
10357 store->flags |= ins_flag;
10359 gboolean is_const = FALSE;
10360 MonoVTable *vtable = NULL;
10361 gpointer addr = NULL;
10363 if (!context_used) {
10364 vtable = mono_class_vtable (cfg->domain, klass);
10365 CHECK_TYPELOAD (klass);
10367 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
10368 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
10369 int ro_type = ftype->type;
10371 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10372 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
10373 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
10376 GSHAREDVT_FAILURE (op);
10378 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
10381 case MONO_TYPE_BOOLEAN:
10383 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
10387 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
10390 case MONO_TYPE_CHAR:
10392 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
10396 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
10401 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
10405 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
10410 case MONO_TYPE_PTR:
10411 case MONO_TYPE_FNPTR:
10412 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10413 type_to_eval_stack_type ((cfg), field->type, *sp);
10416 case MONO_TYPE_STRING:
10417 case MONO_TYPE_OBJECT:
10418 case MONO_TYPE_CLASS:
10419 case MONO_TYPE_SZARRAY:
10420 case MONO_TYPE_ARRAY:
10421 if (!mono_gc_is_moving ()) {
10422 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10423 type_to_eval_stack_type ((cfg), field->type, *sp);
10431 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
10436 case MONO_TYPE_VALUETYPE:
10446 CHECK_STACK_OVF (1);
10448 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
10449 load->flags |= ins_flag;
10462 token = read32 (ip + 1);
10463 klass = mini_get_class (method, token, generic_context);
10464 CHECK_TYPELOAD (klass);
10465 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
10466 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
10467 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
10468 generic_class_is_reference_type (cfg, klass)) {
10469 /* insert call to write barrier */
10470 emit_write_barrier (cfg, sp [0], sp [1]);
10482 const char *data_ptr;
10484 guint32 field_token;
10490 token = read32 (ip + 1);
10492 klass = mini_get_class (method, token, generic_context);
10493 CHECK_TYPELOAD (klass);
10495 context_used = mini_class_check_context_used (cfg, klass);
10497 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
10498 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
10499 ins->sreg1 = sp [0]->dreg;
10500 ins->type = STACK_I4;
10501 ins->dreg = alloc_ireg (cfg);
10502 MONO_ADD_INS (cfg->cbb, ins);
10503 *sp = mono_decompose_opcode (cfg, ins);
10506 if (context_used) {
10507 MonoInst *args [3];
10508 MonoClass *array_class = mono_array_class_get (klass, 1);
10509 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
10511 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
10514 args [0] = emit_get_rgctx_klass (cfg, context_used,
10515 array_class, MONO_RGCTX_INFO_VTABLE);
10520 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
10522 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
10524 if (cfg->opt & MONO_OPT_SHARED) {
10525 /* Decompose now to avoid problems with references to the domainvar */
10526 MonoInst *iargs [3];
10528 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10529 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10530 iargs [2] = sp [0];
10532 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
10534 /* Decompose later since it is needed by abcrem */
10535 MonoClass *array_type = mono_array_class_get (klass, 1);
10536 mono_class_vtable (cfg->domain, array_type);
10537 CHECK_TYPELOAD (array_type);
10539 MONO_INST_NEW (cfg, ins, OP_NEWARR);
10540 ins->dreg = alloc_ireg_ref (cfg);
10541 ins->sreg1 = sp [0]->dreg;
10542 ins->inst_newa_class = klass;
10543 ins->type = STACK_OBJ;
10544 ins->klass = array_type;
10545 MONO_ADD_INS (cfg->cbb, ins);
10546 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10547 cfg->cbb->has_array_access = TRUE;
10549 /* Needed so mono_emit_load_get_addr () gets called */
10550 mono_get_got_var (cfg);
10560 * we inline/optimize the initialization sequence if possible.
10561 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
10562 * for small sizes open code the memcpy
10563 * ensure the rva field is big enough
10565 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
10566 MonoMethod *memcpy_method = get_memcpy_method ();
10567 MonoInst *iargs [3];
10568 int add_reg = alloc_ireg_mp (cfg);
10570 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
10571 if (cfg->compile_aot) {
10572 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
10574 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
10576 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
10577 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
10586 if (sp [0]->type != STACK_OBJ)
10589 MONO_INST_NEW (cfg, ins, OP_LDLEN);
10590 ins->dreg = alloc_preg (cfg);
10591 ins->sreg1 = sp [0]->dreg;
10592 ins->type = STACK_I4;
10593 /* This flag will be inherited by the decomposition */
10594 ins->flags |= MONO_INST_FAULT;
10595 MONO_ADD_INS (cfg->cbb, ins);
10596 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10597 cfg->cbb->has_array_access = TRUE;
10605 if (sp [0]->type != STACK_OBJ)
10608 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10610 klass = mini_get_class (method, read32 (ip + 1), generic_context);
10611 CHECK_TYPELOAD (klass);
10612 /* we need to make sure that this array is exactly the type it needs
10613 * to be for correctness. the wrappers are lax with their usage
10614 * so we need to ignore them here
10616 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
10617 MonoClass *array_class = mono_array_class_get (klass, 1);
10618 mini_emit_check_array_type (cfg, sp [0], array_class);
10619 CHECK_TYPELOAD (array_class);
10623 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10628 case CEE_LDELEM_I1:
10629 case CEE_LDELEM_U1:
10630 case CEE_LDELEM_I2:
10631 case CEE_LDELEM_U2:
10632 case CEE_LDELEM_I4:
10633 case CEE_LDELEM_U4:
10634 case CEE_LDELEM_I8:
10636 case CEE_LDELEM_R4:
10637 case CEE_LDELEM_R8:
10638 case CEE_LDELEM_REF: {
10644 if (*ip == CEE_LDELEM) {
10646 token = read32 (ip + 1);
10647 klass = mini_get_class (method, token, generic_context);
10648 CHECK_TYPELOAD (klass);
10649 mono_class_init (klass);
10652 klass = array_access_to_klass (*ip);
10654 if (sp [0]->type != STACK_OBJ)
10657 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10659 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
10660 // FIXME-VT: OP_ICONST optimization
10661 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10662 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10663 ins->opcode = OP_LOADV_MEMBASE;
10664 } else if (sp [1]->opcode == OP_ICONST) {
10665 int array_reg = sp [0]->dreg;
10666 int index_reg = sp [1]->dreg;
10667 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
10669 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
10670 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
10672 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10673 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10676 if (*ip == CEE_LDELEM)
10683 case CEE_STELEM_I1:
10684 case CEE_STELEM_I2:
10685 case CEE_STELEM_I4:
10686 case CEE_STELEM_I8:
10687 case CEE_STELEM_R4:
10688 case CEE_STELEM_R8:
10689 case CEE_STELEM_REF:
10694 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10696 if (*ip == CEE_STELEM) {
10698 token = read32 (ip + 1);
10699 klass = mini_get_class (method, token, generic_context);
10700 CHECK_TYPELOAD (klass);
10701 mono_class_init (klass);
10704 klass = array_access_to_klass (*ip);
10706 if (sp [0]->type != STACK_OBJ)
10709 emit_array_store (cfg, klass, sp, TRUE);
10711 if (*ip == CEE_STELEM)
10718 case CEE_CKFINITE: {
10722 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
10723 ins->sreg1 = sp [0]->dreg;
10724 ins->dreg = alloc_freg (cfg);
10725 ins->type = STACK_R8;
10726 MONO_ADD_INS (bblock, ins);
10728 *sp++ = mono_decompose_opcode (cfg, ins);
10733 case CEE_REFANYVAL: {
10734 MonoInst *src_var, *src;
10736 int klass_reg = alloc_preg (cfg);
10737 int dreg = alloc_preg (cfg);
10739 GSHAREDVT_FAILURE (*ip);
10742 MONO_INST_NEW (cfg, ins, *ip);
10745 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
10746 CHECK_TYPELOAD (klass);
10747 mono_class_init (klass);
10749 context_used = mini_class_check_context_used (cfg, klass);
10752 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10754 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
10755 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
10756 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
10758 if (context_used) {
10759 MonoInst *klass_ins;
10761 klass_ins = emit_get_rgctx_klass (cfg, context_used,
10762 klass, MONO_RGCTX_INFO_KLASS);
10765 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
10766 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
10768 mini_emit_class_check (cfg, klass_reg, klass);
10770 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
10771 ins->type = STACK_MP;
10776 case CEE_MKREFANY: {
10777 MonoInst *loc, *addr;
10779 GSHAREDVT_FAILURE (*ip);
10782 MONO_INST_NEW (cfg, ins, *ip);
10785 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
10786 CHECK_TYPELOAD (klass);
10787 mono_class_init (klass);
10789 context_used = mini_class_check_context_used (cfg, klass);
10791 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
10792 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
10794 if (context_used) {
10795 MonoInst *const_ins;
10796 int type_reg = alloc_preg (cfg);
10798 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
10799 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
10800 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
10801 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
10802 } else if (cfg->compile_aot) {
10803 int const_reg = alloc_preg (cfg);
10804 int type_reg = alloc_preg (cfg);
10806 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
10807 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
10808 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
10809 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
10811 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
10812 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
10814 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
10816 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
10817 ins->type = STACK_VTYPE;
10818 ins->klass = mono_defaults.typed_reference_class;
10823 case CEE_LDTOKEN: {
10825 MonoClass *handle_class;
10827 CHECK_STACK_OVF (1);
10830 n = read32 (ip + 1);
10832 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
10833 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
10834 handle = mono_method_get_wrapper_data (method, n);
10835 handle_class = mono_method_get_wrapper_data (method, n + 1);
10836 if (handle_class == mono_defaults.typehandle_class)
10837 handle = &((MonoClass*)handle)->byval_arg;
10840 handle = mono_ldtoken (image, n, &handle_class, generic_context);
10844 mono_class_init (handle_class);
10845 if (cfg->generic_sharing_context) {
10846 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
10847 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
10848 /* This case handles ldtoken
10849 of an open type, like for
10852 } else if (handle_class == mono_defaults.typehandle_class) {
10853 /* If we get a MONO_TYPE_CLASS
10854 then we need to provide the
10856 instantiation of it. */
10857 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
10860 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type (handle));
10861 } else if (handle_class == mono_defaults.fieldhandle_class)
10862 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
10863 else if (handle_class == mono_defaults.methodhandle_class)
10864 context_used = mini_method_check_context_used (cfg, handle);
10866 g_assert_not_reached ();
10869 if ((cfg->opt & MONO_OPT_SHARED) &&
10870 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
10871 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
10872 MonoInst *addr, *vtvar, *iargs [3];
10873 int method_context_used;
10875 method_context_used = mini_method_check_context_used (cfg, method);
10877 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
10879 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10880 EMIT_NEW_ICONST (cfg, iargs [1], n);
10881 if (method_context_used) {
10882 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
10883 method, MONO_RGCTX_INFO_METHOD);
10884 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
10886 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
10887 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
10889 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
10891 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
10893 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
10895 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
10896 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
10897 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
10898 (cmethod->klass == mono_defaults.systemtype_class) &&
10899 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
10900 MonoClass *tclass = mono_class_from_mono_type (handle);
10902 mono_class_init (tclass);
10903 if (context_used) {
10904 ins = emit_get_rgctx_klass (cfg, context_used,
10905 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
10906 } else if (cfg->compile_aot) {
10907 if (method->wrapper_type) {
10908 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
10909 /* Special case for static synchronized wrappers */
10910 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
10912 /* FIXME: n is not a normal token */
10914 EMIT_NEW_PCONST (cfg, ins, NULL);
10917 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
10920 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
10922 ins->type = STACK_OBJ;
10923 ins->klass = cmethod->klass;
10926 MonoInst *addr, *vtvar;
10928 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
10930 if (context_used) {
10931 if (handle_class == mono_defaults.typehandle_class) {
10932 ins = emit_get_rgctx_klass (cfg, context_used,
10933 mono_class_from_mono_type (handle),
10934 MONO_RGCTX_INFO_TYPE);
10935 } else if (handle_class == mono_defaults.methodhandle_class) {
10936 ins = emit_get_rgctx_method (cfg, context_used,
10937 handle, MONO_RGCTX_INFO_METHOD);
10938 } else if (handle_class == mono_defaults.fieldhandle_class) {
10939 ins = emit_get_rgctx_field (cfg, context_used,
10940 handle, MONO_RGCTX_INFO_CLASS_FIELD);
10942 g_assert_not_reached ();
10944 } else if (cfg->compile_aot) {
10945 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
10947 EMIT_NEW_PCONST (cfg, ins, handle);
10949 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
10950 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
10951 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
10961 MONO_INST_NEW (cfg, ins, OP_THROW);
10963 ins->sreg1 = sp [0]->dreg;
10965 bblock->out_of_line = TRUE;
10966 MONO_ADD_INS (bblock, ins);
10967 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
10968 MONO_ADD_INS (bblock, ins);
10971 link_bblock (cfg, bblock, end_bblock);
10972 start_new_bblock = 1;
10974 case CEE_ENDFINALLY:
10975 /* mono_save_seq_point_info () depends on this */
10976 if (sp != stack_start)
10977 emit_seq_point (cfg, method, ip, FALSE, FALSE);
10978 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
10979 MONO_ADD_INS (bblock, ins);
10981 start_new_bblock = 1;
10984 * Control will leave the method so empty the stack, otherwise
10985 * the next basic block will start with a nonempty stack.
10987 while (sp != stack_start) {
10992 case CEE_LEAVE_S: {
10995 if (*ip == CEE_LEAVE) {
10997 target = ip + 5 + (gint32)read32(ip + 1);
11000 target = ip + 2 + (signed char)(ip [1]);
11003 /* empty the stack */
11004 while (sp != stack_start) {
11009 * If this leave statement is in a catch block, check for a
11010 * pending exception, and rethrow it if necessary.
11011 * We avoid doing this in runtime invoke wrappers, since those are called
11012 * by native code which excepts the wrapper to catch all exceptions.
11014 for (i = 0; i < header->num_clauses; ++i) {
11015 MonoExceptionClause *clause = &header->clauses [i];
11018 * Use <= in the final comparison to handle clauses with multiple
11019 * leave statements, like in bug #78024.
11020 * The ordering of the exception clauses guarantees that we find the
11021 * innermost clause.
11023 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
11025 MonoBasicBlock *dont_throw;
11030 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
11033 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
11035 NEW_BBLOCK (cfg, dont_throw);
11038 * Currently, we always rethrow the abort exception, despite the
11039 * fact that this is not correct. See thread6.cs for an example.
11040 * But propagating the abort exception is more important than
11041 * getting the sematics right.
11043 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
11044 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
11045 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
11047 MONO_START_BB (cfg, dont_throw);
11052 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
11054 MonoExceptionClause *clause;
11056 for (tmp = handlers; tmp; tmp = tmp->next) {
11057 clause = tmp->data;
11058 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
11060 link_bblock (cfg, bblock, tblock);
11061 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
11062 ins->inst_target_bb = tblock;
11063 ins->inst_eh_block = clause;
11064 MONO_ADD_INS (bblock, ins);
11065 bblock->has_call_handler = 1;
11066 if (COMPILE_LLVM (cfg)) {
11067 MonoBasicBlock *target_bb;
11070 * Link the finally bblock with the target, since it will
11071 * conceptually branch there.
11072 * FIXME: Have to link the bblock containing the endfinally.
11074 GET_BBLOCK (cfg, target_bb, target);
11075 link_bblock (cfg, tblock, target_bb);
11078 g_list_free (handlers);
11081 MONO_INST_NEW (cfg, ins, OP_BR);
11082 MONO_ADD_INS (bblock, ins);
11083 GET_BBLOCK (cfg, tblock, target);
11084 link_bblock (cfg, bblock, tblock);
11085 ins->inst_target_bb = tblock;
11086 start_new_bblock = 1;
11088 if (*ip == CEE_LEAVE)
11097 * Mono specific opcodes
11099 case MONO_CUSTOM_PREFIX: {
11101 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
11105 case CEE_MONO_ICALL: {
11107 MonoJitICallInfo *info;
11109 token = read32 (ip + 2);
11110 func = mono_method_get_wrapper_data (method, token);
11111 info = mono_find_jit_icall_by_addr (func);
11113 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
11116 CHECK_STACK (info->sig->param_count);
11117 sp -= info->sig->param_count;
11119 ins = mono_emit_jit_icall (cfg, info->func, sp);
11120 if (!MONO_TYPE_IS_VOID (info->sig->ret))
11124 inline_costs += 10 * num_calls++;
11128 case CEE_MONO_LDPTR: {
11131 CHECK_STACK_OVF (1);
11133 token = read32 (ip + 2);
11135 ptr = mono_method_get_wrapper_data (method, token);
11136 /* FIXME: Generalize this */
11137 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
11138 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
11143 EMIT_NEW_PCONST (cfg, ins, ptr);
11146 inline_costs += 10 * num_calls++;
11147 /* Can't embed random pointers into AOT code */
11151 case CEE_MONO_JIT_ICALL_ADDR: {
11152 MonoJitICallInfo *callinfo;
11155 CHECK_STACK_OVF (1);
11157 token = read32 (ip + 2);
11159 ptr = mono_method_get_wrapper_data (method, token);
11160 callinfo = mono_find_jit_icall_by_addr (ptr);
11161 g_assert (callinfo);
11162 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
11165 inline_costs += 10 * num_calls++;
11168 case CEE_MONO_ICALL_ADDR: {
11169 MonoMethod *cmethod;
11172 CHECK_STACK_OVF (1);
11174 token = read32 (ip + 2);
11176 cmethod = mono_method_get_wrapper_data (method, token);
11178 if (cfg->compile_aot) {
11179 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
11181 ptr = mono_lookup_internal_call (cmethod);
11183 EMIT_NEW_PCONST (cfg, ins, ptr);
11189 case CEE_MONO_VTADDR: {
11190 MonoInst *src_var, *src;
11196 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11197 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
11202 case CEE_MONO_NEWOBJ: {
11203 MonoInst *iargs [2];
11205 CHECK_STACK_OVF (1);
11207 token = read32 (ip + 2);
11208 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11209 mono_class_init (klass);
11210 NEW_DOMAINCONST (cfg, iargs [0]);
11211 MONO_ADD_INS (cfg->cbb, iargs [0]);
11212 NEW_CLASSCONST (cfg, iargs [1], klass);
11213 MONO_ADD_INS (cfg->cbb, iargs [1]);
11214 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
11216 inline_costs += 10 * num_calls++;
11219 case CEE_MONO_OBJADDR:
11222 MONO_INST_NEW (cfg, ins, OP_MOVE);
11223 ins->dreg = alloc_ireg_mp (cfg);
11224 ins->sreg1 = sp [0]->dreg;
11225 ins->type = STACK_MP;
11226 MONO_ADD_INS (cfg->cbb, ins);
11230 case CEE_MONO_LDNATIVEOBJ:
11232 * Similar to LDOBJ, but instead load the unmanaged
11233 * representation of the vtype to the stack.
11238 token = read32 (ip + 2);
11239 klass = mono_method_get_wrapper_data (method, token);
11240 g_assert (klass->valuetype);
11241 mono_class_init (klass);
11244 MonoInst *src, *dest, *temp;
11247 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
11248 temp->backend.is_pinvoke = 1;
11249 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
11250 mini_emit_stobj (cfg, dest, src, klass, TRUE);
11252 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
11253 dest->type = STACK_VTYPE;
11254 dest->klass = klass;
11260 case CEE_MONO_RETOBJ: {
11262 * Same as RET, but return the native representation of a vtype
11265 g_assert (cfg->ret);
11266 g_assert (mono_method_signature (method)->pinvoke);
11271 token = read32 (ip + 2);
11272 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11274 if (!cfg->vret_addr) {
11275 g_assert (cfg->ret_var_is_local);
11277 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
11279 EMIT_NEW_RETLOADA (cfg, ins);
11281 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
11283 if (sp != stack_start)
11286 MONO_INST_NEW (cfg, ins, OP_BR);
11287 ins->inst_target_bb = end_bblock;
11288 MONO_ADD_INS (bblock, ins);
11289 link_bblock (cfg, bblock, end_bblock);
11290 start_new_bblock = 1;
11294 case CEE_MONO_CISINST:
11295 case CEE_MONO_CCASTCLASS: {
11300 token = read32 (ip + 2);
11301 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11302 if (ip [1] == CEE_MONO_CISINST)
11303 ins = handle_cisinst (cfg, klass, sp [0]);
11305 ins = handle_ccastclass (cfg, klass, sp [0]);
11311 case CEE_MONO_SAVE_LMF:
11312 case CEE_MONO_RESTORE_LMF:
11313 #ifdef MONO_ARCH_HAVE_LMF_OPS
11314 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
11315 MONO_ADD_INS (bblock, ins);
11316 cfg->need_lmf_area = TRUE;
11320 case CEE_MONO_CLASSCONST:
11321 CHECK_STACK_OVF (1);
11323 token = read32 (ip + 2);
11324 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
11327 inline_costs += 10 * num_calls++;
11329 case CEE_MONO_NOT_TAKEN:
11330 bblock->out_of_line = TRUE;
11333 case CEE_MONO_TLS: {
11336 CHECK_STACK_OVF (1);
11338 key = (gint32)read32 (ip + 2);
11339 g_assert (key < TLS_KEY_NUM);
11341 ins = mono_create_tls_get (cfg, key);
11343 if (cfg->compile_aot) {
11345 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
11346 ins->dreg = alloc_preg (cfg);
11347 ins->type = STACK_PTR;
11349 g_assert_not_reached ();
11352 ins->type = STACK_PTR;
11353 MONO_ADD_INS (bblock, ins);
11358 case CEE_MONO_DYN_CALL: {
11359 MonoCallInst *call;
11361 /* It would be easier to call a trampoline, but that would put an
11362 * extra frame on the stack, confusing exception handling. So
11363 * implement it inline using an opcode for now.
11366 if (!cfg->dyn_call_var) {
11367 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11368 /* prevent it from being register allocated */
11369 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
11372 /* Has to use a call inst since it local regalloc expects it */
11373 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
11374 ins = (MonoInst*)call;
11376 ins->sreg1 = sp [0]->dreg;
11377 ins->sreg2 = sp [1]->dreg;
11378 MONO_ADD_INS (bblock, ins);
11380 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
11383 inline_costs += 10 * num_calls++;
11387 case CEE_MONO_MEMORY_BARRIER: {
11389 emit_memory_barrier (cfg, (int)read32 (ip + 1));
11393 case CEE_MONO_JIT_ATTACH: {
11394 MonoInst *args [16];
11395 MonoInst *ad_ins, *lmf_ins;
11396 MonoBasicBlock *next_bb = NULL;
11398 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11400 EMIT_NEW_PCONST (cfg, ins, NULL);
11401 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11407 ad_ins = mono_get_domain_intrinsic (cfg);
11408 lmf_ins = mono_get_lmf_intrinsic (cfg);
11411 if (MONO_ARCH_HAVE_TLS_GET && ad_ins && lmf_ins) {
11412 NEW_BBLOCK (cfg, next_bb);
11414 MONO_ADD_INS (cfg->cbb, ad_ins);
11415 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ad_ins->dreg, 0);
11416 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
11418 MONO_ADD_INS (cfg->cbb, lmf_ins);
11419 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, lmf_ins->dreg, 0);
11420 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
11423 if (cfg->compile_aot) {
11424 /* AOT code is only used in the root domain */
11425 EMIT_NEW_PCONST (cfg, args [0], NULL);
11427 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
11429 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
11430 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11433 MONO_START_BB (cfg, next_bb);
11439 case CEE_MONO_JIT_DETACH: {
11440 MonoInst *args [16];
11442 /* Restore the original domain */
11443 dreg = alloc_ireg (cfg);
11444 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
11445 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
11450 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
11456 case CEE_PREFIX1: {
11459 case CEE_ARGLIST: {
11460 /* somewhat similar to LDTOKEN */
11461 MonoInst *addr, *vtvar;
11462 CHECK_STACK_OVF (1);
11463 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
11465 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11466 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
11468 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11469 ins->type = STACK_VTYPE;
11470 ins->klass = mono_defaults.argumenthandle_class;
11483 * The following transforms:
11484 * CEE_CEQ into OP_CEQ
11485 * CEE_CGT into OP_CGT
11486 * CEE_CGT_UN into OP_CGT_UN
11487 * CEE_CLT into OP_CLT
11488 * CEE_CLT_UN into OP_CLT_UN
11490 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
11492 MONO_INST_NEW (cfg, ins, cmp->opcode);
11494 cmp->sreg1 = sp [0]->dreg;
11495 cmp->sreg2 = sp [1]->dreg;
11496 type_from_op (cmp, sp [0], sp [1]);
11498 if ((sp [0]->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
11499 cmp->opcode = OP_LCOMPARE;
11500 else if (sp [0]->type == STACK_R8)
11501 cmp->opcode = OP_FCOMPARE;
11503 cmp->opcode = OP_ICOMPARE;
11504 MONO_ADD_INS (bblock, cmp);
11505 ins->type = STACK_I4;
11506 ins->dreg = alloc_dreg (cfg, ins->type);
11507 type_from_op (ins, sp [0], sp [1]);
11509 if (cmp->opcode == OP_FCOMPARE) {
11511 * The backends expect the fceq opcodes to do the
11514 cmp->opcode = OP_NOP;
11515 ins->sreg1 = cmp->sreg1;
11516 ins->sreg2 = cmp->sreg2;
11518 MONO_ADD_INS (bblock, ins);
11524 MonoInst *argconst;
11525 MonoMethod *cil_method;
11527 CHECK_STACK_OVF (1);
11529 n = read32 (ip + 2);
11530 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
11531 if (!cmethod || mono_loader_get_last_error ())
11533 mono_class_init (cmethod->klass);
11535 mono_save_token_info (cfg, image, n, cmethod);
11537 context_used = mini_method_check_context_used (cfg, cmethod);
11539 cil_method = cmethod;
11540 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
11541 METHOD_ACCESS_FAILURE;
11543 if (mono_security_cas_enabled ()) {
11544 if (check_linkdemand (cfg, method, cmethod))
11545 INLINE_FAILURE ("linkdemand");
11546 CHECK_CFG_EXCEPTION;
11547 } else if (mono_security_core_clr_enabled ()) {
11548 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
11552 * Optimize the common case of ldftn+delegate creation
11554 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
11555 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
11556 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
11557 MonoInst *target_ins;
11558 MonoMethod *invoke;
11559 int invoke_context_used;
11561 invoke = mono_get_delegate_invoke (ctor_method->klass);
11562 if (!invoke || !mono_method_signature (invoke))
11565 invoke_context_used = mini_method_check_context_used (cfg, invoke);
11567 target_ins = sp [-1];
11569 if (mono_security_core_clr_enabled ())
11570 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
11572 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
11573 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
11574 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
11575 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
11576 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
11580 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
11581 /* FIXME: SGEN support */
11582 if (invoke_context_used == 0) {
11584 if (cfg->verbose_level > 3)
11585 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
11587 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
11588 CHECK_CFG_EXCEPTION;
11597 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
11598 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
11602 inline_costs += 10 * num_calls++;
11605 case CEE_LDVIRTFTN: {
11606 MonoInst *args [2];
11610 n = read32 (ip + 2);
11611 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
11612 if (!cmethod || mono_loader_get_last_error ())
11614 mono_class_init (cmethod->klass);
11616 context_used = mini_method_check_context_used (cfg, cmethod);
11618 if (mono_security_cas_enabled ()) {
11619 if (check_linkdemand (cfg, method, cmethod))
11620 INLINE_FAILURE ("linkdemand");
11621 CHECK_CFG_EXCEPTION;
11622 } else if (mono_security_core_clr_enabled ()) {
11623 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
11629 args [1] = emit_get_rgctx_method (cfg, context_used,
11630 cmethod, MONO_RGCTX_INFO_METHOD);
11633 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
11635 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
11638 inline_costs += 10 * num_calls++;
11642 CHECK_STACK_OVF (1);
11644 n = read16 (ip + 2);
11646 EMIT_NEW_ARGLOAD (cfg, ins, n);
11651 CHECK_STACK_OVF (1);
11653 n = read16 (ip + 2);
11655 NEW_ARGLOADA (cfg, ins, n);
11656 MONO_ADD_INS (cfg->cbb, ins);
11664 n = read16 (ip + 2);
11666 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
11668 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
11672 CHECK_STACK_OVF (1);
11674 n = read16 (ip + 2);
11676 EMIT_NEW_LOCLOAD (cfg, ins, n);
11681 unsigned char *tmp_ip;
11682 CHECK_STACK_OVF (1);
11684 n = read16 (ip + 2);
11687 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
11693 EMIT_NEW_LOCLOADA (cfg, ins, n);
11702 n = read16 (ip + 2);
11704 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
11706 emit_stloc_ir (cfg, sp, header, n);
11713 if (sp != stack_start)
11715 if (cfg->method != method)
11717 * Inlining this into a loop in a parent could lead to
11718 * stack overflows which is different behavior than the
11719 * non-inlined case, thus disable inlining in this case.
11721 goto inline_failure;
11723 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
11724 ins->dreg = alloc_preg (cfg);
11725 ins->sreg1 = sp [0]->dreg;
11726 ins->type = STACK_PTR;
11727 MONO_ADD_INS (cfg->cbb, ins);
11729 cfg->flags |= MONO_CFG_HAS_ALLOCA;
11731 ins->flags |= MONO_INST_INIT;
11736 case CEE_ENDFILTER: {
11737 MonoExceptionClause *clause, *nearest;
11738 int cc, nearest_num;
11742 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
11744 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
11745 ins->sreg1 = (*sp)->dreg;
11746 MONO_ADD_INS (bblock, ins);
11747 start_new_bblock = 1;
11752 for (cc = 0; cc < header->num_clauses; ++cc) {
11753 clause = &header->clauses [cc];
11754 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
11755 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
11756 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
11761 g_assert (nearest);
11762 if ((ip - header->code) != nearest->handler_offset)
11767 case CEE_UNALIGNED_:
11768 ins_flag |= MONO_INST_UNALIGNED;
11769 /* FIXME: record alignment? we can assume 1 for now */
11773 case CEE_VOLATILE_:
11774 ins_flag |= MONO_INST_VOLATILE;
11778 ins_flag |= MONO_INST_TAILCALL;
11779 cfg->flags |= MONO_CFG_HAS_TAIL;
11780 /* Can't inline tail calls at this time */
11781 inline_costs += 100000;
11788 token = read32 (ip + 2);
11789 klass = mini_get_class (method, token, generic_context);
11790 CHECK_TYPELOAD (klass);
11791 if (generic_class_is_reference_type (cfg, klass))
11792 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
11794 mini_emit_initobj (cfg, *sp, NULL, klass);
11798 case CEE_CONSTRAINED_:
11800 token = read32 (ip + 2);
11801 constrained_call = mini_get_class (method, token, generic_context);
11802 CHECK_TYPELOAD (constrained_call);
11806 case CEE_INITBLK: {
11807 MonoInst *iargs [3];
11811 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
11812 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
11813 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
11814 /* emit_memset only works when val == 0 */
11815 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
11817 iargs [0] = sp [0];
11818 iargs [1] = sp [1];
11819 iargs [2] = sp [2];
11820 if (ip [1] == CEE_CPBLK) {
11821 MonoMethod *memcpy_method = get_memcpy_method ();
11822 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11824 MonoMethod *memset_method = get_memset_method ();
11825 mono_emit_method_call (cfg, memset_method, iargs, NULL);
11835 ins_flag |= MONO_INST_NOTYPECHECK;
11837 ins_flag |= MONO_INST_NORANGECHECK;
11838 /* we ignore the no-nullcheck for now since we
11839 * really do it explicitly only when doing callvirt->call
11843 case CEE_RETHROW: {
11845 int handler_offset = -1;
11847 for (i = 0; i < header->num_clauses; ++i) {
11848 MonoExceptionClause *clause = &header->clauses [i];
11849 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
11850 handler_offset = clause->handler_offset;
11855 bblock->flags |= BB_EXCEPTION_UNSAFE;
11857 g_assert (handler_offset != -1);
11859 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
11860 MONO_INST_NEW (cfg, ins, OP_RETHROW);
11861 ins->sreg1 = load->dreg;
11862 MONO_ADD_INS (bblock, ins);
11864 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11865 MONO_ADD_INS (bblock, ins);
11868 link_bblock (cfg, bblock, end_bblock);
11869 start_new_bblock = 1;
11877 GSHAREDVT_FAILURE (*ip);
11879 CHECK_STACK_OVF (1);
11881 token = read32 (ip + 2);
11882 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic && !generic_context) {
11883 MonoType *type = mono_type_create_from_typespec (image, token);
11884 val = mono_type_size (type, &ialign);
11886 MonoClass *klass = mono_class_get_full (image, token, generic_context);
11887 CHECK_TYPELOAD (klass);
11888 mono_class_init (klass);
11889 val = mono_type_size (&klass->byval_arg, &ialign);
11891 EMIT_NEW_ICONST (cfg, ins, val);
11896 case CEE_REFANYTYPE: {
11897 MonoInst *src_var, *src;
11899 GSHAREDVT_FAILURE (*ip);
11905 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11907 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11908 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11909 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
11914 case CEE_READONLY_:
11927 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
11937 g_warning ("opcode 0x%02x not handled", *ip);
11941 if (start_new_bblock != 1)
11944 bblock->cil_length = ip - bblock->cil_code;
11945 if (bblock->next_bb) {
11946 /* This could already be set because of inlining, #693905 */
11947 MonoBasicBlock *bb = bblock;
11949 while (bb->next_bb)
11951 bb->next_bb = end_bblock;
11953 bblock->next_bb = end_bblock;
11956 if (cfg->method == method && cfg->domainvar) {
11958 MonoInst *get_domain;
11960 cfg->cbb = init_localsbb;
11962 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
11963 MONO_ADD_INS (cfg->cbb, get_domain);
11965 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
11967 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
11968 MONO_ADD_INS (cfg->cbb, store);
11971 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
11972 if (cfg->compile_aot)
11973 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
11974 mono_get_got_var (cfg);
11977 if (cfg->method == method && cfg->got_var)
11978 mono_emit_load_got_addr (cfg);
11980 if (init_localsbb) {
11981 cfg->cbb = init_localsbb;
11983 for (i = 0; i < header->num_locals; ++i) {
11984 emit_init_local (cfg, i, header->locals [i], init_locals);
11988 if (cfg->init_ref_vars && cfg->method == method) {
11989 /* Emit initialization for ref vars */
11990 // FIXME: Avoid duplication initialization for IL locals.
11991 for (i = 0; i < cfg->num_varinfo; ++i) {
11992 MonoInst *ins = cfg->varinfo [i];
11994 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
11995 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
11999 if (cfg->lmf_var && cfg->method == method) {
12000 cfg->cbb = init_localsbb;
12001 emit_push_lmf (cfg);
12005 MonoBasicBlock *bb;
12008 * Make seq points at backward branch targets interruptable.
12010 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
12011 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
12012 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
12015 /* Add a sequence point for method entry/exit events */
12017 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
12018 MONO_ADD_INS (init_localsbb, ins);
12019 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
12020 MONO_ADD_INS (cfg->bb_exit, ins);
12024 * Add seq points for IL offsets which have line number info but for which no seq point was generated during JITting because
12025 * the code they refer to was dead (#11880).
12027 if (sym_seq_points) {
12028 for (i = 0; i < header->code_size; ++i) {
12029 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
12032 NEW_SEQ_POINT (cfg, ins, i, FALSE);
12033 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
12040 if (cfg->method == method) {
12041 MonoBasicBlock *bb;
12042 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12043 bb->region = mono_find_block_region (cfg, bb->real_offset);
12045 mono_create_spvar_for_region (cfg, bb->region);
12046 if (cfg->verbose_level > 2)
12047 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
12051 g_slist_free (class_inits);
12052 dont_inline = g_list_remove (dont_inline, method);
12054 if (inline_costs < 0) {
12057 /* Method is too large */
12058 mname = mono_method_full_name (method, TRUE);
12059 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
12060 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
12062 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
12063 mono_basic_block_free (original_bb);
12067 if ((cfg->verbose_level > 2) && (cfg->method == method))
12068 mono_print_code (cfg, "AFTER METHOD-TO-IR");
12070 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
12071 mono_basic_block_free (original_bb);
12072 return inline_costs;
12075 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
12082 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
12086 set_exception_type_from_invalid_il (cfg, method, ip);
12090 g_slist_free (class_inits);
12091 mono_basic_block_free (original_bb);
12092 dont_inline = g_list_remove (dont_inline, method);
12093 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
12098 store_membase_reg_to_store_membase_imm (int opcode)
12101 case OP_STORE_MEMBASE_REG:
12102 return OP_STORE_MEMBASE_IMM;
12103 case OP_STOREI1_MEMBASE_REG:
12104 return OP_STOREI1_MEMBASE_IMM;
12105 case OP_STOREI2_MEMBASE_REG:
12106 return OP_STOREI2_MEMBASE_IMM;
12107 case OP_STOREI4_MEMBASE_REG:
12108 return OP_STOREI4_MEMBASE_IMM;
12109 case OP_STOREI8_MEMBASE_REG:
12110 return OP_STOREI8_MEMBASE_IMM;
12112 g_assert_not_reached ();
12119 mono_op_to_op_imm (int opcode)
12123 return OP_IADD_IMM;
12125 return OP_ISUB_IMM;
12127 return OP_IDIV_IMM;
12129 return OP_IDIV_UN_IMM;
12131 return OP_IREM_IMM;
12133 return OP_IREM_UN_IMM;
12135 return OP_IMUL_IMM;
12137 return OP_IAND_IMM;
12141 return OP_IXOR_IMM;
12143 return OP_ISHL_IMM;
12145 return OP_ISHR_IMM;
12147 return OP_ISHR_UN_IMM;
12150 return OP_LADD_IMM;
12152 return OP_LSUB_IMM;
12154 return OP_LAND_IMM;
12158 return OP_LXOR_IMM;
12160 return OP_LSHL_IMM;
12162 return OP_LSHR_IMM;
12164 return OP_LSHR_UN_IMM;
12167 return OP_COMPARE_IMM;
12169 return OP_ICOMPARE_IMM;
12171 return OP_LCOMPARE_IMM;
12173 case OP_STORE_MEMBASE_REG:
12174 return OP_STORE_MEMBASE_IMM;
12175 case OP_STOREI1_MEMBASE_REG:
12176 return OP_STOREI1_MEMBASE_IMM;
12177 case OP_STOREI2_MEMBASE_REG:
12178 return OP_STOREI2_MEMBASE_IMM;
12179 case OP_STOREI4_MEMBASE_REG:
12180 return OP_STOREI4_MEMBASE_IMM;
12182 #if defined(TARGET_X86) || defined (TARGET_AMD64)
12184 return OP_X86_PUSH_IMM;
12185 case OP_X86_COMPARE_MEMBASE_REG:
12186 return OP_X86_COMPARE_MEMBASE_IMM;
12188 #if defined(TARGET_AMD64)
12189 case OP_AMD64_ICOMPARE_MEMBASE_REG:
12190 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
12192 case OP_VOIDCALL_REG:
12193 return OP_VOIDCALL;
12201 return OP_LOCALLOC_IMM;
12208 ldind_to_load_membase (int opcode)
12212 return OP_LOADI1_MEMBASE;
12214 return OP_LOADU1_MEMBASE;
12216 return OP_LOADI2_MEMBASE;
12218 return OP_LOADU2_MEMBASE;
12220 return OP_LOADI4_MEMBASE;
12222 return OP_LOADU4_MEMBASE;
12224 return OP_LOAD_MEMBASE;
12225 case CEE_LDIND_REF:
12226 return OP_LOAD_MEMBASE;
12228 return OP_LOADI8_MEMBASE;
12230 return OP_LOADR4_MEMBASE;
12232 return OP_LOADR8_MEMBASE;
12234 g_assert_not_reached ();
12241 stind_to_store_membase (int opcode)
12245 return OP_STOREI1_MEMBASE_REG;
12247 return OP_STOREI2_MEMBASE_REG;
12249 return OP_STOREI4_MEMBASE_REG;
12251 case CEE_STIND_REF:
12252 return OP_STORE_MEMBASE_REG;
12254 return OP_STOREI8_MEMBASE_REG;
12256 return OP_STORER4_MEMBASE_REG;
12258 return OP_STORER8_MEMBASE_REG;
12260 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a load-from-membase opcode to the corresponding load-from-absolute-
 * address opcode, used when the base register is known to hold a constant.
 * Only implemented on targets with absolute addressing modes; returns -1
 * elsewhere or when no mapping exists.
 */
int
mono_load_membase_to_load_mem (int opcode)
{
	// FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_LOAD_MEMBASE:
		return OP_LOAD_MEM;
	case OP_LOADU1_MEMBASE:
		return OP_LOADU1_MEM;
	case OP_LOADU2_MEMBASE:
		return OP_LOADU2_MEM;
	case OP_LOADI4_MEMBASE:
		return OP_LOADI4_MEM;
	case OP_LOADU4_MEMBASE:
		return OP_LOADU4_MEM;
#if SIZEOF_REGISTER == 8
	case OP_LOADI8_MEMBASE:
		return OP_LOADI8_MEM;
#endif
	}
#endif

	return -1;
}
/*
 * op_to_op_dest_membase:
 *
 *   Fold an ALU opcode whose result is immediately stored back through
 * STORE_OPCODE into a single read-modify-write membase instruction
 * (x86/amd64 only). Returns -1 when the combination cannot be folded.
 */
static inline int
op_to_op_dest_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86)
	/* Only pointer-sized/int32 stores can be folded on x86 */
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
		return -1;

	switch (opcode) {
	case OP_IADD:
		return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB:
		return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND:
		return OP_X86_AND_MEMBASE_REG;
	case OP_IOR:
		return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR:
		return OP_X86_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_IADD_IMM:
		return OP_X86_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_ISUB_IMM:
		return OP_X86_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_IAND_IMM:
		return OP_X86_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_IOR_IMM:
		return OP_X86_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_IXOR_IMM:
		return OP_X86_XOR_MEMBASE_IMM;
	case OP_MOVE:
		/* The move becomes redundant once the ALU op writes memory */
		return OP_NOP;
	}
#endif

#if defined(TARGET_AMD64)
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
		return -1;

	switch (opcode) {
	case OP_IADD:
		return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB:
		return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND:
		return OP_X86_AND_MEMBASE_REG;
	case OP_IOR:
		return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR:
		return OP_X86_XOR_MEMBASE_REG;
	case OP_IADD_IMM:
		return OP_X86_ADD_MEMBASE_IMM;
	case OP_ISUB_IMM:
		return OP_X86_SUB_MEMBASE_IMM;
	case OP_IAND_IMM:
		return OP_X86_AND_MEMBASE_IMM;
	case OP_IOR_IMM:
		return OP_X86_OR_MEMBASE_IMM;
	case OP_IXOR_IMM:
		return OP_X86_XOR_MEMBASE_IMM;
	case OP_LADD:
		return OP_AMD64_ADD_MEMBASE_REG;
	case OP_LSUB:
		return OP_AMD64_SUB_MEMBASE_REG;
	case OP_LAND:
		return OP_AMD64_AND_MEMBASE_REG;
	case OP_LOR:
		return OP_AMD64_OR_MEMBASE_REG;
	case OP_LXOR:
		return OP_AMD64_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_LADD_IMM:
		return OP_AMD64_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_LSUB_IMM:
		return OP_AMD64_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_LAND_IMM:
		return OP_AMD64_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_LOR_IMM:
		return OP_AMD64_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_LXOR_IMM:
		return OP_AMD64_XOR_MEMBASE_IMM;
	case OP_MOVE:
		return OP_NOP;
	}
#endif

	return -1;
}
/*
 * op_to_op_store_membase:
 *
 *   Fold a compare-result opcode whose byte result is stored through
 * STORE_OPCODE into a single setcc-to-memory instruction (x86/amd64 only).
 * Returns -1 when no folding applies.
 */
static inline int
op_to_op_store_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_ICEQ:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETEQ_MEMBASE;
		break;
	case OP_CNE:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETNE_MEMBASE;
		break;
	}
#endif

	return -1;
}
/*
 * op_to_op_src1_membase:
 *
 *   Fold a load feeding the first source operand of OPCODE into a
 * memory-operand form of the instruction (x86/amd64 only), so the separate
 * load can be eliminated. LOAD_OPCODE is the opcode of the feeding load.
 * Returns -1 when the combination cannot be folded.
 */
static inline int
op_to_op_src1_membase (int load_opcode, int opcode)
{
#ifdef TARGET_X86
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return -1;

	switch (opcode) {
	case OP_X86_PUSH:
		return OP_X86_PUSH_MEMBASE;
	case OP_COMPARE_IMM:
	case OP_ICOMPARE_IMM:
		return OP_X86_COMPARE_MEMBASE_IMM;
	case OP_COMPARE:
	case OP_ICOMPARE:
		return OP_X86_COMPARE_MEMBASE_REG;
	}
#endif

#ifdef TARGET_AMD64
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	switch (opcode) {
	case OP_X86_PUSH:
#ifdef __mono_ilp32__
		/* Under ILP32 OP_LOAD_MEMBASE is only 32 bits wide */
		if (load_opcode == OP_LOADI8_MEMBASE)
#else
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
#endif
			return OP_X86_PUSH_MEMBASE;
		break;
		/* FIXME: This only works for 32 bit immediates
	case OP_COMPARE_IMM:
	case OP_LCOMPARE_IMM:
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_AMD64_COMPARE_MEMBASE_IMM;
		*/
	case OP_ICOMPARE_IMM:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_IMM;
		break;
	case OP_COMPARE:
	case OP_LCOMPARE:
#ifdef __mono_ilp32__
		if (load_opcode == OP_LOAD_MEMBASE)
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
		if (load_opcode == OP_LOADI8_MEMBASE)
#else
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
#endif
			return OP_AMD64_COMPARE_MEMBASE_REG;
		break;
	case OP_ICOMPARE:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
		break;
	}
#endif

	return -1;
}
/*
 * op_to_op_src2_membase:
 *
 *   Fold a load feeding the second source operand of OPCODE into a
 * memory-operand form of the instruction (x86/amd64 only). LOAD_OPCODE is
 * the opcode of the feeding load. Returns -1 when no folding applies.
 */
static inline int
op_to_op_src2_membase (int load_opcode, int opcode)
{
#ifdef TARGET_X86
	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return -1;

	switch (opcode) {
	case OP_COMPARE:
	case OP_ICOMPARE:
		return OP_X86_COMPARE_REG_MEMBASE;
	case OP_IADD:
		return OP_X86_ADD_REG_MEMBASE;
	case OP_ISUB:
		return OP_X86_SUB_REG_MEMBASE;
	case OP_IAND:
		return OP_X86_AND_REG_MEMBASE;
	case OP_IOR:
		return OP_X86_OR_REG_MEMBASE;
	case OP_IXOR:
		return OP_X86_XOR_REG_MEMBASE;
	}
#endif

#ifdef TARGET_AMD64
#ifdef __mono_ilp32__
	/* Under ILP32, OP_LOAD_MEMBASE is 32 bits wide */
	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
#else
	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
#endif
		switch (opcode) {
		case OP_ICOMPARE:
			return OP_AMD64_ICOMPARE_REG_MEMBASE;
		case OP_IADD:
			return OP_X86_ADD_REG_MEMBASE;
		case OP_ISUB:
			return OP_X86_SUB_REG_MEMBASE;
		case OP_IAND:
			return OP_X86_AND_REG_MEMBASE;
		case OP_IOR:
			return OP_X86_OR_REG_MEMBASE;
		case OP_IXOR:
			return OP_X86_XOR_REG_MEMBASE;
		}
#ifdef __mono_ilp32__
	} else if (load_opcode == OP_LOADI8_MEMBASE) {
#else
	} else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
#endif
		switch (opcode) {
		case OP_COMPARE:
		case OP_LCOMPARE:
			return OP_AMD64_COMPARE_REG_MEMBASE;
		case OP_LADD:
			return OP_AMD64_ADD_REG_MEMBASE;
		case OP_LSUB:
			return OP_AMD64_SUB_REG_MEMBASE;
		case OP_LAND:
			return OP_AMD64_AND_REG_MEMBASE;
		case OP_LOR:
			return OP_AMD64_OR_REG_MEMBASE;
		case OP_LXOR:
			return OP_AMD64_XOR_REG_MEMBASE;
		}
	}
#endif

	return -1;
}
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuse the conversion (return -1) for
 * opcodes which the current architecture emulates in software, since the
 * emulation helpers have no immediate forms.
 */
static int
mono_op_to_op_imm_noemul (int opcode)
{
	switch (opcode) {
#if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
	case OP_LSHR:
	case OP_LSHL:
	case OP_LSHR_UN:
		return -1;
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
	case OP_IDIV:
	case OP_IDIV_UN:
	case OP_IREM:
	case OP_IREM_UN:
		return -1;
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV)
	case OP_IMUL:
		return -1;
#endif
	default:
		return mono_op_to_op_imm (opcode);
	}
}
12574 * mono_handle_global_vregs:
12576 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
12580 mono_handle_global_vregs (MonoCompile *cfg)
12582 gint32 *vreg_to_bb;
12583 MonoBasicBlock *bb;
12586 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
12588 #ifdef MONO_ARCH_SIMD_INTRINSICS
12589 if (cfg->uses_simd_intrinsics)
12590 mono_simd_simplify_indirection (cfg);
12593 /* Find local vregs used in more than one bb */
12594 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12595 MonoInst *ins = bb->code;
12596 int block_num = bb->block_num;
12598 if (cfg->verbose_level > 2)
12599 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
12602 for (; ins; ins = ins->next) {
12603 const char *spec = INS_INFO (ins->opcode);
12604 int regtype = 0, regindex;
12607 if (G_UNLIKELY (cfg->verbose_level > 2))
12608 mono_print_ins (ins);
12610 g_assert (ins->opcode >= MONO_CEE_LAST);
12612 for (regindex = 0; regindex < 4; regindex ++) {
12615 if (regindex == 0) {
12616 regtype = spec [MONO_INST_DEST];
12617 if (regtype == ' ')
12620 } else if (regindex == 1) {
12621 regtype = spec [MONO_INST_SRC1];
12622 if (regtype == ' ')
12625 } else if (regindex == 2) {
12626 regtype = spec [MONO_INST_SRC2];
12627 if (regtype == ' ')
12630 } else if (regindex == 3) {
12631 regtype = spec [MONO_INST_SRC3];
12632 if (regtype == ' ')
12637 #if SIZEOF_REGISTER == 4
12638 /* In the LLVM case, the long opcodes are not decomposed */
12639 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
12641 * Since some instructions reference the original long vreg,
12642 * and some reference the two component vregs, it is quite hard
12643 * to determine when it needs to be global. So be conservative.
12645 if (!get_vreg_to_inst (cfg, vreg)) {
12646 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
12648 if (cfg->verbose_level > 2)
12649 printf ("LONG VREG R%d made global.\n", vreg);
12653 * Make the component vregs volatile since the optimizations can
12654 * get confused otherwise.
12656 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
12657 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
12661 g_assert (vreg != -1);
12663 prev_bb = vreg_to_bb [vreg];
12664 if (prev_bb == 0) {
12665 /* 0 is a valid block num */
12666 vreg_to_bb [vreg] = block_num + 1;
12667 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
12668 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
12671 if (!get_vreg_to_inst (cfg, vreg)) {
12672 if (G_UNLIKELY (cfg->verbose_level > 2))
12673 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
12677 if (vreg_is_ref (cfg, vreg))
12678 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
12680 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
12683 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
12686 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
12689 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
12692 g_assert_not_reached ();
12696 /* Flag as having been used in more than one bb */
12697 vreg_to_bb [vreg] = -1;
12703 /* If a variable is used in only one bblock, convert it into a local vreg */
12704 for (i = 0; i < cfg->num_varinfo; i++) {
12705 MonoInst *var = cfg->varinfo [i];
12706 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
12708 switch (var->type) {
12714 #if SIZEOF_REGISTER == 8
12717 #if !defined(TARGET_X86)
12718 /* Enabling this screws up the fp stack on x86 */
12721 if (mono_arch_is_soft_float ())
12724 /* Arguments are implicitly global */
12725 /* Putting R4 vars into registers doesn't work currently */
12726 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
12727 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
12729 * Make that the variable's liveness interval doesn't contain a call, since
12730 * that would cause the lvreg to be spilled, making the whole optimization
12733 /* This is too slow for JIT compilation */
12735 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
12737 int def_index, call_index, ins_index;
12738 gboolean spilled = FALSE;
12743 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
12744 const char *spec = INS_INFO (ins->opcode);
12746 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
12747 def_index = ins_index;
12749 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
12750 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
12751 if (call_index > def_index) {
12757 if (MONO_IS_CALL (ins))
12758 call_index = ins_index;
12768 if (G_UNLIKELY (cfg->verbose_level > 2))
12769 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
12770 var->flags |= MONO_INST_IS_DEAD;
12771 cfg->vreg_to_inst [var->dreg] = NULL;
12778 * Compress the varinfo and vars tables so the liveness computation is faster and
12779 * takes up less space.
12782 for (i = 0; i < cfg->num_varinfo; ++i) {
12783 MonoInst *var = cfg->varinfo [i];
12784 if (pos < i && cfg->locals_start == i)
12785 cfg->locals_start = pos;
12786 if (!(var->flags & MONO_INST_IS_DEAD)) {
12788 cfg->varinfo [pos] = cfg->varinfo [i];
12789 cfg->varinfo [pos]->inst_c0 = pos;
12790 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
12791 cfg->vars [pos].idx = pos;
12792 #if SIZEOF_REGISTER == 4
12793 if (cfg->varinfo [pos]->type == STACK_I8) {
12794 /* Modify the two component vars too */
12797 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
12798 var1->inst_c0 = pos;
12799 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
12800 var1->inst_c0 = pos;
12807 cfg->num_varinfo = pos;
12808 if (cfg->locals_start > cfg->num_varinfo)
12809 cfg->locals_start = cfg->num_varinfo;
/*
 * NOTE(review): this excerpt was extracted with the original file's line
 * numbers baked into each line, and many intermediate lines are elided
 * (e.g. the declarations of 'lvregs', 'spec2', 'ins', 'store_opcode' and
 * 'lvreg', and several closing braces, are not visible here).  The comments
 * added below describe only what the visible lines demonstrate; anything
 * depending on the missing lines is explicitly hedged.
 */
12813 * mono_spill_global_vars:
12815 * Generate spill code for variables which are not allocated to registers,
12816 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
12817 * code is generated which could be optimized by the local optimization passes.
12820 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
12822 MonoBasicBlock *bb;
12824 int orig_next_vreg;
12825 guint32 *vreg_to_lvreg;
12827 guint32 i, lvregs_len;
12828 gboolean dest_has_lvreg = FALSE;
12829 guint32 stacktypes [128];
12830 MonoInst **live_range_start, **live_range_end;
12831 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
12832 int *gsharedvt_vreg_to_idx = NULL;
12834 *need_local_opts = FALSE;
/* spec2 is a scratch instruction-spec buffer (declared on an elided line);
 * it is rebuilt per store instruction below, so clear it once up front. */
12836 memset (spec2, 0, sizeof (spec2));
12838 /* FIXME: Move this function to mini.c */
/* Map the ins-spec register-type characters to the stack type used when
 * allocating replacement dregs/sregs further down. */
12839 stacktypes ['i'] = STACK_PTR;
12840 stacktypes ['l'] = STACK_I8;
12841 stacktypes ['f'] = STACK_R8;
12842 #ifdef MONO_ARCH_SIMD_INTRINSICS
12843 stacktypes ['x'] = STACK_VTYPE;
/* On 32 bit targets, each long variable gets two component variables:
 * dreg + 1 (least significant word) and dreg + 2 (most significant word),
 * each addressing half of the original variable's stack slot. */
12846 #if SIZEOF_REGISTER == 4
12847 /* Create MonoInsts for longs */
12848 for (i = 0; i < cfg->num_varinfo; i++) {
12849 MonoInst *ins = cfg->varinfo [i];
12851 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
12852 switch (ins->type) {
/* NOTE(review): the switch cases are partially elided; on soft-float
 * configurations STACK_R8 is apparently handled here too. */
12857 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
12860 g_assert (ins->opcode == OP_REGOFFSET);
12862 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
12864 tree->opcode = OP_REGOFFSET;
12865 tree->inst_basereg = ins->inst_basereg;
12866 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
12868 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
12870 tree->opcode = OP_REGOFFSET;
12871 tree->inst_basereg = ins->inst_basereg;
12872 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
/* When GC maps are computed, register-allocated variables are flagged so the
 * GC-liveness opcodes emitted later in this function cover them as well. */
12882 if (cfg->compute_gc_maps) {
12883 /* registers need liveness info even for !non refs */
12884 for (i = 0; i < cfg->num_varinfo; i++) {
12885 MonoInst *ins = cfg->varinfo [i];
12887 if (ins->opcode == OP_REGVAR)
12888 ins->flags |= MONO_INST_GC_TRACK;
/* gsharedvt: variables whose size is only known at runtime cannot get a
 * fixed frame offset.  Locals (i >= cfg->locals_start) get a runtime-info
 * slot index (stored biased by +1 in gsharedvt_vreg_to_idx, raw in
 * inst_imm); other entries are marked -1 and handled as args passed by
 * reference (OP_GSHAREDVT_ARG_REGOFFSET). */
12892 if (cfg->gsharedvt) {
12893 gsharedvt_vreg_to_idx = mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
12895 for (i = 0; i < cfg->num_varinfo; ++i) {
12896 MonoInst *ins = cfg->varinfo [i];
12899 if (mini_is_gsharedvt_variable_type (cfg, ins->inst_vtype)) {
12900 if (i >= cfg->locals_start) {
12902 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
12903 gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
12904 ins->opcode = OP_GSHAREDVT_LOCAL;
12905 ins->inst_imm = idx;
12908 gsharedvt_vreg_to_idx [ins->dreg] = -1;
12909 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
12915 /* FIXME: widening and truncation */
/* vreg_to_lvreg caches, per basic block, the lvreg that already holds a
 * stack variable's value so repeated loads can be elided; lvregs/lvregs_len
 * track which cache entries need clearing (lvregs itself is declared on an
 * elided line). */
12918 * As an optimization, when a variable allocated to the stack is first loaded into
12919 * an lvreg, we will remember the lvreg and use it the next time instead of loading
12920 * the variable again.
12922 orig_next_vreg = cfg->next_vreg;
12923 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
12924 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
12928 * These arrays contain the first and last instructions accessing a given
12930 * Since we emit bblocks in the same order we process them here, and we
12931 * don't split live ranges, these will precisely describe the live range of
12932 * the variable, i.e. the instruction range where a valid value can be found
12933 * in the variable's location.
12934 * The live range is computed using the liveness info computed by the liveness pass.
12935 * We can't use vmv->range, since that is an abstract live range, and we need
12936 * one which is instruction precise.
12937 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
12939 /* FIXME: Only do this if debugging info is requested */
12940 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
12941 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
12942 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
12943 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
12945 /* Add spill loads/stores */
12946 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12949 if (cfg->verbose_level > 2)
12950 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
/* The lvreg cache is only valid within one bblock, so reset it here. */
12952 /* Clear vreg_to_lvreg array */
12953 for (i = 0; i < lvregs_len; i++)
12954 vreg_to_lvreg [lvregs [i]] = 0;
12958 MONO_BB_FOR_EACH_INS (bb, ins) {
12959 const char *spec = INS_INFO (ins->opcode);
12960 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
12961 gboolean store, no_lvreg;
12962 int sregs [MONO_MAX_SRC_REGS];
12964 if (G_UNLIKELY (cfg->verbose_level > 2))
12965 mono_print_ins (ins);
12967 if (ins->opcode == OP_NOP)
12971 * We handle LDADDR here as well, since it can only be decomposed
12972 * when variable addresses are known.
12974 if (ins->opcode == OP_LDADDR) {
12975 MonoInst *var = ins->inst_p0;
12977 if (var->opcode == OP_VTARG_ADDR) {
12978 /* Happens on SPARC/S390 where vtypes are passed by reference */
12979 MonoInst *vtaddr = var->inst_left;
12980 if (vtaddr->opcode == OP_REGVAR) {
12981 ins->opcode = OP_MOVE;
12982 ins->sreg1 = vtaddr->dreg;
12984 else if (var->inst_left->opcode == OP_REGOFFSET) {
12985 ins->opcode = OP_LOAD_MEMBASE;
12986 ins->inst_basereg = vtaddr->inst_basereg;
12987 ins->inst_offset = vtaddr->inst_offset;
/* gsharedvt_vreg_to_idx entries: -1 = arg passed by ref, >0 = biased
 * runtime-info slot index for a gsharedvt local (see setup above). */
12990 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg] < 0) {
12991 /* gsharedvt arg passed by ref */
12992 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
12994 ins->opcode = OP_LOAD_MEMBASE;
12995 ins->inst_basereg = var->inst_basereg;
12996 ins->inst_offset = var->inst_offset;
12997 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg]) {
12998 MonoInst *load, *load2, *load3;
12999 int idx = gsharedvt_vreg_to_idx [var->dreg] - 1;
13000 int reg1, reg2, reg3;
13001 MonoInst *info_var = cfg->gsharedvt_info_var;
13002 MonoInst *locals_var = cfg->gsharedvt_locals_var;
13006 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
13009 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
13011 g_assert (info_var);
13012 g_assert (locals_var);
13014 /* Mark the instruction used to compute the locals var as used */
13015 cfg->gsharedvt_locals_var_ins = NULL;
13017 /* Load the offset */
13018 if (info_var->opcode == OP_REGOFFSET) {
13019 reg1 = alloc_ireg (cfg);
13020 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
13021 } else if (info_var->opcode == OP_REGVAR) {
13023 reg1 = info_var->dreg;
13025 g_assert_not_reached ();
13027 reg2 = alloc_ireg (cfg);
13028 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
13029 /* Load the locals area address */
13030 reg3 = alloc_ireg (cfg);
13031 if (locals_var->opcode == OP_REGOFFSET) {
13032 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
13033 } else if (locals_var->opcode == OP_REGVAR) {
13034 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
13036 g_assert_not_reached ();
13038 /* Compute the address */
13039 ins->opcode = OP_PADD;
/* Insert the helper loads before the (rewritten) LDADDR so they execute
 * first: load3 (locals base), then load2 (offset), then an optional load
 * (elided guard presumably checks whether 'load' was created). */
13043 mono_bblock_insert_before_ins (bb, ins, load3);
13044 mono_bblock_insert_before_ins (bb, load3, load2);
13046 mono_bblock_insert_before_ins (bb, load2, load);
13048 g_assert (var->opcode == OP_REGOFFSET);
/* Plain stack variable: its address is frame base + offset. */
13050 ins->opcode = OP_ADD_IMM;
13051 ins->sreg1 = var->inst_basereg;
13052 ins->inst_imm = var->inst_offset;
/* The LDADDR rewrite produced lower-level opcodes the local passes can
 * further optimize, so request another local-optimization run. */
13055 *need_local_opts = TRUE;
13056 spec = INS_INFO (ins->opcode);
/* By this point only low-level (non-CIL) opcodes should remain. */
13059 if (ins->opcode < MONO_CEE_LAST) {
13060 mono_print_ins (ins);
13061 g_assert_not_reached ();
13065 * Store opcodes have destbasereg in the dreg, but in reality, it is an
/* Temporarily swap dreg and sreg2 of store instructions so the code below
 * treats the base register as a source; spec2 is filled with the adjusted
 * spec.  The swap is undone near the end of the loop body (see the matching
 * swap further down). */
13069 if (MONO_IS_STORE_MEMBASE (ins)) {
13070 tmp_reg = ins->dreg;
13071 ins->dreg = ins->sreg2;
13072 ins->sreg2 = tmp_reg;
13075 spec2 [MONO_INST_DEST] = ' ';
13076 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
13077 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
13078 spec2 [MONO_INST_SRC3] = ' ';
13080 } else if (MONO_IS_STORE_MEMINDEX (ins))
13081 g_assert_not_reached ();
13086 if (G_UNLIKELY (cfg->verbose_level > 2)) {
13087 printf ("\t %.3s %d", spec, ins->dreg);
13088 num_sregs = mono_inst_get_src_registers (ins, sregs);
13089 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
13090 printf (" %d", sregs [srcindex]);
/***************/
/*    DREG     */
/* If the destination is a global vreg, either rename it to the allocated
 * hreg, fuse the store into the instruction itself, or emit an explicit
 * spill store after the instruction. */
/***************/
13097 regtype = spec [MONO_INST_DEST];
13098 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
13101 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
13102 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
13103 MonoInst *store_ins;
13105 MonoInst *def_ins = ins;
13106 int dreg = ins->dreg; /* The original vreg */
13108 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
13110 if (var->opcode == OP_REGVAR) {
/* Variable lives in a hreg: just rename the dreg. */
13111 ins->dreg = var->dreg;
13112 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
13114 * Instead of emitting a load+store, use a _membase opcode.
13116 g_assert (var->opcode == OP_REGOFFSET);
13117 if (ins->opcode == OP_MOVE) {
13121 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
13122 ins->inst_basereg = var->inst_basereg;
13123 ins->inst_offset = var->inst_offset;
13126 spec = INS_INFO (ins->opcode);
13130 g_assert (var->opcode == OP_REGOFFSET);
13132 prev_dreg = ins->dreg;
13134 /* Invalidate any previous lvreg for this vreg */
13135 vreg_to_lvreg [ins->dreg] = 0;
13139 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
/* Soft float: the R8 value is actually carried in integer registers. */
13141 store_opcode = OP_STOREI8_MEMBASE_REG;
/* Redirect the definition into a fresh lvreg; the spill store below moves
 * it to the variable's stack slot. */
13144 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
13146 #if SIZEOF_REGISTER != 8
13147 if (regtype == 'l') {
/* 32 bit: store the two words of the long separately. */
13148 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
13149 mono_bblock_insert_after_ins (bb, ins, store_ins);
13150 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
13151 mono_bblock_insert_after_ins (bb, ins, store_ins);
13152 def_ins = store_ins;
13157 g_assert (store_opcode != OP_STOREV_MEMBASE);
13159 /* Try to fuse the store into the instruction itself */
13160 /* FIXME: Add more instructions */
13161 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
13162 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
13163 ins->inst_imm = ins->inst_c0;
13164 ins->inst_destbasereg = var->inst_basereg;
13165 ins->inst_offset = var->inst_offset;
13166 spec = INS_INFO (ins->opcode);
13167 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
/* A move into a spilled variable becomes a direct store. */
13168 ins->opcode = store_opcode;
13169 ins->inst_destbasereg = var->inst_basereg;
13170 ins->inst_offset = var->inst_offset;
/* NOTE(review): elided lines here; this re-applies the store dreg/sreg2
 * swap and spec2 setup for the instruction just turned into a store. */
13174 tmp_reg = ins->dreg;
13175 ins->dreg = ins->sreg2;
13176 ins->sreg2 = tmp_reg;
13179 spec2 [MONO_INST_DEST] = ' ';
13180 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
13181 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
13182 spec2 [MONO_INST_SRC3] = ' ';
13184 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
13185 // FIXME: The backends expect the base reg to be in inst_basereg
13186 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
13188 ins->inst_basereg = var->inst_basereg;
13189 ins->inst_offset = var->inst_offset;
13190 spec = INS_INFO (ins->opcode);
13192 /* printf ("INS: "); mono_print_ins (ins); */
13193 /* Create a store instruction */
13194 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
13196 /* Insert it after the instruction */
13197 mono_bblock_insert_after_ins (bb, ins, store_ins);
13199 def_ins = store_ins;
13202 * We can't assign ins->dreg to var->dreg here, since the
13203 * sregs could use it. So set a flag, and do it after
/* fp-stack targets can't cache float values in lvregs; volatile/indirect
 * variables must always be reloaded from memory. */
13206 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
13207 dest_has_lvreg = TRUE;
/* Record the first definition of this vreg for the live-range info. */
13212 if (def_ins && !live_range_start [dreg]) {
13213 live_range_start [dreg] = def_ins;
13214 live_range_start_bb [dreg] = bb;
13217 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
13220 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
13221 tmp->inst_c1 = dreg;
13222 mono_bblock_insert_after_ins (bb, def_ins, tmp);
/************/
/*  SREGS   */
/* For each source register that is a global vreg: rename to hreg, reuse a
 * cached lvreg, fuse the load into the instruction, or emit an explicit
 * load before the instruction. */
/************/
13229 num_sregs = mono_inst_get_src_registers (ins, sregs);
13230 for (srcindex = 0; srcindex < 3; ++srcindex) {
13231 regtype = spec [MONO_INST_SRC1 + srcindex];
13232 sreg = sregs [srcindex];
13234 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
13235 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
13236 MonoInst *var = get_vreg_to_inst (cfg, sreg);
13237 MonoInst *use_ins = ins;
13238 MonoInst *load_ins;
13239 guint32 load_opcode;
13241 if (var->opcode == OP_REGVAR) {
13242 sregs [srcindex] = var->dreg;
13243 //mono_inst_set_src_registers (ins, sregs);
13244 live_range_end [sreg] = use_ins;
13245 live_range_end_bb [sreg] = bb;
13247 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13250 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13251 /* var->dreg is a hreg */
13252 tmp->inst_c1 = sreg;
13253 mono_bblock_insert_after_ins (bb, ins, tmp);
13259 g_assert (var->opcode == OP_REGOFFSET);
13261 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
13263 g_assert (load_opcode != OP_LOADV_MEMBASE);
13265 if (vreg_to_lvreg [sreg]) {
13266 g_assert (vreg_to_lvreg [sreg] != -1);
13268 /* The variable is already loaded to an lvreg */
13269 if (G_UNLIKELY (cfg->verbose_level > 2))
13270 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
13271 sregs [srcindex] = vreg_to_lvreg [sreg];
13272 //mono_inst_set_src_registers (ins, sregs);
13276 /* Try to fuse the load into the instruction */
13277 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
13278 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
13279 sregs [0] = var->inst_basereg;
13280 //mono_inst_set_src_registers (ins, sregs);
13281 ins->inst_offset = var->inst_offset;
13282 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
13283 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
13284 sregs [1] = var->inst_basereg;
13285 //mono_inst_set_src_registers (ins, sregs);
13286 ins->inst_offset = var->inst_offset;
13288 if (MONO_IS_REAL_MOVE (ins)) {
/* The load makes the move redundant; kill the move itself.
 * NOTE(review): the lines nullifying the move's regs appear elided. */
13289 ins->opcode = OP_NOP;
13292 //printf ("%d ", srcindex); mono_print_ins (ins);
13294 sreg = alloc_dreg (cfg, stacktypes [regtype]);
13296 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
13297 if (var->dreg == prev_dreg) {
13299 * sreg refers to the value loaded by the load
13300 * emitted below, but we need to use ins->dreg
13301 * since it refers to the store emitted earlier.
13305 g_assert (sreg != -1);
/* Remember the freshly loaded lvreg so later uses in this bblock
 * can skip the reload; lvregs has a fixed capacity of 1024. */
13306 vreg_to_lvreg [var->dreg] = sreg;
13307 g_assert (lvregs_len < 1024);
13308 lvregs [lvregs_len ++] = var->dreg;
13312 sregs [srcindex] = sreg;
13313 //mono_inst_set_src_registers (ins, sregs);
13315 #if SIZEOF_REGISTER != 8
13316 if (regtype == 'l') {
/* 32 bit: load the two words of the long separately (MS word first,
 * inserted before the instruction, then LS word before that load). */
13317 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
13318 mono_bblock_insert_before_ins (bb, ins, load_ins);
13319 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
13320 mono_bblock_insert_before_ins (bb, ins, load_ins);
13321 use_ins = load_ins;
13326 #if SIZEOF_REGISTER == 4
13327 g_assert (load_opcode != OP_LOADI8_MEMBASE);
13329 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
13330 mono_bblock_insert_before_ins (bb, ins, load_ins);
13331 use_ins = load_ins;
/* Extend the live range to this use (only for real vregs, not the
 * lvregs allocated during this pass). */
13335 if (var->dreg < orig_next_vreg) {
13336 live_range_end [var->dreg] = use_ins;
13337 live_range_end_bb [var->dreg] = bb;
13340 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13343 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13344 tmp->inst_c1 = var->dreg;
13345 mono_bblock_insert_after_ins (bb, ins, tmp);
13349 mono_inst_set_src_registers (ins, sregs);
/* Deferred from the DREG phase: now that the sregs have been processed it
 * is safe to publish the new lvreg for the defined variable. */
13351 if (dest_has_lvreg) {
13352 g_assert (ins->dreg != -1);
13353 vreg_to_lvreg [prev_dreg] = ins->dreg;
13354 g_assert (lvregs_len < 1024);
13355 lvregs [lvregs_len ++] = prev_dreg;
13356 dest_has_lvreg = FALSE;
/* Undo the earlier dreg/sreg2 swap for store instructions
 * (NOTE(review): the guarding condition is on an elided line). */
13360 tmp_reg = ins->dreg;
13361 ins->dreg = ins->sreg2;
13362 ins->sreg2 = tmp_reg;
/* Calls can clobber any hreg, so cached lvregs are invalid afterwards. */
13365 if (MONO_IS_CALL (ins)) {
13366 /* Clear vreg_to_lvreg array */
13367 for (i = 0; i < lvregs_len; i++)
13368 vreg_to_lvreg [lvregs [i]] = 0;
13370 } else if (ins->opcode == OP_NOP) {
13372 MONO_INST_NULLIFY_SREGS (ins);
13375 if (cfg->verbose_level > 2)
13376 mono_print_ins_index (1, ins);
13379 /* Extend the live range based on the liveness info */
13380 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
13381 for (i = 0; i < cfg->num_varinfo; i ++) {
13382 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
13384 if (vreg_is_volatile (cfg, vi->vreg))
13385 /* The liveness info is incomplete */
13388 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
13389 /* Live from at least the first ins of this bb */
13390 live_range_start [vi->vreg] = bb->code;
13391 live_range_start_bb [vi->vreg] = bb;
13394 if (mono_bitset_test_fast (bb->live_out_set, i)) {
13395 /* Live at least until the last ins of this bb */
13396 live_range_end [vi->vreg] = bb->last_ins;
13397 live_range_end_bb [vi->vreg] = bb;
13403 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
13405 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
13406 * by storing the current native offset into MonoMethodVar->live_range_start/end.
13408 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
13409 for (i = 0; i < cfg->num_varinfo; ++i) {
13410 int vreg = MONO_VARINFO (cfg, i)->vreg;
13413 if (live_range_start [vreg]) {
13414 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
13416 ins->inst_c1 = vreg;
13417 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
13419 if (live_range_end [vreg]) {
13420 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
13422 ins->inst_c1 = vreg;
13423 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
13424 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
13426 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
/* If the gsharedvt locals-var computation was never consumed above
 * (gsharedvt_locals_var_ins is still set), neutralize it. */
13432 if (cfg->gsharedvt_locals_var_ins) {
13433 /* Nullify if unused */
13434 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
13435 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
/* The live-range arrays were allocated with g_new/g_new0 (not from the
 * mempool), so they must be freed explicitly. */
13438 g_free (live_range_start);
13439 g_free (live_range_end);
13440 g_free (live_range_start_bb);
13441 g_free (live_range_end_bb);
13446 * - use 'iadd' instead of 'int_add'
13447 * - handling ovf opcodes: decompose in method_to_ir.
13448 * - unify iregs/fregs
13449 * -> partly done, the missing parts are:
13450 * - a more complete unification would involve unifying the hregs as well, so
13451 * code wouldn't need if (fp) all over the place. but that would mean the hregs
13452 * would no longer map to the machine hregs, so the code generators would need to
13453 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
13454 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
13455 * fp/non-fp branches speeds it up by about 15%.
13456 * - use sext/zext opcodes instead of shifts
13458 * - get rid of TEMPLOADs if possible and use vregs instead
13459 * - clean up usage of OP_P/OP_ opcodes
13460 * - cleanup usage of DUMMY_USE
13461 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
13463 * - set the stack type and allocate a dreg in the EMIT_NEW macros
13464 * - get rid of all the <foo>2 stuff when the new JIT is ready.
13465 * - make sure handle_stack_args () is called before the branch is emitted
13466 * - when the new IR is done, get rid of all unused stuff
13467 * - COMPARE/BEQ as separate instructions or unify them ?
13468 * - keeping them separate allows specialized compare instructions like
13469 * compare_imm, compare_membase
13470 * - most back ends unify fp compare+branch, fp compare+ceq
13471 * - integrate mono_save_args into inline_method
13472 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
13473 * - handle long shift opts on 32 bit platforms somehow: they require
13474 * 3 sregs (2 for arg1 and 1 for arg2)
13475 * - make byref a 'normal' type.
13476 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
13477 * variable if needed.
13478 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
13479 * like inline_method.
13480 * - remove inlining restrictions
13481 * - fix LNEG and enable cfold of INEG
13482 * - generalize x86 optimizations like ldelema as a peephole optimization
13483 * - add store_mem_imm for amd64
13484 * - optimize the loading of the interruption flag in the managed->native wrappers
13485 * - avoid special handling of OP_NOP in passes
13486 * - move code inserting instructions into one function/macro.
13487 * - try a coalescing phase after liveness analysis
13488 * - add float -> vreg conversion + local optimizations on !x86
13489 * - figure out how to handle decomposed branches during optimizations, ie.
13490 * compare+branch, op_jump_table+op_br etc.
13491 * - promote RuntimeXHandles to vregs
13492 * - vtype cleanups:
13493 * - add a NEW_VARLOADA_VREG macro
13494 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
13495 * accessing vtype fields.
13496 * - get rid of I8CONST on 64 bit platforms
13497 * - dealing with the increase in code size due to branches created during opcode
13499 * - use extended basic blocks
13500 * - all parts of the JIT
13501 * - handle_global_vregs () && local regalloc
13502 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
13503 * - sources of increase in code size:
13506 * - isinst and castclass
13507 * - lvregs not allocated to global registers even if used multiple times
13508 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
13510 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
13511 * - add all micro optimizations from the old JIT
13512 * - put tree optimizations into the deadce pass
13513 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
13514 * specific function.
13515 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
13516 * fcompare + branchCC.
13517 * - create a helper function for allocating a stack slot, taking into account
13518 * MONO_CFG_HAS_SPILLUP.
13520 * - merge the ia64 switch changes.
13521 * - optimize mono_regstate2_alloc_int/float.
13522 * - fix the pessimistic handling of variables accessed in exception handler blocks.
13523 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
13524 * parts of the tree could be separated by other instructions, killing the tree
13525 * arguments, or stores killing loads etc. Also, should we fold loads into other
13526 * instructions if the result of the load is used multiple times ?
13527 * - make the REM_IMM optimization in mini-x86.c arch-independent.
13528 * - LAST MERGE: 108395.
13529 * - when returning vtypes in registers, generate IR and append it to the end of the
13530 * last bb instead of doing it in the epilog.
13531 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
13539 - When to decompose opcodes:
13540 - earlier: this makes some optimizations hard to implement, since the low level IR
13541 no longer contains the necessary information. But it is easier to do.
13542 - later: harder to implement, enables more optimizations.
13543 - Branches inside bblocks:
13544 - created when decomposing complex opcodes.
13545 - branches to another bblock: harmless, but not tracked by the branch
13546 optimizations, so need to branch to a label at the start of the bblock.
13547 - branches to inside the same bblock: very problematic, trips up the local
13548 reg allocator. Can be fixed by splitting the current bblock, but that is a
13549 complex operation, since some local vregs can become global vregs etc.
13550 - Local/global vregs:
13551 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
13552 local register allocator.
13553 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
13554 structure, created by mono_create_var (). Assigned to hregs or the stack by
13555 the global register allocator.
13556 - When to do optimizations like alu->alu_imm:
13557 - earlier -> saves work later on since the IR will be smaller/simpler
13558 - later -> can work on more instructions
13559 - Handling of valuetypes:
13560 - When a vtype is pushed on the stack, a new temporary is created, an
13561 instruction computing its address (LDADDR) is emitted and pushed on
13562 the stack. Need to optimize cases when the vtype is used immediately as in
13563 argument passing, stloc etc.
13564 - Instead of the to_end stuff in the old JIT, simply call the function handling
13565 the values on the stack before emitting the last instruction of the bb.
13568 #endif /* DISABLE_JIT */