2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/assembly.h>
38 #include <mono/metadata/attrdefs.h>
39 #include <mono/metadata/loader.h>
40 #include <mono/metadata/tabledefs.h>
41 #include <mono/metadata/class.h>
42 #include <mono/metadata/object.h>
43 #include <mono/metadata/exception.h>
44 #include <mono/metadata/opcodes.h>
45 #include <mono/metadata/mono-endian.h>
46 #include <mono/metadata/tokentype.h>
47 #include <mono/metadata/tabledefs.h>
48 #include <mono/metadata/marshal.h>
49 #include <mono/metadata/debug-helpers.h>
50 #include <mono/metadata/mono-debug.h>
51 #include <mono/metadata/gc-internal.h>
52 #include <mono/metadata/security-manager.h>
53 #include <mono/metadata/threads-types.h>
54 #include <mono/metadata/security-core-clr.h>
55 #include <mono/metadata/monitor.h>
56 #include <mono/metadata/profiler-private.h>
57 #include <mono/metadata/profiler.h>
58 #include <mono/metadata/debug-mono-symfile.h>
59 #include <mono/utils/mono-compiler.h>
60 #include <mono/utils/mono-memory-model.h>
61 #include <mono/metadata/mono-basic-block.h>
68 #include "jit-icalls.h"
70 #include "debugger-agent.h"
/* Inliner heuristics: relative cost assigned to a branch and the maximum
 * callee size (in IL bytes) considered for inlining. */
72 #define BRANCH_COST 10
73 #define INLINE_LENGTH_LIMIT 20
/*
 * INLINE_FAILURE: give up inlining the current callee and fall back to a
 * regular call by jumping to the 'inline_failure' label.  Only fires while
 * actually inlining (cfg->method != method) and only for non-wrapper
 * methods; logs MSG when verbose_level >= 2.
 * NOTE(review): the closing lines of this macro are not visible in this
 * listing.
 */
74 #define INLINE_FAILURE(msg) do { \
75 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE)) { \
76 if (cfg->verbose_level >= 2) \
77 printf ("inline failed: %s\n", msg); \
78 goto inline_failure; \
/* Bail out of IR generation if a compile-time exception has already been
 * recorded on the MonoCompile.  NOTE(review): the action taken and the
 * closing of this macro are not visible in this listing. */
81 #define CHECK_CFG_EXCEPTION do {\
82 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
/* Record a method-access compile failure: builds a human-readable message
 * from the caller ('method') and callee ('cil_method') full names, sets
 * MONO_EXCEPTION_METHOD_ACCESS on the cfg and jumps to 'exception_exit'.
 * The temporary name strings are freed here; cfg->exception_message owns
 * the formatted copy. */
85 #define METHOD_ACCESS_FAILURE do { \
86 char *method_fname = mono_method_full_name (method, TRUE); \
87 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
88 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS); \
89 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
90 g_free (method_fname); \
91 g_free (cil_method_fname); \
92 goto exception_exit; \
/* Record a field-access compile failure: mirrors METHOD_ACCESS_FAILURE but
 * for an inaccessible 'field'; sets MONO_EXCEPTION_FIELD_ACCESS and jumps
 * to 'exception_exit'. */
94 #define FIELD_ACCESS_FAILURE do { \
95 char *method_fname = mono_method_full_name (method, TRUE); \
96 char *field_fname = mono_field_full_name (field); \
97 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS); \
98 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
99 g_free (method_fname); \
100 g_free (field_fname); \
101 goto exception_exit; \
/* Abort generic code sharing for this method: only acts when compiling with
 * a generic sharing context; logs the offending OPCODE and source line at
 * verbose_level > 2, sets MONO_EXCEPTION_GENERIC_SHARING_FAILED and jumps
 * to 'exception_exit' (the caller is expected to retry unshared). */
103 #define GENERIC_SHARING_FAILURE(opcode) do { \
104 if (cfg->generic_sharing_context) { \
105 if (cfg->verbose_level > 2) \
106 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
107 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
108 goto exception_exit; \
/* Like GENERIC_SHARING_FAILURE but specific to gsharedvt compilation: only
 * acts when cfg->gsharedvt is set; records an explanatory message naming
 * the OPCODE and call site, then aborts via 'exception_exit'. */
111 #define GSHAREDVT_FAILURE(opcode) do { \
112 if (cfg->gsharedvt) { \
113 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __FILE__, __LINE__); \
114 if (cfg->verbose_level >= 2) \
115 printf ("%s\n", cfg->exception_message); \
116 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
117 goto exception_exit; \
/* Abort compilation of the current method with an out-of-memory condition. */
120 #define OUT_OF_MEMORY_FAILURE do { \
121 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
122 goto exception_exit; \
/* Mark this method as not AOT-compilable; logs the exact call site at
 * verbose_level >= 2 so the reason can be traced. */
124 #define DISABLE_AOT(cfg) do { \
125 if ((cfg)->verbose_level >= 2) \
126 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
127 (cfg)->disable_aot = TRUE; \
130 /* Determine whether 'ins' represents a load of the 'this' argument: the
 * method must have a 'this' parameter and 'ins' must be an OP_MOVE whose
 * source is the vreg holding argument 0. */
131 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations: map ldind.*/stind.* CIL opcodes to the matching
 * load/store-membase IR opcodes (defined later in this file). */
133 static int ldind_to_load_membase (int opcode);
134 static int stind_to_store_membase (int opcode);
/* Map a three-address opcode to its immediate form (with/without emulated
 * immediates). */
136 int mono_op_to_op_imm (int opcode);
137 int mono_op_to_op_imm_noemul (int opcode);
139 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
141 /* Signatures for calls into runtime helpers/trampolines; created lazily by
 * mono_create_helper_signatures () below and cached for the process. */
142 static MonoMethodSignature *helper_sig_class_init_trampoline = NULL;
143 static MonoMethodSignature *helper_sig_domain_get = NULL;
144 static MonoMethodSignature *helper_sig_generic_class_init_trampoline = NULL;
145 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm = NULL;
146 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline = NULL;
147 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline = NULL;
148 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm = NULL;
151 * Instruction metadata
/* X-macro trick: "mini-ops.h" is included twice with different expansions of
 * MINI_OP/MINI_OP3 to build per-opcode tables.  First pass: dest/src register
 * kinds (one char each). */
159 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
160 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
166 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
171 /* keep in sync with the enum in mini.h */
174 #include "mini-ops.h"
/* Second pass: number of source registers per opcode. */
179 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
180 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
182 * This should contain the index of the last sreg + 1. This is not the same
183 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
185 const gint8 ins_sreg_counts[] = {
186 #include "mini-ops.h"
/* Initialize liveness bookkeeping for variable-info VI (id ID); 0xffff is
 * presumably a "no first use seen yet" sentinel for the bblock id — TODO
 * confirm against the full macro (remaining lines not visible here). */
191 #define MONO_INIT_VARINFO(vi,id) do { \
192 (vi)->range.first_use.pos.bid = 0xffff; \
/* Set all three source registers of INS from the REGS array (regs must hold
 * at least 3 entries). */
198 mono_inst_set_src_registers (MonoInst *ins, int *regs)
200 ins->sreg1 = regs [0];
201 ins->sreg2 = regs [1];
202 ins->sreg3 = regs [2];
/* Public wrappers around the per-cfg virtual-register allocators: each
 * returns a fresh vreg of the requested kind (int/long/float/pointer). */
206 mono_alloc_ireg (MonoCompile *cfg)
208 return alloc_ireg (cfg);
212 mono_alloc_lreg (MonoCompile *cfg)
214 return alloc_lreg (cfg);
218 mono_alloc_freg (MonoCompile *cfg)
220 return alloc_freg (cfg);
224 mono_alloc_preg (MonoCompile *cfg)
226 return alloc_preg (cfg);
/* Allocate a destination vreg matching the given eval-stack type. */
230 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
232 return alloc_dreg (cfg, stack_type);
236 * mono_alloc_ireg_ref:
238 * Allocate an IREG, and mark it as holding a GC ref.
241 mono_alloc_ireg_ref (MonoCompile *cfg)
243 return alloc_ireg_ref (cfg);
247 * mono_alloc_ireg_mp:
249 * Allocate an IREG, and mark it as holding a managed pointer.
252 mono_alloc_ireg_mp (MonoCompile *cfg)
254 return alloc_ireg_mp (cfg);
258 * mono_alloc_ireg_copy:
260 * Allocate an IREG with the same GC type as VREG.
263 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
/* Propagate the GC classification (ref / managed pointer / plain int) of
 * VREG to the newly allocated register. */
265 if (vreg_is_ref (cfg, vreg))
266 return alloc_ireg_ref (cfg);
267 else if (vreg_is_mp (cfg, vreg))
268 return alloc_ireg_mp (cfg);
270 return alloc_ireg (cfg);
/* Select the register-move opcode class appropriate for TYPE (after
 * resolving enums and generic instances to their underlying type).
 * NOTE(review): the return statements for each case are not visible in this
 * listing; the switch structure below only shows the type dispatch. */
274 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
279 type = mini_replace_type (type);
281 switch (type->type) {
284 case MONO_TYPE_BOOLEAN:
296 case MONO_TYPE_FNPTR:
298 case MONO_TYPE_CLASS:
299 case MONO_TYPE_STRING:
300 case MONO_TYPE_OBJECT:
301 case MONO_TYPE_SZARRAY:
302 case MONO_TYPE_ARRAY:
306 #if SIZEOF_REGISTER == 8
/* Enums are handled as their underlying integral base type. */
315 case MONO_TYPE_VALUETYPE:
316 if (type->data.klass->enumtype) {
317 type = mono_class_enum_basetype (type->data.klass);
320 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
323 case MONO_TYPE_TYPEDBYREF:
/* Generic instances collapse to their container class's byval type. */
325 case MONO_TYPE_GENERICINST:
326 type = &type->data.generic_class->container_class->byval_arg;
330 g_assert (cfg->generic_sharing_context);
331 if (mini_type_var_is_vt (cfg, type))
336 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/* Debug dump of basic block BB prefixed by MSG: prints the incoming and
 * outgoing edges (block number and depth-first number) followed by every
 * instruction in the block. */
342 mono_print_bb (MonoBasicBlock *bb, const char *msg)
347 printf ("\n%s %d: [IN: ", msg, bb->block_num);
348 for (i = 0; i < bb->in_count; ++i)
349 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
351 for (i = 0; i < bb->out_count; ++i)
352 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
354 for (tree = bb->code; tree; tree = tree->next)
355 mono_print_ins_index (-1, tree);
/* Populate the cached helper_sig_* globals declared above.  The string
 * passed to mono_create_icall_signature () is "<ret> <args...>" — e.g.
 * "ptr ptr" is a function taking one pointer and returning a pointer. */
359 mono_create_helper_signatures (void)
361 helper_sig_domain_get = mono_create_icall_signature ("ptr");
362 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
363 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
364 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
365 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
366 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
367 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
371 * When using gsharedvt, some instantiations might be verifiable and some might not be, e.g.
372 * foo<T> (int i) { ldarg.0; box T; }
/* UNVERIFIED: the IL failed verification at the current opcode.  Under
 * gsharedvt this falls back to compiling the concrete instantiation (via
 * the generic-sharing-failed path); otherwise it can trigger a debugger
 * break when break_on_unverified is set. */
374 #define UNVERIFIED do { \
375 if (cfg->gsharedvt) { \
376 if (cfg->verbose_level > 2) \
377 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
378 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
379 goto exception_exit; \
381 if (mini_get_debug_options ()->break_on_unverified) \
/* LOAD_ERROR / TYPE_LOAD_ERROR: a member or type failed to load; optionally
 * break into the debugger, otherwise jump to the shared 'load_error' label
 * (TYPE_LOAD_ERROR additionally records the offending class). */
387 #define LOAD_ERROR do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto load_error; } while (0)
389 #define TYPE_LOAD_ERROR(klass) do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else { cfg->exception_ptr = klass; goto load_error; } } while (0)
/* GET_BBLOCK: fetch the basic block starting at IL address IP from the
 * offset->bblock map, creating and registering a new one on first use.
 * Out-of-range IPs are flagged via UNVERIFIED. */
391 #define GET_BBLOCK(cfg,tblock,ip) do { \
392 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
394 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
395 NEW_BBLOCK (cfg, (tblock)); \
396 (tblock)->cil_code = (ip); \
397 ADD_BBLOCK (cfg, (tblock)); \
401 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Emit an x86 LEA computing sr1 + (sr2 << shift) + imm into a fresh
 * managed-pointer register; appended to the current bblock. */
402 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
403 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
404 (dest)->dreg = alloc_ireg_mp ((cfg)); \
405 (dest)->sreg1 = (sr1); \
406 (dest)->sreg2 = (sr2); \
407 (dest)->inst_imm = (imm); \
408 (dest)->backend.shift_amount = (shift); \
409 MONO_ADD_INS ((cfg)->cbb, (dest)); \
/* On 64-bit targets a 32-bit operand mixed with a pointer-sized one must be
 * sign-extended before the binop; on 32-bit targets this is a no-op. */
413 #if SIZEOF_REGISTER == 8
414 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
415 /* FIXME: Need to add many more cases */ \
416 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
418 int dr = alloc_preg (cfg); \
419 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
420 (ins)->sreg2 = widen->dreg; \
424 #define ADD_WIDEN_OP(ins, arg1, arg2)
/* ADD_BINOP: pop the two top-of-stack values (sp [0], sp [1]), build the
 * binary instruction OP on them, infer its result type via type_from_op (),
 * widen mismatched 32/64-bit operands, and push the (possibly decomposed)
 * result back on the eval stack. */
427 #define ADD_BINOP(op) do { \
428 MONO_INST_NEW (cfg, ins, (op)); \
430 ins->sreg1 = sp [0]->dreg; \
431 ins->sreg2 = sp [1]->dreg; \
432 type_from_op (ins, sp [0], sp [1]); \
434 /* Have to insert a widening op */ \
435 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
436 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
437 MONO_ADD_INS ((cfg)->cbb, (ins)); \
438 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
/* ADD_UNOP: same as ADD_BINOP but for a single-operand opcode. */
441 #define ADD_UNOP(op) do { \
442 MONO_INST_NEW (cfg, ins, (op)); \
444 ins->sreg1 = sp [0]->dreg; \
445 type_from_op (ins, sp [0], NULL); \
447 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
448 MONO_ADD_INS ((cfg)->cbb, (ins)); \
449 *sp++ = mono_decompose_opcode (cfg, ins); \
/* ADD_BINCOND: emit a compare of the two top-of-stack values followed by a
 * conditional branch.  The true target comes from the decoded IL branch
 * target; the false target is NEXT_BLOCK when supplied, otherwise the block
 * at the fall-through ip.  Links both edges in the CFG and flushes any
 * remaining eval-stack entries to locals before the branch. */
452 #define ADD_BINCOND(next_block) do { \
455 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
456 cmp->sreg1 = sp [0]->dreg; \
457 cmp->sreg2 = sp [1]->dreg; \
458 type_from_op (cmp, sp [0], sp [1]); \
460 type_from_op (ins, sp [0], sp [1]); \
461 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
462 GET_BBLOCK (cfg, tblock, target); \
463 link_bblock (cfg, bblock, tblock); \
464 ins->inst_true_bb = tblock; \
465 if ((next_block)) { \
466 link_bblock (cfg, bblock, (next_block)); \
467 ins->inst_false_bb = (next_block); \
468 start_new_bblock = 1; \
470 GET_BBLOCK (cfg, tblock, ip); \
471 link_bblock (cfg, bblock, tblock); \
472 ins->inst_false_bb = tblock; \
473 start_new_bblock = 2; \
475 if (sp != stack_start) { \
476 handle_stack_args (cfg, stack_start, sp - stack_start); \
477 CHECK_UNVERIFIABLE (cfg); \
479 MONO_ADD_INS (bblock, cmp); \
480 MONO_ADD_INS (bblock, ins); \
484 * link_bblock: Links two basic blocks
486 * links two basic blocks in the control flow graph, the 'from'
487 * argument is the starting block and the 'to' argument is the block
488 * that control flow reaches after 'from'.
491 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
493 MonoBasicBlock **newa;
/* Debug tracing of the edge being added (entry/exit blocks have no
 * cil_code). */
497 if (from->cil_code) {
499 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
501 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
504 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
506 printf ("edge from entry to exit\n");
/* Skip if the edge already exists in from->out_bb. */
511 for (i = 0; i < from->out_count; ++i) {
512 if (to == from->out_bb [i]) {
/* Grow the successor array by one (mempool allocated, copy-and-append). */
518 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
519 for (i = 0; i < from->out_count; ++i) {
520 newa [i] = from->out_bb [i];
/* Same dance for the predecessor array of 'to'. */
528 for (i = 0; i < to->in_count; ++i) {
529 if (from == to->in_bb [i]) {
535 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
536 for (i = 0; i < to->in_count; ++i) {
537 newa [i] = to->in_bb [i];
/* Public wrapper around the static link_bblock (). */
546 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
548 link_bblock (cfg, from, to);
552 * mono_find_block_region:
554 * We mark each basic block with a region ID. We use that to avoid BB
555 * optimizations when blocks are in different regions.
558 * A region token that encodes where this region is, and information
559 * about the clause owner for this block.
561 * The region encodes the try/catch/filter clause that owns this block
562 * as well as the type. -1 is a special value that represents a block
563 * that is in none of try/catch/filter.
566 mono_find_block_region (MonoCompile *cfg, int offset)
568 MonoMethodHeader *header = cfg->header;
569 MonoExceptionClause *clause;
/* Region token layout: ((clause_index + 1) << 8) | MONO_REGION_* | flags. */
572 for (i = 0; i < header->num_clauses; ++i) {
573 clause = &header->clauses [i];
574 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
575 (offset < (clause->handler_offset)))
576 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
578 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
579 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
580 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
581 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
582 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
584 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Offset is inside the protected (try) range of the clause. */
587 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
588 return ((i + 1) << 8) | clause->flags;
/* Collect the exception clauses of kind TYPE whose protected range contains
 * IP but not TARGET — i.e. the handlers (e.g. finally blocks) that a branch
 * from IP to TARGET would leave.  Returns a GList of MonoExceptionClause*. */
595 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
597 MonoMethodHeader *header = cfg->header;
598 MonoExceptionClause *clause;
602 for (i = 0; i < header->num_clauses; ++i) {
603 clause = &header->clauses [i];
604 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
605 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
606 if (clause->flags == type)
607 res = g_list_append (res, clause);
/* Get-or-create the stack-pointer variable associated with exception REGION;
 * cached per region in cfg->spvars.  Marked volatile so the register
 * allocator leaves it on the stack. */
614 mono_create_spvar_for_region (MonoCompile *cfg, int region)
618 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
622 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
623 /* prevent it from being register allocated */
624 var->flags |= MONO_INST_VOLATILE;
626 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up (without creating) the exception-object variable for a handler
 * starting at OFFSET. */
630 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
632 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Get-or-create the exception-object variable for the handler at OFFSET;
 * same caching/volatility scheme as the spvar above. */
636 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
640 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
644 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
645 /* prevent it from being register allocated */
646 var->flags |= MONO_INST_VOLATILE;
648 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
654 * Returns the type used in the eval stack when @type is loaded.
655 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/* Sets inst->type (STACK_*) and inst->klass from the CLI type TYPE, resolving
 * enums and generic instances to their underlying representation first. */
658 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
662 type = mini_replace_type (type);
663 inst->klass = klass = mono_class_from_mono_type (type);
/* Byref values are managed pointers regardless of the pointee type. */
665 inst->type = STACK_MP;
670 switch (type->type) {
672 inst->type = STACK_INV;
676 case MONO_TYPE_BOOLEAN:
682 inst->type = STACK_I4;
687 case MONO_TYPE_FNPTR:
688 inst->type = STACK_PTR;
690 case MONO_TYPE_CLASS:
691 case MONO_TYPE_STRING:
692 case MONO_TYPE_OBJECT:
693 case MONO_TYPE_SZARRAY:
694 case MONO_TYPE_ARRAY:
695 inst->type = STACK_OBJ;
699 inst->type = STACK_I8;
703 inst->type = STACK_R8;
/* Enums reduce to their integral base type and are re-dispatched. */
705 case MONO_TYPE_VALUETYPE:
706 if (type->data.klass->enumtype) {
707 type = mono_class_enum_basetype (type->data.klass);
711 inst->type = STACK_VTYPE;
714 case MONO_TYPE_TYPEDBYREF:
715 inst->klass = mono_defaults.typed_reference_class;
716 inst->type = STACK_VTYPE;
718 case MONO_TYPE_GENERICINST:
719 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables only occur under generic sharing; gsharedvt treats
 * value-type variables as vtypes, everything else as object refs. */
723 g_assert (cfg->generic_sharing_context);
724 if (mini_is_gsharedvt_type (cfg, type)) {
725 g_assert (cfg->gsharedvt);
726 inst->type = STACK_VTYPE;
728 inst->type = STACK_OBJ;
732 g_error ("unknown type 0x%02x in eval stack type", type->type);
737 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result eval-stack type of a numeric binop, indexed by [lhs][rhs];
 * STACK_INV marks invalid operand combinations. */
740 bin_num_table [STACK_MAX] [STACK_MAX] = {
741 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
742 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
743 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
744 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
745 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
746 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
747 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
748 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Result type of a unary negation per operand type (used as neg_table;
 * its declaration line is not visible in this listing). */
753 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
756 /* reduce the size of this table */
/* Result type of integer-only binops (and/or/xor etc.). */
758 bin_int_table [STACK_MAX] [STACK_MAX] = {
759 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
760 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
761 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
762 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
763 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
764 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
765 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
766 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparison legality matrix: 0 = invalid, non-zero values encode which
 * comparison kinds are allowed for the operand pair. */
770 bin_comp_table [STACK_MAX] [STACK_MAX] = {
771 /* Inv i L p F & O vt */
773 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
774 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
775 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
776 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
777 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
778 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
779 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
782 /* reduce the size of this table */
/* Result type of shift ops (shift amount must be I4 or PTR). */
784 shift_table [STACK_MAX] [STACK_MAX] = {
785 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
786 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
787 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
788 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
789 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
790 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
791 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
792 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
796 * Tables to map from the non-specific opcode to the matching
797 * type-specific opcode.
/* Each entry is the delta added to the generic CIL opcode to obtain the
 * I/L/P/F-specific IR opcode, indexed by eval-stack type. */
799 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
801 binops_op_map [STACK_MAX] = {
802 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
805 /* handles from CEE_NEG to CEE_CONV_U8 */
807 unops_op_map [STACK_MAX] = {
808 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
811 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
813 ovfops_op_map [STACK_MAX] = {
814 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
817 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
819 ovf2ops_op_map [STACK_MAX] = {
820 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
823 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
825 ovf3ops_op_map [STACK_MAX] = {
826 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
829 /* handles from CEE_BEQ to CEE_BLT_UN */
831 beqops_op_map [STACK_MAX] = {
832 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
835 /* handles from CEE_CEQ to CEE_CLT_UN */
837 ceqops_op_map [STACK_MAX] = {
838 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
842 * Sets ins->type (the type on the eval stack) according to the
843 * type of the opcode and the arguments to it.
844 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
846 * FIXME: this function sets ins->type unconditionally in some cases, but
847 * it should set it to invalid for some types (a conv.x on an object)
/* Also specializes the generic CIL opcode in-place (via the *_op_map delta
 * tables above) to the I/L/P/F-specific IR opcode for the operand types. */
850 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
852 switch (ins->opcode) {
859 /* FIXME: check unverifiable args for STACK_MP */
860 ins->type = bin_num_table [src1->type] [src2->type];
861 ins->opcode += binops_op_map [ins->type];
868 ins->type = bin_int_table [src1->type] [src2->type];
869 ins->opcode += binops_op_map [ins->type];
874 ins->type = shift_table [src1->type] [src2->type];
875 ins->opcode += binops_op_map [ins->type];
/* Compares: pick L/F/I compare based on the first operand's width; on
 * 64-bit, pointer-sized operands use the long compare. */
880 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
881 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
882 ins->opcode = OP_LCOMPARE;
883 else if (src1->type == STACK_R8)
884 ins->opcode = OP_FCOMPARE;
886 ins->opcode = OP_ICOMPARE;
888 case OP_ICOMPARE_IMM:
889 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
890 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
891 ins->opcode = OP_LCOMPARE_IMM;
903 ins->opcode += beqops_op_map [src1->type];
906 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
907 ins->opcode += ceqops_op_map [src1->type];
913 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
914 ins->opcode += ceqops_op_map [src1->type];
918 ins->type = neg_table [src1->type];
919 ins->opcode += unops_op_map [ins->type];
922 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
923 ins->type = src1->type;
925 ins->type = STACK_INV;
926 ins->opcode += unops_op_map [ins->type];
932 ins->type = STACK_I4;
933 ins->opcode += unops_op_map [src1->type];
936 ins->type = STACK_R8;
937 switch (src1->type) {
940 ins->opcode = OP_ICONV_TO_R_UN;
943 ins->opcode = OP_LCONV_TO_R_UN;
/* Overflow-checked conversions to 32-bit targets. */
947 case CEE_CONV_OVF_I1:
948 case CEE_CONV_OVF_U1:
949 case CEE_CONV_OVF_I2:
950 case CEE_CONV_OVF_U2:
951 case CEE_CONV_OVF_I4:
952 case CEE_CONV_OVF_U4:
953 ins->type = STACK_I4;
954 ins->opcode += ovf3ops_op_map [src1->type];
956 case CEE_CONV_OVF_I_UN:
957 case CEE_CONV_OVF_U_UN:
958 ins->type = STACK_PTR;
959 ins->opcode += ovf2ops_op_map [src1->type];
961 case CEE_CONV_OVF_I1_UN:
962 case CEE_CONV_OVF_I2_UN:
963 case CEE_CONV_OVF_I4_UN:
964 case CEE_CONV_OVF_U1_UN:
965 case CEE_CONV_OVF_U2_UN:
966 case CEE_CONV_OVF_U4_UN:
967 ins->type = STACK_I4;
968 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: result is pointer-sized; the exact move/convert depends on
 * the source width and the target pointer size. */
971 ins->type = STACK_PTR;
972 switch (src1->type) {
974 ins->opcode = OP_ICONV_TO_U;
978 #if SIZEOF_VOID_P == 8
979 ins->opcode = OP_LCONV_TO_U;
981 ins->opcode = OP_MOVE;
985 ins->opcode = OP_LCONV_TO_U;
988 ins->opcode = OP_FCONV_TO_U;
994 ins->type = STACK_I8;
995 ins->opcode += unops_op_map [src1->type];
997 case CEE_CONV_OVF_I8:
998 case CEE_CONV_OVF_U8:
999 ins->type = STACK_I8;
1000 ins->opcode += ovf3ops_op_map [src1->type];
1002 case CEE_CONV_OVF_U8_UN:
1003 case CEE_CONV_OVF_I8_UN:
1004 ins->type = STACK_I8;
1005 ins->opcode += ovf2ops_op_map [src1->type];
1009 ins->type = STACK_R8;
1010 ins->opcode += unops_op_map [src1->type];
1013 ins->type = STACK_R8;
1017 ins->type = STACK_I4;
1018 ins->opcode += ovfops_op_map [src1->type];
1021 case CEE_CONV_OVF_I:
1022 case CEE_CONV_OVF_U:
1023 ins->type = STACK_PTR;
1024 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic is not defined for floats. */
1027 case CEE_ADD_OVF_UN:
1029 case CEE_MUL_OVF_UN:
1031 case CEE_SUB_OVF_UN:
1032 ins->type = bin_num_table [src1->type] [src2->type];
1033 ins->opcode += ovfops_op_map [src1->type];
1034 if (ins->type == STACK_R8)
1035 ins->type = STACK_INV;
1037 case OP_LOAD_MEMBASE:
1038 ins->type = STACK_PTR;
1040 case OP_LOADI1_MEMBASE:
1041 case OP_LOADU1_MEMBASE:
1042 case OP_LOADI2_MEMBASE:
1043 case OP_LOADU2_MEMBASE:
1044 case OP_LOADI4_MEMBASE:
1045 case OP_LOADU4_MEMBASE:
1046 ins->type = STACK_PTR;
1048 case OP_LOADI8_MEMBASE:
1049 ins->type = STACK_I8;
1051 case OP_LOADR4_MEMBASE:
1052 case OP_LOADR8_MEMBASE:
1053 ins->type = STACK_R8;
1056 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers carry object_class as a placeholder klass. */
1060 if (ins->type == STACK_MP)
1061 ins->klass = mono_defaults.object_class;
/* Row of a CLI-type -> eval-stack-type mapping table (declaration line not
 * visible in this listing). */
1066 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1072 param_table [STACK_MAX] [STACK_MAX] = {
/* Validate that the eval-stack types of ARGS are compatible with SIG's
 * parameter list (byref-ness, object vs. value, float kinds).  NOTE(review):
 * return statements are not visible in this listing, so the exact
 * success/failure values cannot be documented here. */
1077 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1081 switch (args->type) {
1091 for (i = 0; i < sig->param_count; ++i) {
1092 switch (args [i].type) {
1096 if (!sig->params [i]->byref)
1100 if (sig->params [i]->byref)
1102 switch (sig->params [i]->type) {
1103 case MONO_TYPE_CLASS:
1104 case MONO_TYPE_STRING:
1105 case MONO_TYPE_OBJECT:
1106 case MONO_TYPE_SZARRAY:
1107 case MONO_TYPE_ARRAY:
1114 if (sig->params [i]->byref)
1116 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1125 /*if (!param_table [args [i].type] [sig->params [i]->type])
1133 * When we need a pointer to the current domain many times in a method, we
1134 * call mono_domain_get() once and we store the result in a local variable.
1135 * This function returns the variable that represents the MonoDomain*.
1137 inline static MonoInst *
1138 mono_get_domainvar (MonoCompile *cfg)
/* Lazily created and cached on the cfg. */
1140 if (!cfg->domainvar)
1141 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1142 return cfg->domainvar;
1146 * The got_var contains the address of the Global Offset Table when AOT
/* Get-or-create the GOT-address variable; only meaningful on architectures
 * that define MONO_ARCH_NEED_GOT_VAR and when compiling AOT. */
1150 mono_get_got_var (MonoCompile *cfg)
1152 #ifdef MONO_ARCH_NEED_GOT_VAR
1153 if (!cfg->compile_aot)
1155 if (!cfg->got_var) {
1156 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1158 return cfg->got_var;
/* Get-or-create the variable holding the runtime generic context / vtable;
 * requires a generic sharing context.  Kept stack-allocated (volatile) so
 * the unwinder/trampolines can find it. */
1165 mono_get_vtable_var (MonoCompile *cfg)
1167 g_assert (cfg->generic_sharing_context);
1169 if (!cfg->rgctx_var) {
1170 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1171 /* force the var to be stack allocated */
1172 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1175 return cfg->rgctx_var;
/* Map an eval-stack type (ins->type) back to a canonical MonoType*. */
1179 type_from_stack_type (MonoInst *ins) {
1180 switch (ins->type) {
1181 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1182 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1183 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1184 case STACK_R8: return &mono_defaults.double_class->byval_arg;
/* Managed pointers use the klass recorded on the instruction. */
1186 return &ins->klass->this_arg;
1187 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1188 case STACK_VTYPE: return &ins->klass->byval_arg;
1190 g_error ("stack type %d to monotype not handled\n", ins->type);
/* Inverse mapping: CLI type -> STACK_* constant, after stripping enums via
 * mono_type_get_underlying_type ().  NOTE(review): the return statements for
 * each case group are not visible in this listing. */
1195 static G_GNUC_UNUSED int
1196 type_to_stack_type (MonoType *t)
1198 t = mono_type_get_underlying_type (t);
1202 case MONO_TYPE_BOOLEAN:
1205 case MONO_TYPE_CHAR:
1212 case MONO_TYPE_FNPTR:
1214 case MONO_TYPE_CLASS:
1215 case MONO_TYPE_STRING:
1216 case MONO_TYPE_OBJECT:
1217 case MONO_TYPE_SZARRAY:
1218 case MONO_TYPE_ARRAY:
1226 case MONO_TYPE_VALUETYPE:
1227 case MONO_TYPE_TYPEDBYREF:
1229 case MONO_TYPE_GENERICINST:
1230 if (mono_type_generic_inst_is_valuetype (t))
1236 g_assert_not_reached ();
/* Map a CIL ldelem/stelem opcode to the element MonoClass it accesses. */
1243 array_access_to_klass (int opcode)
1247 return mono_defaults.byte_class;
1249 return mono_defaults.uint16_class;
1252 return mono_defaults.int_class;
1255 return mono_defaults.sbyte_class;
1258 return mono_defaults.int16_class;
1261 return mono_defaults.int32_class;
1263 return mono_defaults.uint32_class;
1266 return mono_defaults.int64_class;
1269 return mono_defaults.single_class;
1272 return mono_defaults.double_class;
1273 case CEE_LDELEM_REF:
1274 case CEE_STELEM_REF:
1275 return mono_defaults.object_class;
1277 g_assert_not_reached ();
1283 * We try to share variables when possible
/* Return a local variable used to spill eval-stack SLOT holding a value of
 * INS's stack type.  Integer-like slots are cached in cfg->intvars (keyed by
 * slot and stack type) so the same spill var is reused across bblocks. */
1286 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1291 /* inlining can result in deeper stacks */
1292 if (slot >= cfg->header->max_stack)
1293 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Cache key: one entry per (stack type, slot) pair. */
1295 pos = ins->type - 1 + slot * STACK_MAX;
1297 switch (ins->type) {
1304 if ((vnum = cfg->intvars [pos]))
1305 return cfg->varinfo [vnum];
1306 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1307 cfg->intvars [pos] = res->inst_c0;
/* Non-cached stack types always get a fresh variable. */
1310 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Remember the (image, token) pair under KEY in cfg->token_info_hash so the
 * AOT compiler can later resolve KEY back to a metadata token. */
1316 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1319 * Don't use this if a generic_context is set, since that means AOT can't
1320 * look up the method using just the image+token.
1321 * table == 0 means this is a reference made from a wrapper.
1323 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1324 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1325 jump_info_token->image = image;
1326 jump_info_token->token = token;
1327 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1332 * This function is called to handle items that are left on the evaluation stack
1333 * at basic block boundaries. What happens is that we save the values to local variables
1334 * and we reload them later when first entering the target basic block (with the
1335 * handle_loaded_temps () function).
1336 * A single join point will use the same variables (stored in the array bb->out_stack or
1337 * bb->in_stack, if the basic block is before or after the join point).
1339 * This function needs to be called _before_ emitting the last instruction of
1340 * the bb (i.e. before emitting a branch).
1341 * If the stack merge fails at a join point, cfg->unverifiable is set.
/*
 * handle_stack_args:
 *
 *   Spill the COUNT values left on the evaluation stack (SP) into locals at a
 * basic-block boundary, so successor blocks can reload them.  Successors share
 * the same variable array (bb->out_stack / outb->in_stack); a mismatch in
 * stack depth at a join point marks the method unverifiable.  Must be called
 * before emitting the block's final branch (see the comment block above).
 */
1344 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1347 MonoBasicBlock *bb = cfg->cbb;
1348 MonoBasicBlock *outb;
1349 MonoInst *inst, **locals;
1354 if (cfg->verbose_level > 3)
1355 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this block: pick/allocate the out_stack variable array. */
1356 if (!bb->out_scount) {
1357 bb->out_scount = count;
1358 //printf ("bblock %d has out:", bb->block_num);
/* Prefer reusing an in_stack already assigned to a successor. */
1360 for (i = 0; i < bb->out_count; ++i) {
1361 outb = bb->out_bb [i];
1362 /* exception handlers are linked, but they should not be considered for stack args */
1363 if (outb->flags & BB_EXCEPTION_HANDLER)
1365 //printf (" %d", outb->block_num);
1366 if (outb->in_stack) {
1368 bb->out_stack = outb->in_stack;
1374 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1375 for (i = 0; i < count; ++i) {
1377 * try to reuse temps already allocated for this purpouse, if they occupy the same
1378 * stack slot and if they are of the same type.
1379 * This won't cause conflicts since if 'local' is used to
1380 * store one of the values in the in_stack of a bblock, then
1381 * the same variable will be used for the same outgoing stack
1383 * This doesn't work when inlining methods, since the bblocks
1384 * in the inlined methods do not inherit their in_stack from
1385 * the bblock they are inlined to. See bug #58863 for an
1388 if (cfg->inlined_method)
1389 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1391 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate the chosen variable array into each (non-handler) successor. */
1396 for (i = 0; i < bb->out_count; ++i) {
1397 outb = bb->out_bb [i];
1398 /* exception handlers are linked, but they should not be considered for stack args */
1399 if (outb->flags & BB_EXCEPTION_HANDLER)
1401 if (outb->in_scount) {
/* Join point with a different stack depth: the IL is unverifiable. */
1402 if (outb->in_scount != bb->out_scount) {
1403 cfg->unverifiable = TRUE;
1406 continue; /* check they are the same locals */
1408 outb->in_scount = count;
1409 outb->in_stack = bb->out_stack;
/* Store each stack value into its spill variable and replace it on SP. */
1412 locals = bb->out_stack;
1414 for (i = 0; i < count; ++i) {
1415 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1416 inst->cil_code = sp [i]->cil_code;
1417 sp [i] = locals [i];
1418 if (cfg->verbose_level > 3)
1419 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1423 * It is possible that the out bblocks already have in_stack assigned, and
1424 * the in_stacks differ. In this case, we will store to all the different
1431 /* Find a bblock which has a different in_stack */
1433 while (bindex < bb->out_count) {
1434 outb = bb->out_bb [bindex];
1435 /* exception handlers are linked, but they should not be considered for stack args */
1436 if (outb->flags & BB_EXCEPTION_HANDLER) {
1440 if (outb->in_stack != locals) {
/* Successor uses a different variable set: store the values there too. */
1441 for (i = 0; i < count; ++i) {
1442 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1443 inst->cil_code = sp [i]->cil_code;
1444 sp [i] = locals [i];
1445 if (cfg->verbose_level > 3)
1446 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1448 locals = outb->in_stack;
1457 /* Emit code which loads interface_offsets [klass->interface_id]
1458 * The array is stored in memory before vtable.
/*
 * mini_emit_load_intf_reg_vtable:
 *
 *   Emit IR loading interface_offsets [klass->interface_id] into INTF_REG.
 * The offsets array lives in memory immediately before the vtable, hence the
 * negative membase offset in the non-AOT case.  Under AOT the (adjusted) IID
 * is a patchable constant added to VTABLE_REG.
 */
1461 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1463 if (cfg->compile_aot) {
1464 int ioffset_reg = alloc_preg (cfg);
1465 int iid_reg = alloc_preg (cfg);
/* ADJUSTED_IID resolves at load time to the byte offset before the vtable. */
1467 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1468 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1469 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT case: interface_id is a known constant, index backwards from the vtable. */
1472 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_interface_bitmap_check:
 *
 *   Emit IR which sets INTF_BIT_REG to a nonzero value iff bit
 * klass->interface_id is set in the interface bitmap found at
 * BASE_REG + OFFSET.  With COMPRESSED_INTERFACE_BITMAP the test is done via
 * the mono_class_interface_match icall; otherwise the bit is tested inline
 * (bit index = iid & 7 within byte iid >> 3).
 */
1477 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1479 int ibitmap_reg = alloc_preg (cfg);
1480 #ifdef COMPRESSED_INTERFACE_BITMAP
1482 MonoInst *res, *ins;
1483 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1484 MONO_ADD_INS (cfg->cbb, ins);
/* Interface id is a patchable constant under AOT, an immediate otherwise. */
1486 if (cfg->compile_aot)
1487 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1489 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1490 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1491 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1493 int ibitmap_byte_reg = alloc_preg (cfg);
1495 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1497 if (cfg->compile_aot) {
/* AOT: iid unknown at compile time, compute byte index and bit mask in IR. */
1498 int iid_reg = alloc_preg (cfg);
1499 int shifted_iid_reg = alloc_preg (cfg);
1500 int ibitmap_byte_address_reg = alloc_preg (cfg);
1501 int masked_iid_reg = alloc_preg (cfg);
1502 int iid_one_bit_reg = alloc_preg (cfg);
1503 int iid_bit_reg = alloc_preg (cfg);
1504 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1505 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1506 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1507 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1508 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1509 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1510 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1511 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: byte index and mask fold to immediates. */
1513 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1514 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1520 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1521 * stored in "klass_reg" implements the interface "klass".
/* Bitmap check against a MonoClass (interface_bitmap field of the class). */
1524 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1526 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1530 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1531 * stored in "vtable_reg" implements the interface "klass".
/* Bitmap check against a MonoVTable (interface_bitmap field of the vtable). */
1534 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1536 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1540 * Emit code which checks whenever the interface id of @klass is smaller than
1541 * than the value given by max_iid_reg.
/*
 * mini_emit_max_iid_check:
 *
 *   Emit a guard checking klass->interface_id <= MAX_IID_REG (unsigned).
 * On failure, branch to FALSE_TARGET if given, otherwise throw
 * InvalidCastException.
 */
1544 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1545 MonoBasicBlock *false_target)
1547 if (cfg->compile_aot) {
1548 int iid_reg = alloc_preg (cfg);
1549 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1550 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1553 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
/* false_target != NULL => isinst-style branch; NULL => castclass-style throw. */
1555 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1557 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1560 /* Same as above, but obtains max_iid from a vtable */
/* Max-iid guard loading max_interface_id from a MonoVTable in VTABLE_REG. */
1562 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1563 MonoBasicBlock *false_target)
1565 int max_iid_reg = alloc_preg (cfg);
1567 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1568 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1571 /* Same as above, but obtains max_iid from a klass */
/* Max-iid guard loading max_interface_id from a MonoClass in KLASS_REG. */
1573 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1574 MonoBasicBlock *false_target)
1576 int max_iid_reg = alloc_preg (cfg);
1578 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1579 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 *
 *   Emit an isinst-style subclass test: check whether the class in KLASS_REG
 * derives from KLASS using the supertypes table (supertypes [idepth-1]).
 * Branches to TRUE_TARGET on match, FALSE_TARGET on a too-shallow idepth.
 * KLASS may be given dynamically via KLASS_INS (gshared), as an AOT constant,
 * or as an immediate.
 */
1583 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1585 int idepth_reg = alloc_preg (cfg);
1586 int stypes_reg = alloc_preg (cfg);
1587 int stype = alloc_preg (cfg);
1589 mono_class_setup_supertypes (klass);
/* Beyond the default supertable size the depth must be checked at runtime. */
1591 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1592 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1593 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1594 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1596 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1597 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
/* Compare the supertype slot against the expected class. */
1599 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1600 } else if (cfg->compile_aot) {
1601 int const_reg = alloc_preg (cfg);
1602 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1603 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1605 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1607 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst subclass test with a statically-known KLASS. */
1611 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1613 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 *
 *   Emit an interface implementation check against the vtable in VTABLE_REG:
 * max-iid guard followed by the interface bitmap test.  Branches to
 * TRUE_TARGET on success when given, otherwise throws InvalidCastException
 * on failure.
 */
1617 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1619 int intf_reg = alloc_preg (cfg);
1621 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1622 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1623 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1625 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1627 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1631 * Variant of the above that takes a register to the class, not the vtable.
/*
 * mini_emit_iface_class_cast:
 *
 *   Same as mini_emit_iface_cast, but KLASS_REG holds a MonoClass rather
 * than a vtable.
 */
1634 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1636 int intf_bit_reg = alloc_preg (cfg);
1638 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1639 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1640 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1642 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1644 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 *
 *   Emit an exact class-equality check of KLASS_REG against KLASS, throwing
 * InvalidCastException on mismatch.  KLASS may be supplied dynamically via
 * KLASS_INST (gshared), as an AOT class constant, or as an immediate.
 */
1648 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1651 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1652 } else if (cfg->compile_aot) {
1653 int const_reg = alloc_preg (cfg);
1654 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1655 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1657 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1659 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Exact class-equality check with a statically-known KLASS. */
1663 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1665 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 *
 *   Compare KLASS_REG against KLASS and branch to TARGET using BRANCH_OP
 * (instead of throwing, as mini_emit_class_check does).
 */
1669 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1671 if (cfg->compile_aot) {
1672 int const_reg = alloc_preg (cfg);
1673 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1674 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1676 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1678 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1682 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 *
 *   Emit a castclass-style check that the class in KLASS_REG is castable to
 * KLASS, throwing InvalidCastException on failure.  Handles the array case
 * (rank + element-class check, including the enum/object special cases and
 * the szarray "no bounds" vector check) and the ordinary case via the
 * supertypes table.  OBJ_REG == -1 skips the vector check (used when
 * recursing for arrays of arrays).
 */
1685 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1688 int rank_reg = alloc_preg (cfg);
1689 int eclass_reg = alloc_preg (cfg);
1691 g_assert (!klass_inst);
/* Array case: ranks must match exactly. */
1692 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1693 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1694 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1695 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1696 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Element-class dispatch: enum/object special cases mirror array covariance rules. */
1697 if (klass->cast_class == mono_defaults.object_class) {
1698 int parent_reg = alloc_preg (cfg);
1699 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1700 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1701 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1702 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1703 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1704 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1705 } else if (klass->cast_class == mono_defaults.enum_class) {
1706 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1707 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1708 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1710 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1711 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1714 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1715 /* Check that the object is a vector too */
1716 int bounds_reg = alloc_preg (cfg);
1717 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1718 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1719 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array case: supertypes-table check (throwing variant of isninst). */
1722 int idepth_reg = alloc_preg (cfg);
1723 int stypes_reg = alloc_preg (cfg);
1724 int stype = alloc_preg (cfg);
1726 mono_class_setup_supertypes (klass);
1728 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1729 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1730 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1731 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1733 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1734 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1735 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Castclass check with a statically-known KLASS (no gshared klass inst). */
1740 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1742 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 *
 *   Emit IR zeroing SIZE bytes at DESTREG + OFFSET (only VAL == 0 is
 * supported, see the assert).  Uses a single immediate store when the size
 * fits one aligned register, otherwise a register holding the value and a
 * sequence of progressively narrower stores (8/4/2/1 bytes).
 */
1746 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1750 g_assert (val == 0);
/* Fast path: the whole region fits in one naturally-aligned store. */
1755 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1758 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1761 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1764 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1766 #if SIZEOF_REGISTER == 8
1768 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General path: materialize the value once, then store it repeatedly. */
1774 val_reg = alloc_preg (cfg);
1776 if (SIZEOF_REGISTER == 8)
1777 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1779 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Low alignment: byte stores only. */
1782 /* This could be optimized further if neccesary */
1784 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1791 #if !NO_UNALIGNED_ACCESS
1792 if (SIZEOF_REGISTER == 8) {
/* NOTE(review): 4-byte store before the 8-byte loop — presumably aligns the start; confirm against elided lines. */
1794 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1799 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* Remaining tail: 4-, 2-, then 1-byte stores. */
1807 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1812 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1817 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 *
 *   Emit IR copying SIZE bytes from SRCREG + SOFFSET to DESTREG + DOFFSET,
 * as an unrolled sequence of load/store pairs.  Copies 8/4/2/1-byte chunks
 * depending on alignment and platform; byte-by-byte when alignment is low.
 * SIZE is asserted < 10000 to bound code expansion.
 */
1824 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1831 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1832 g_assert (size < 10000);
/* Low alignment: copy one byte at a time. */
1835 /* This could be optimized further if neccesary */
1837 cur_reg = alloc_preg (cfg);
1838 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1839 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1846 #if !NO_UNALIGNED_ACCESS
1847 if (SIZEOF_REGISTER == 8) {
/* 64-bit: copy 8-byte chunks while possible. */
1849 cur_reg = alloc_preg (cfg);
1850 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1851 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Remaining tail: 4-, 2-, then 1-byte copies. */
1860 cur_reg = alloc_preg (cfg);
1861 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1862 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1868 cur_reg = alloc_preg (cfg);
1869 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1870 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1876 cur_reg = alloc_preg (cfg);
1877 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1878 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * emit_tls_set:
 *
 *   Emit IR storing SREG1 into the TLS slot identified by TLS_KEY.  Under
 * AOT the slot offset is a patchable constant (OP_TLS_SET_REG); otherwise
 * the offset is resolved now via mini_get_tls_offset (OP_TLS_SET).
 */
1886 emit_tls_set (MonoCompile *cfg, int sreg1, int tls_key)
1890 if (cfg->compile_aot) {
1891 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1892 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1894 ins->sreg2 = c->dreg;
1895 MONO_ADD_INS (cfg->cbb, ins);
1897 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1899 ins->inst_offset = mini_get_tls_offset (tls_key);
1900 MONO_ADD_INS (cfg->cbb, ins);
1907 * Emit IR to push the current LMF onto the LMF stack.
/*
 * emit_push_lmf:
 *
 *   Emit IR pushing cfg->lmf_var onto the per-thread LMF stack:
 *     lmf->previous_lmf = <current top>;  <top> = lmf;
 *   The LMF top lives either directly in TLS (TLS_KEY_LMF fast path) or
 * behind an lmf_addr pointer obtained via a TLS intrinsic, an inlined
 * pthread_getspecific call, or the mono_get_lmf_addr icall; lmf_addr is
 * cached in cfg->lmf_addr_var so it can be kept in a global register.
 */
1910 emit_push_lmf (MonoCompile *cfg)
1913 * Emit IR to push the LMF:
1914 * lmf_addr = <lmf_addr from tls>
1915 * lmf->lmf_addr = lmf_addr
1916 * lmf->prev_lmf = *lmf_addr
1919 int lmf_reg, prev_lmf_reg;
1920 MonoInst *ins, *lmf_ins;
/* Fast path: the LMF itself is reachable through TLS. */
1925 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
1926 /* Load current lmf */
1927 lmf_ins = mono_get_lmf_intrinsic (cfg);
1929 MONO_ADD_INS (cfg->cbb, lmf_ins);
1930 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1931 lmf_reg = ins->dreg;
1932 /* Save previous_lmf */
1933 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
/* Set new LMF as the TLS top-of-stack. */
1935 emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
1938 * Store lmf_addr in a variable, so it can be allocated to a global register.
1940 if (!cfg->lmf_addr_var)
1941 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* lmf_addr = &jit_tls->lmf when a jit_tls intrinsic is available. */
1944 ins = mono_get_jit_tls_intrinsic (cfg);
1946 int jit_tls_dreg = ins->dreg;
1948 MONO_ADD_INS (cfg->cbb, ins);
1949 lmf_reg = alloc_preg (cfg);
1950 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, G_STRUCT_OFFSET (MonoJitTlsData, lmf));
1952 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
1955 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
1957 MONO_ADD_INS (cfg->cbb, lmf_ins);
1960 MonoInst *args [16], *jit_tls_ins, *ins;
1962 /* Inline mono_get_lmf_addr () */
1963 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
1965 /* Load mono_jit_tls_id */
1966 EMIT_NEW_AOTCONST (cfg, args [0], MONO_PATCH_INFO_JIT_TLS_ID, NULL);
1967 /* call pthread_getspecific () */
1968 jit_tls_ins = mono_emit_jit_icall (cfg, pthread_getspecific, args);
1969 /* lmf_addr = &jit_tls->lmf */
1970 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, cfg->lmf_addr_var->dreg, jit_tls_ins->dreg, G_STRUCT_OFFSET (MonoJitTlsData, lmf));
/* Fallback: plain icall. */
1973 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
1977 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
1979 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1980 lmf_reg = ins->dreg;
1982 prev_lmf_reg = alloc_preg (cfg);
1983 /* Save previous_lmf */
1984 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
1985 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* Set the new LMF as the top of the stack: *lmf_addr = lmf. */
1987 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
1994 * Emit IR to pop the current LMF from the LMF stack.
/*
 * emit_pop_lmf:
 *
 *   Emit IR popping cfg->lmf_var from the LMF stack — the inverse of
 * emit_push_lmf: restore lmf->previous_lmf as the new top, either directly
 * via TLS (fast path) or through *(lmf_addr).
 */
1997 emit_pop_lmf (MonoCompile *cfg)
1999 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
2005 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2006 lmf_reg = ins->dreg;
2008 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2009 /* Load previous_lmf */
2010 prev_lmf_reg = alloc_preg (cfg);
2011 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
/* Restore previous LMF directly into the TLS slot. */
2013 emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
2016 * Emit IR to pop the LMF:
2017 * *(lmf->lmf_addr) = lmf->prev_lmf
2019 /* This could be called before emit_push_lmf () */
2020 if (!cfg->lmf_addr_var)
2021 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2022 lmf_addr_reg = cfg->lmf_addr_var->dreg;
2024 prev_lmf_reg = alloc_preg (cfg);
2025 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
2026 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * ret_type_to_call_opcode:
 *
 *   Map a return TYPE to the call opcode family (CALL / VOIDCALL / LCALL /
 * FCALL / VCALL), selecting the _REG variant for CALLI and the _MEMBASE
 * variant for VIRT calls.  Generic and enum types are normalized first
 * (note the loops formed by the enum/genericinst re-dispatch).
 */
2031 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
2034 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2037 type = mini_get_basic_type_from_generic (gsctx, type);
2038 type = mini_replace_type (type);
2039 switch (type->type) {
2040 case MONO_TYPE_VOID:
2041 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2044 case MONO_TYPE_BOOLEAN:
2047 case MONO_TYPE_CHAR:
2050 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2054 case MONO_TYPE_FNPTR:
2055 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2056 case MONO_TYPE_CLASS:
2057 case MONO_TYPE_STRING:
2058 case MONO_TYPE_OBJECT:
2059 case MONO_TYPE_SZARRAY:
2060 case MONO_TYPE_ARRAY:
2061 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2064 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
2067 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2068 case MONO_TYPE_VALUETYPE:
/* Enums re-dispatch on their underlying integral type. */
2069 if (type->data.klass->enumtype) {
2070 type = mono_class_enum_basetype (type->data.klass);
2073 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2074 case MONO_TYPE_TYPEDBYREF:
2075 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2076 case MONO_TYPE_GENERICINST:
/* Re-dispatch on the generic container's open type. */
2077 type = &type->data.generic_class->container_class->byval_arg;
2080 case MONO_TYPE_MVAR:
2082 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2084 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
2090 * target_type_is_incompatible:
2091 * @cfg: MonoCompile context
2093 * Check that the item @arg on the evaluation stack can be stored
2094 * in the target type (can be a local, or field, etc).
2095 * The cfg arg can be used to check if we need verification or just
2098 * Returns: non-0 value if arg can't be stored on a target.
/*
 * target_type_is_incompatible:
 * @cfg: MonoCompile context
 *
 *   Check that the evaluation-stack item ARG can be stored into TARGET
 * (a local, field, etc.) by comparing ARG's stack type against TARGET's
 * underlying runtime type.  Returns nonzero when the store is incompatible.
 */
2101 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2103 MonoType *simple_type;
2106 target = mini_replace_type (target);
2107 if (target->byref) {
2108 /* FIXME: check that the pointed to types match */
2109 if (arg->type == STACK_MP)
2110 return arg->klass != mono_class_from_mono_type (target);
2111 if (arg->type == STACK_PTR)
2116 simple_type = mono_type_get_underlying_type (target);
2117 switch (simple_type->type) {
2118 case MONO_TYPE_VOID:
2122 case MONO_TYPE_BOOLEAN:
2125 case MONO_TYPE_CHAR:
/* Small integer targets accept any int32/native-int stack value. */
2128 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2132 /* STACK_MP is needed when setting pinned locals */
2133 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2138 case MONO_TYPE_FNPTR:
2140 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2141 * in native int. (#688008).
2143 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2146 case MONO_TYPE_CLASS:
2147 case MONO_TYPE_STRING:
2148 case MONO_TYPE_OBJECT:
2149 case MONO_TYPE_SZARRAY:
2150 case MONO_TYPE_ARRAY:
2151 if (arg->type != STACK_OBJ)
2153 /* FIXME: check type compatibility */
2157 if (arg->type != STACK_I8)
2162 if (arg->type != STACK_R8)
/* Value types must match both stack kind and exact class. */
2165 case MONO_TYPE_VALUETYPE:
2166 if (arg->type != STACK_VTYPE)
2168 klass = mono_class_from_mono_type (simple_type);
2169 if (klass != arg->klass)
2172 case MONO_TYPE_TYPEDBYREF:
2173 if (arg->type != STACK_VTYPE)
2175 klass = mono_class_from_mono_type (simple_type);
2176 if (klass != arg->klass)
2179 case MONO_TYPE_GENERICINST:
2180 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2181 if (arg->type != STACK_VTYPE)
2183 klass = mono_class_from_mono_type (simple_type);
2184 if (klass != arg->klass)
2188 if (arg->type != STACK_OBJ)
2190 /* FIXME: check type compatibility */
/* Type variables only occur under generic sharing; may be vtype (gsharedvt). */
2194 case MONO_TYPE_MVAR:
2195 g_assert (cfg->generic_sharing_context);
2196 if (mini_type_var_is_vt (cfg, simple_type)) {
2197 if (arg->type != STACK_VTYPE)
2200 if (arg->type != STACK_OBJ)
2205 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2211 * Prepare arguments for passing to a function call.
2212 * Return a non-zero value if the arguments can't be passed to the given
2214 * The type checks are not yet complete and some conversions may need
2215 * casts on 32 or 64 bit architectures.
2217 * FIXME: implement this using target_type_is_incompatible ()
/*
 * check_call_signature:
 *
 *   Check each argument in ARGS against the corresponding parameter of SIG
 * (and the implicit `this` if present).  Returns nonzero when any argument's
 * stack type cannot be passed.  See the FIXME above: this predates
 * target_type_is_incompatible () and overlaps with it.
 */
2220 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2222 MonoType *simple_type;
/* Implicit `this` must be an object, managed pointer, or native pointer. */
2226 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2230 for (i = 0; i < sig->param_count; ++i) {
2231 if (sig->params [i]->byref) {
2232 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2236 simple_type = sig->params [i];
2237 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
2239 switch (simple_type->type) {
2240 case MONO_TYPE_VOID:
2245 case MONO_TYPE_BOOLEAN:
2248 case MONO_TYPE_CHAR:
2251 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2257 case MONO_TYPE_FNPTR:
2258 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2261 case MONO_TYPE_CLASS:
2262 case MONO_TYPE_STRING:
2263 case MONO_TYPE_OBJECT:
2264 case MONO_TYPE_SZARRAY:
2265 case MONO_TYPE_ARRAY:
2266 if (args [i]->type != STACK_OBJ)
2271 if (args [i]->type != STACK_I8)
2276 if (args [i]->type != STACK_R8)
2279 case MONO_TYPE_VALUETYPE:
/* Enums re-check against their underlying integral type. */
2280 if (simple_type->data.klass->enumtype) {
2281 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2284 if (args [i]->type != STACK_VTYPE)
2287 case MONO_TYPE_TYPEDBYREF:
2288 if (args [i]->type != STACK_VTYPE)
2291 case MONO_TYPE_GENERICINST:
/* Re-dispatch on the generic container's open type. */
2292 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2295 case MONO_TYPE_MVAR:
2297 if (args [i]->type != STACK_VTYPE)
2301 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 *
 *   Map a *_MEMBASE (virtual) call opcode to its direct-call counterpart.
 * Aborts on any opcode outside the known call families.
 */
2309 callvirt_to_call (int opcode)
2312 case OP_CALL_MEMBASE:
2314 case OP_VOIDCALL_MEMBASE:
2316 case OP_FCALL_MEMBASE:
2318 case OP_VCALL_MEMBASE:
2320 case OP_LCALL_MEMBASE:
2323 g_assert_not_reached ();
2329 #ifdef MONO_ARCH_HAVE_IMT
2330 /* Either METHOD or IMT_ARG needs to be set */
/*
 * emit_imt_argument:
 *
 *   Emit IR materializing the IMT argument for interface CALL and attach it
 * to the call.  Exactly one of METHOD / IMT_ARG is set (see the comment
 * above): IMT_ARG is moved into a fresh register, otherwise METHOD is loaded
 * as an AOT methodconst or a direct pointer.  Register placement depends on
 * LLVM mode and on whether the arch defines MONO_ARCH_IMT_REG.
 */
2332 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
/* LLVM backend: always pass the IMT value through a vreg recorded on the call. */
2336 if (COMPILE_LLVM (cfg)) {
2337 method_reg = alloc_preg (cfg);
2340 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2341 } else if (cfg->compile_aot) {
2342 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2345 MONO_INST_NEW (cfg, ins, OP_PCONST);
2346 ins->inst_p0 = method;
2347 ins->dreg = method_reg;
2348 MONO_ADD_INS (cfg->cbb, ins);
2352 call->imt_arg_reg = method_reg;
2354 #ifdef MONO_ARCH_IMT_REG
2355 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2357 /* Need this to keep the IMT arg alive */
2358 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
/* Non-LLVM path: use the dedicated IMT register when the arch has one. */
2363 #ifdef MONO_ARCH_IMT_REG
2364 method_reg = alloc_preg (cfg);
2367 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2368 } else if (cfg->compile_aot) {
2369 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2372 MONO_INST_NEW (cfg, ins, OP_PCONST);
2373 ins->inst_p0 = method;
2374 ins->dreg = method_reg;
2375 MONO_ADD_INS (cfg->cbb, ins);
2378 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* No IMT register: defer to the arch-specific implementation. */
2380 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/*
 * mono_patch_info_new:
 *
 *   Allocate a MonoJumpInfo patch record of the given TYPE/TARGET for the
 * code at IP, from mempool MP.
 */
2385 static MonoJumpInfo *
2386 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2388 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2392 ji->data.target = target;
/* Context-used flags for KLASS; only meaningful under generic sharing. */
2398 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2400 if (cfg->generic_sharing_context)
2401 return mono_class_check_context_used (klass);
/* Context-used flags for METHOD; only meaningful under generic sharing. */
2407 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2409 if (cfg->generic_sharing_context)
2410 return mono_method_check_context_used (method);
2416 * check_method_sharing:
2418 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
/*
 * check_method_sharing:
 *
 *   Decide whether a call to CMETHOD must pass a vtable (static/valuetype
 * generic methods) or an mrgctx (generic methods with a method inst) —
 * i.e. whether the callee might be compiled as shared code.  Results are
 * written to *OUT_PASS_VTABLE / *OUT_PASS_MRGCTX when non-NULL.
 */
2421 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2423 gboolean pass_vtable = FALSE;
2424 gboolean pass_mrgctx = FALSE;
/* Vtable case: static or valuetype method on a generic class. */
2426 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2427 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2428 gboolean sharable = FALSE;
2430 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2433 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2434 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
2435 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2437 sharable = sharing_enabled && context_sharable;
2441 * Pass vtable iff target method might
2442 * be shared, which means that sharing
2443 * is enabled for its class and its
2444 * context is sharable (and it's not a
2447 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* Mrgctx case: generic method (method inst present) — mutually exclusive with vtable. */
2451 if (mini_method_get_context (cmethod) &&
2452 mini_method_get_context (cmethod)->method_inst) {
2453 g_assert (!pass_vtable);
2455 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2458 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2459 MonoGenericContext *context = mini_method_get_context (cmethod);
2460 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2462 if (sharing_enabled && context_sharable)
2464 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, mono_method_signature (cmethod)))
2469 if (out_pass_vtable)
2470 *out_pass_vtable = pass_vtable;
2471 if (out_pass_mrgctx)
2472 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 *
 * Create and initialize a MonoCallInst for a call with signature SIG and
 * arguments ARGS, without adding it to the current basic block. Handles:
 * tail calls (OP_TAILCALL), valuetype returns (via cfg->vret_addr or an
 * OP_OUTARG_VTRETADDR temp), soft-float r8->r4 argument conversion, and the
 * LLVM vs. native arch call-lowering split.
 * CALLI/VIRTUAL/TAIL/RGCTX/UNBOX_TRAMPOLINE are flags describing the call kind.
 * NOTE(review): several lines (declarations, some conditions, `break`s and
 * closing braces) are elided in this view.
 */
2475 inline static MonoCallInst *
2476 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2477 MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
2481 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Tail calls use a dedicated opcode; otherwise pick the opcode from the return type. */
2486 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2488 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2491 call->signature = sig;
2492 call->rgctx_reg = rgctx;
2493 sig_ret = mini_replace_type (sig->ret);
2495 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
/* Valuetype return: either reuse the caller-provided vret address ... */
2498 if (mini_type_is_vtype (cfg, sig_ret)) {
2499 call->vret_var = cfg->vret_addr;
2500 //g_assert_not_reached ();
/* ... or allocate a temp and pass its address via OP_OUTARG_VTRETADDR.
 * NOTE(review): the guard distinguishing this branch from the one above is
 * elided in this view — presumably it tests cfg->vret_addr; confirm in full source. */
2502 } else if (mini_type_is_vtype (cfg, sig_ret)) {
2503 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2506 temp->backend.is_pinvoke = sig->pinvoke;
2509 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2510 * address of return value to increase optimization opportunities.
2511 * Before vtype decomposition, the dreg of the call ins itself represents the
2512 * fact the call modifies the return value. After decomposition, the call will
2513 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2514 * will be transformed into an LDADDR.
2516 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2517 loada->dreg = alloc_preg (cfg);
2518 loada->inst_p0 = temp;
2519 /* We reference the call too since call->dreg could change during optimization */
2520 loada->inst_p1 = call;
2521 MONO_ADD_INS (cfg->cbb, loada);
2523 call->inst.dreg = temp->dreg;
2525 call->vret_var = loada;
2526 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2527 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2529 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2530 if (COMPILE_SOFT_FLOAT (cfg)) {
2532 * If the call has a float argument, we would need to do an r8->r4 conversion using
2533 * an icall, but that cannot be done during the call sequence since it would clobber
2534 * the call registers + the stack. So we do it before emitting the call.
2536 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2538 MonoInst *in = call->args [i];
/* Index 0 is 'this' (if present); params start at sig->hasthis. */
2540 if (i >= sig->hasthis)
2541 t = sig->params [i - sig->hasthis];
2543 t = &mono_defaults.int_class->byval_arg;
2544 t = mono_type_get_underlying_type (t);
2546 if (!t->byref && t->type == MONO_TYPE_R4) {
2547 MonoInst *iargs [1];
2551 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2553 /* The result will be in an int vreg */
2554 call->args [i] = conv;
2560 call->need_unbox_trampoline = unbox_trampoline;
/* Lower the call's out-args either through LLVM or the native backend. */
2563 if (COMPILE_LLVM (cfg))
2564 mono_llvm_emit_call (cfg, call);
2566 mono_arch_emit_call (cfg, call);
2568 mono_arch_emit_call (cfg, call);
/* Track the maximum outgoing-parameter area needed by any call in the method. */
2571 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2572 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *
 * Attach the rgctx argument in RGCTX_REG to CALL. On architectures with a
 * dedicated rgctx register (MONO_ARCH_RGCTX_REG) the value is passed in that
 * register; otherwise only rgctx_arg_reg is recorded (the #else arm is elided
 * in this view).
 */
2578 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2580 #ifdef MONO_ARCH_RGCTX_REG
2581 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2582 cfg->uses_rgctx_reg = TRUE;
2583 call->rgctx_reg = TRUE;
2585 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 *
 * Emit an indirect call through the function pointer in ADDR, with optional
 * IMT and rgctx arguments. When cfg->check_pinvoke_callconv is set and we are
 * inside a managed-to-native wrapper, the stack pointer is saved before the
 * call (OP_GET_SP) and compared afterwards so that a callee using the wrong
 * calling convention raises ExecutionEngineException instead of corrupting
 * the stack.
 * NOTE(review): some declarations and closing braces are elided in this view.
 */
2592 inline static MonoInst*
2593 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2598 gboolean check_sp = FALSE;
2600 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2601 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2603 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* Copy the rgctx value into its own vreg so it survives until set_rgctx_arg. */
2608 rgctx_reg = mono_alloc_preg (cfg);
2609 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
/* Save SP before the call so we can validate it afterwards. */
2613 if (!cfg->stack_inbalance_var)
2614 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2616 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2617 ins->dreg = cfg->stack_inbalance_var->dreg;
2618 MONO_ADD_INS (cfg->cbb, ins);
2621 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
/* The call target is the value of ADDR. */
2623 call->inst.sreg1 = addr->dreg;
2626 emit_imt_argument (cfg, call, NULL, imt_arg);
2628 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
/* Read SP again after the call and compare with the saved value. */
2633 sp_reg = mono_alloc_preg (cfg);
2635 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2637 MONO_ADD_INS (cfg->cbb, ins);
2639 /* Restore the stack so we don't crash when throwing the exception */
2640 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2641 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2642 MONO_ADD_INS (cfg->cbb, ins);
2644 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2645 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2649 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2651 return (MonoInst*)call;
2655 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2658 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2660 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *
 * Emit a (possibly virtual) call to METHOD with signature SIG and arguments
 * ARGS. THIS is the receiver (NULL for static calls), IMT_ARG/RGCTX_ARG are
 * optional hidden arguments. Handles, in order: remoting proxies, string
 * ctor return-type fixup, the multicast-delegate Invoke fast path,
 * devirtualization of non-virtual/final methods, and full virtual dispatch
 * through the vtable or the IMT for interface calls.
 * Returns the call instruction (already added to cfg->cbb).
 * NOTE(review): many lines (declarations, else arms, #endif/braces) are elided
 * in this view; comments describe only the visible logic.
 */
2663 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2664 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2666 #ifndef DISABLE_REMOTING
2667 gboolean might_be_remote = FALSE;
2669 gboolean virtual = this != NULL;
2670 gboolean enable_for_aot = TRUE;
2674 gboolean need_unbox_trampoline;
2677 sig = mono_method_signature (method);
/* Preserve the rgctx value in its own vreg across the argument setup. */
2680 rgctx_reg = mono_alloc_preg (cfg);
2681 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2684 if (method->string_ctor) {
2685 /* Create the real signature */
2686 /* FIXME: Cache these */
2687 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2688 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2693 context_used = mini_method_check_context_used (cfg, method);
2695 #ifndef DISABLE_REMOTING
/* A call on a MarshalByRef (or object) receiver might hit a transparent proxy. */
2696 might_be_remote = this && sig->hasthis &&
2697 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2698 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
2700 if (might_be_remote && context_used) {
2703 g_assert (cfg->generic_sharing_context);
/* Shared code: fetch the remoting-check wrapper address from the rgctx
 * and call it indirectly. */
2705 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2707 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
/* Valuetype methods reached through object/interface need the unbox trampoline. */
2711 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2713 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2715 #ifndef DISABLE_REMOTING
2716 if (might_be_remote)
2717 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2720 call->method = method;
2721 call->inst.flags |= MONO_INST_HAS_METHOD;
2722 call->inst.inst_left = this;
2723 call->tail_call = tail;
2726 int vtable_reg, slot_reg, this_reg;
2729 this_reg = this->dreg;
/* Fast path: delegate Invoke calls jump through delegate->invoke_impl. */
2731 if (ARCH_HAVE_DELEGATE_TRAMPOLINES && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2732 MonoInst *dummy_use;
2734 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2736 /* Make a call to delegate->invoke_impl */
2737 call->inst.inst_basereg = this_reg;
2738 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2739 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2741 /* We must emit a dummy use here because the delegate trampoline will
2742 replace the 'this' argument with the delegate target making this activation
2743 no longer a root for the delegate.
2744 This is an issue for delegates that target collectible code such as dynamic
2745 methods of GC'able assemblies.
2747 For a test case look into #667921.
2749 FIXME: a dummy use is not the best way to do it as the local register allocator
2750 will put it on a caller save register and spill it around the call.
2751 Ideally, we would either put it on a callee save register or only do the store part.
2753 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2755 return (MonoInst*)call;
/* Devirtualize: non-virtual methods, and final methods, can be called directly. */
2758 if ((!cfg->compile_aot || enable_for_aot) &&
2759 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2760 (MONO_METHOD_IS_FINAL (method) &&
2761 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2762 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2764 * the method is not virtual, we just need to ensure this is not null
2765 * and then we can call the method directly.
2767 #ifndef DISABLE_REMOTING
2768 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2770 * The check above ensures method is not gshared, this is needed since
2771 * gshared methods can't have wrappers.
2773 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2777 if (!method->string_ctor)
2778 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2780 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2781 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2783 * the method is virtual, but we can statically dispatch since either
2784 * its class or the method itself are sealed.
2785 * But first we need to ensure it's not a null reference.
2787 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2789 call->inst.opcode = callvirt_to_call (call->inst.opcode);
/* Full virtual dispatch: load the vtable, then the slot. */
2791 vtable_reg = alloc_preg (cfg);
2792 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2793 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2795 #ifdef MONO_ARCH_HAVE_IMT
/* Interface call through the IMT: slots sit at negative offsets from the vtable. */
2797 guint32 imt_slot = mono_method_get_imt_slot (method);
2798 emit_imt_argument (cfg, call, call->method, imt_arg);
2799 slot_reg = vtable_reg;
2800 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
/* Fallback when no IMT slot was assigned: load the interface table entry. */
2803 if (slot_reg == -1) {
2804 slot_reg = alloc_preg (cfg);
2805 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2806 offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
/* Ordinary virtual call: index into the vtable's method array. */
2809 slot_reg = vtable_reg;
2810 offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2811 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2812 #ifdef MONO_ARCH_HAVE_IMT
2814 g_assert (mono_method_signature (method)->generic_param_count);
2815 emit_imt_argument (cfg, call, call->method, imt_arg);
2820 call->inst.sreg1 = slot_reg;
2821 call->inst.inst_offset = offset;
2822 call->virtual = TRUE;
2826 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2829 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2831 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *
 * Convenience wrapper around mono_emit_method_call_full: non-tail call with
 * METHOD's own signature and no imt/rgctx arguments.
 */
2835 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2837 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this, NULL, NULL);
/*
 * mono_emit_native_call:
 *
 * Emit a direct call to the native function FUNC with signature SIG.
 * The call is non-calli, non-virtual, non-tail and carries no rgctx.
 * NOTE(review): the remaining parameters/locals and the line storing FUNC into
 * the call are elided in this view.
 */
2841 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2848 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2851 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2853 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *
 * Emit a call to the registered JIT icall whose address is FUNC: look up its
 * MonoJitICallInfo by address and call through its wrapper with its recorded
 * signature.
 */
2857 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2859 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2863 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2867 * mono_emit_abs_call:
2869 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
/*
 * The MonoJumpInfo itself is passed as the "address"; it is registered in
 * cfg->abs_patches so the PATCH_INFO_ABS resolving code can map it back to
 * the real target at patch time.
 */
2871 inline static MonoInst*
2872 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2873 MonoMethodSignature *sig, MonoInst **args)
2875 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2879 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Lazily create the patch table; keys and values are the ji pointer itself. */
2882 if (cfg->abs_patches == NULL)
2883 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2884 g_hash_table_insert (cfg->abs_patches, ji, ji);
2885 ins = mono_emit_native_call (cfg, ji, sig, args);
2886 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *
 * Widen the result INS of a call with signature FSIG to full register width.
 * Needed for pinvoke (and LLVM) returns of sub-register integer types, since
 * native code may leave the upper bits uninitialized. Returns the (possibly
 * new) result instruction.
 * NOTE(review): `break`s, the default case and closing braces are elided in
 * this view.
 */
2891 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2893 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2894 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2898 * Native code might return non register sized integers
2899 * without initializing the upper bits.
/* Map the return type's load opcode to the matching sign/zero-extension. */
2901 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2902 case OP_LOADI1_MEMBASE:
2903 widen_op = OP_ICONV_TO_I1;
2905 case OP_LOADU1_MEMBASE:
2906 widen_op = OP_ICONV_TO_U1;
2908 case OP_LOADI2_MEMBASE:
2909 widen_op = OP_ICONV_TO_I2;
2911 case OP_LOADU2_MEMBASE:
2912 widen_op = OP_ICONV_TO_U2;
2918 if (widen_op != -1) {
2919 int dreg = alloc_preg (cfg);
2922 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2923 widen->type = ins->type;
/*
 * get_memcpy_method:
 *
 * Return (and lazily cache) the managed String.memcpy helper from corlib.
 * Aborts with g_error if the helper is missing (old corlib).
 * NOTE(review): the NULL-check guarding g_error is elided in this view.
 */
2933 get_memcpy_method (void)
2935 static MonoMethod *memcpy_method = NULL;
2936 if (!memcpy_method) {
2937 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2939 g_error ("Old corlib found. Install a new one");
2941 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 * Recursively build a bitmap of which pointer-sized slots of KLASS (starting
 * at byte OFFSET) hold object references, ORed into *WB_BITMAP. Used by
 * mono_emit_wb_aware_memcpy to decide which stores need a write barrier.
 * Static fields are skipped; nested valuetypes with references recurse.
 */
2945 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
2947 MonoClassField *field;
2948 gpointer iter = NULL;
2950 while ((field = mono_class_get_fields (klass, &iter))) {
2953 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* Valuetype field offsets include the MonoObject header; strip it. */
2955 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2956 if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
2957 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2958 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2960 MonoClass *field_class = mono_class_from_mono_type (field->type);
2961 if (field_class->has_references)
2962 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *
 * Emit a GC write barrier for the store of VALUE through PTR. Strategy, in
 * order of preference: the arch-specific OP_CARD_TABLE_WBARRIER opcode, an
 * inline card-table mark (shift, optional mask, store 1 into the card), or a
 * call to the generic managed write-barrier method. No-op unless
 * cfg->gen_write_barriers is set. A dummy use of VALUE keeps it alive across
 * the barrier.
 */
2968 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
2970 int card_table_shift_bits;
2971 gpointer card_table_mask;
2973 MonoInst *dummy_use;
2974 int nursery_shift_bits;
2975 size_t nursery_size;
2976 gboolean has_card_table_wb = FALSE;
2978 if (!cfg->gen_write_barriers)
2981 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
2983 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
2985 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
2986 has_card_table_wb = TRUE;
/* Best case: a single backend opcode does the whole barrier. */
2989 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
2992 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2993 wbarrier->sreg1 = ptr->dreg;
2994 wbarrier->sreg2 = value->dreg;
2995 MONO_ADD_INS (cfg->cbb, wbarrier);
/* Inline card marking: card = card_table[(ptr >> shift) & mask] = 1. */
2996 } else if (card_table) {
2997 int offset_reg = alloc_preg (cfg);
2998 int card_reg = alloc_preg (cfg);
3001 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3002 if (card_table_mask)
3003 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3005 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3006 * IMM's larger than 32bits.
3008 if (cfg->compile_aot) {
3009 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
3011 MONO_INST_NEW (cfg, ins, OP_PCONST);
3012 ins->inst_p0 = card_table;
3013 ins->dreg = card_reg;
3014 MONO_ADD_INS (cfg->cbb, ins);
3017 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
3018 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* Fallback: call the managed write-barrier helper. */
3020 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3021 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
3024 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *
 * Emit an inline, write-barrier-aware copy of SIZE bytes of a KLASS value
 * from IARGS[1] to IARGS[0]. Pointer-sized chunks that hold references (per
 * create_write_barrier_bitmap) get a write barrier after the store; the
 * sub-pointer tail is copied with plain 4/2/1-byte moves. Bails out (returns
 * FALSE, presumably) when alignment is too small or SIZE exceeds the bitmap's
 * 32-slot capacity; sizes above 5 words go through the
 * mono_gc_wbarrier_value_copy_bitmap icall instead of being unrolled.
 * NOTE(review): return statements and some braces are elided in this view.
 */
3028 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3030 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3031 unsigned need_wb = 0;
3036 /*types with references can't have alignment smaller than sizeof(void*) */
3037 if (align < SIZEOF_VOID_P)
3040 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3041 if (size > 32 * SIZEOF_VOID_P)
3044 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3046 /* We don't unroll more than 5 stores to avoid code bloat. */
3047 if (size > 5 * SIZEOF_VOID_P) {
3048 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
/* Round SIZE up to a multiple of the pointer size. */
3049 size += (SIZEOF_VOID_P - 1);
3050 size &= ~(SIZEOF_VOID_P - 1);
3052 EMIT_NEW_ICONST (cfg, iargs [2], size);
3053 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3054 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3058 destreg = iargs [0]->dreg;
3059 srcreg = iargs [1]->dreg;
3062 dest_ptr_reg = alloc_preg (cfg);
3063 tmp_reg = alloc_preg (cfg);
/* Keep a moving destination pointer so write barriers see the slot address. */
3066 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
3068 while (size >= SIZEOF_VOID_P) {
3069 MonoInst *load_inst;
3070 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3071 load_inst->dreg = tmp_reg;
3072 load_inst->inst_basereg = srcreg;
3073 load_inst->inst_offset = offset;
3074 MONO_ADD_INS (cfg->cbb, load_inst);
3076 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
/* Barrier only for slots the bitmap marks as references (guard elided here). */
3079 emit_write_barrier (cfg, iargs [0], load_inst);
3081 offset += SIZEOF_VOID_P;
3082 size -= SIZEOF_VOID_P;
3085 /*tmp += sizeof (void*)*/
3086 if (size >= SIZEOF_VOID_P) {
3087 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3088 MONO_ADD_INS (cfg->cbb, iargs [0]);
3092 /* Those cannot be references since size < sizeof (void*) */
3094 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3095 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3101 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3102 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3108 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3109 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
/*
 * mini_emit_stobj:
 *
3118 * Emit code to copy a valuetype of type @klass whose address is stored in
3119 * @src->dreg to memory whose address is stored at @dest->dreg.
 *
 * If NATIVE, the native (marshalled) size is used and no write barriers are
 * emitted. For gsharedvt classes the size and memcpy routine come from the
 * rgctx. Reference-carrying copies go through the wb-aware inline path or the
 * mono_value_copy / mono_gsharedvt_value_copy icalls; plain copies are either
 * inlined (<= 5 words) or dispatched to the managed memcpy helper.
 * NOTE(review): some declarations, returns and braces are elided in this view.
 */
3122 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3124 MonoInst *iargs [4];
3125 int context_used, n;
3127 MonoMethod *memcpy_method;
3128 MonoInst *size_ins = NULL;
3129 MonoInst *memcpy_ins = NULL;
3133 * This check breaks with spilled vars... need to handle it during verification anyway.
3134 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* gsharedvt: size/memcpy are only known at runtime, fetch them from the rgctx. */
3137 if (mini_is_gsharedvt_klass (cfg, klass)) {
3139 context_used = mini_class_check_context_used (cfg, klass);
3140 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3141 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3145 n = mono_class_native_size (klass, &align);
3147 n = mono_class_value_size (klass, &align);
3149 /* if native is true there should be no references in the struct */
3150 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3151 /* Avoid barriers when storing to the stack */
3152 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3153 (dest->opcode == OP_LDADDR))) {
3159 context_used = mini_class_check_context_used (cfg, klass);
3161 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3162 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
/* Otherwise pass the klass (from rgctx, classconst or pconst) to the icall. */
3164 } else if (context_used) {
3165 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3167 if (cfg->compile_aot) {
3168 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
3170 EMIT_NEW_PCONST (cfg, iargs [2], klass);
3171 mono_class_compute_gc_descriptor (klass);
3176 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3178 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No references involved: a plain memcpy is sufficient. */
3183 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
3184 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3185 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3190 iargs [2] = size_ins;
3192 EMIT_NEW_ICONST (cfg, iargs [2], n);
3194 memcpy_method = get_memcpy_method ();
3196 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3198 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 * Return (and lazily cache) the managed String.memset helper from corlib.
 * Aborts with g_error if the helper is missing (old corlib).
 * NOTE(review): the NULL-check guarding g_error is elided in this view.
 */
3203 get_memset_method (void)
3205 static MonoMethod *memset_method = NULL;
3206 if (!memset_method) {
3207 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3209 g_error ("Old corlib found. Install a new one");
3211 return memset_method;
/*
 * mini_emit_initobj:
 *
 * Emit code to zero-initialize a valuetype of type KLASS at address
 * DEST->dreg (IL initobj). gsharedvt classes call a runtime bzero routine
 * with an rgctx-provided size; small fixed-size types (<= 5 words) are
 * memset inline; everything else calls the managed memset helper.
 * IP is the IL offset of the instruction (use elided in this view).
 */
3215 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3217 MonoInst *iargs [3];
3218 int n, context_used;
3220 MonoMethod *memset_method;
3221 MonoInst *size_ins = NULL;
3222 MonoInst *bzero_ins = NULL;
3223 static MonoMethod *bzero_method;
3225 /* FIXME: Optimize this for the case when dest is an LDADDR */
3227 mono_class_init (klass);
/* gsharedvt: size and bzero implementation come from the rgctx at runtime. */
3228 if (mini_is_gsharedvt_klass (cfg, klass)) {
3229 context_used = mini_class_check_context_used (cfg, klass);
3230 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3231 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3233 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3234 g_assert (bzero_method);
3236 iargs [1] = size_ins;
3237 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3241 n = mono_class_value_size (klass, &align);
/* Small types: inline the memset. */
3243 if (n <= sizeof (gpointer) * 5) {
3244 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3247 memset_method = get_memset_method ();
3249 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3250 EMIT_NEW_ICONST (cfg, iargs [2], n);
3251 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 * Emit IR to load the runtime generic context for the current METHOD.
 * Sources, depending on how the method is shared:
 * - generic method (MONO_GENERIC_CONTEXT_USED_METHOD): the MRGCTX variable;
 * - static/valuetype method: the vtable variable (and, for inflated methods,
 *   the vtable loaded out of the mrgctx's class_vtable field);
 * - otherwise: the vtable loaded from the 'this' argument.
 * NOTE(review): return statements and some braces are elided in this view.
 */
3256 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3258 MonoInst *this = NULL;
3260 g_assert (cfg->generic_sharing_context);
/* Instance methods of reference types carry the context in 'this'. */
3262 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3263 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3264 !method->klass->valuetype)
3265 EMIT_NEW_ARGLOAD (cfg, this, 0);
3267 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3268 MonoInst *mrgctx_loc, *mrgctx_var;
3271 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3273 mrgctx_loc = mono_get_vtable_var (cfg);
3274 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3277 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3278 MonoInst *vtable_loc, *vtable_var;
3282 vtable_loc = mono_get_vtable_var (cfg);
3283 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
/* The variable actually holds an mrgctx here; load its vtable field. */
3285 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3286 MonoInst *mrgctx_var = vtable_var;
3289 vtable_reg = alloc_preg (cfg);
3290 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3291 vtable_var->type = STACK_PTR;
/* Fall-through case: read the vtable out of the 'this' object. */
3299 vtable_reg = alloc_preg (cfg);
3300 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 * Allocate (from MP) and fill a MonoJumpInfoRgctxEntry describing an rgctx
 * slot lookup: which METHOD it belongs to, whether the context lives in an
 * mrgctx (IN_MRGCTX), the patch describing the looked-up data, and the kind
 * of info requested. The `return res;` line is elided in this view.
 */
3305 static MonoJumpInfoRgctxEntry *
3306 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3308 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3309 res->method = method;
3310 res->in_mrgctx = in_mrgctx;
3311 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3312 res->data->type = patch_type;
3313 res->data->data.target = patch_data;
3314 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *
 * Emit a call to the rgctx lazy-fetch trampoline which resolves ENTRY using
 * the context value RGCTX.
 */
3319 static inline MonoInst*
3320 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3322 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 * Emit IR to fetch RGCTX_TYPE information about KLASS from the runtime
 * generic context of the current method.
 */
3326 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3327 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3329 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3330 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3332 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 *
 * Emit IR to fetch RGCTX_TYPE information about signature SIG from the
 * runtime generic context of the current method.
 */
3336 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3337 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3339 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3340 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3342 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *
 * Emit IR to fetch RGCTX_TYPE information about a gsharedvt call to CMETHOD
 * with signature SIG; the (sig, method) pair is wrapped in a
 * MonoJumpInfoGSharedVtCall allocated from the cfg mempool.
 */
3346 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3347 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3349 MonoJumpInfoGSharedVtCall *call_info;
3350 MonoJumpInfoRgctxEntry *entry;
3353 call_info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3354 call_info->sig = sig;
3355 call_info->method = cmethod;
3357 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3358 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3360 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 *
 * Emit IR to fetch the MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO entry for
 * CMETHOD, described by INFO, from the runtime generic context.
 */
3365 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3366 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3368 MonoJumpInfoRgctxEntry *entry;
3371 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3372 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3374 return emit_rgctx_fetch (cfg, rgctx, entry);
3378 * emit_get_rgctx_method:
3380 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3381 * normal constants, else emit a load from the rgctx.
3384 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3385 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
/* No generic context in play: the method is a compile-time constant. */
3387 if (!context_used) {
3390 switch (rgctx_type) {
3391 case MONO_RGCTX_INFO_METHOD:
3392 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3394 case MONO_RGCTX_INFO_METHOD_RGCTX:
3395 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
/* Other info types are not valid for the constant path. */
3398 g_assert_not_reached ();
3401 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3402 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3404 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 * Emit IR to fetch RGCTX_TYPE information about FIELD from the runtime
 * generic context of the current method.
 */
3409 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3410 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3412 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3413 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3415 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 * Return the slot index in cfg->gsharedvt_info for the (DATA, RGCTX_TYPE)
 * pair, reusing an existing matching entry when possible (except for
 * MONO_RGCTX_INFO_LOCAL_OFFSET, which always gets a fresh slot). Grows the
 * entries array (mempool-allocated, doubling, min 16) when full.
 * NOTE(review): the early return for a found entry and the final `return idx;`
 * are elided in this view.
 */
3419 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3421 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3422 MonoRuntimeGenericContextInfoTemplate *template;
/* Look for an existing entry to reuse. */
3427 for (i = 0; i < info->num_entries; ++i) {
3428 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3430 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* Grow the array when it is full (mempool allocations are never freed). */
3434 if (info->num_entries == info->count_entries) {
3435 MonoRuntimeGenericContextInfoTemplate *new_entries;
3436 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3438 new_entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3440 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3441 info->entries = new_entries;
3442 info->count_entries = new_count_entries;
3445 idx = info->num_entries;
3446 template = &info->entries [idx];
3447 template->info_type = rgctx_type;
3448 template->data = data;
3450 info->num_entries ++;
3456 * emit_get_gsharedvt_info:
3458 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3461 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
/* Reserve (or reuse) a slot for (data, rgctx_type), then load it. */
3466 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3467 /* Load info->entries [idx] */
3468 dreg = alloc_preg (cfg);
3469 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/*
 * emit_get_gsharedvt_info_klass:
 *
 * Convenience wrapper: fetch RGCTX_TYPE info for KLASS (keyed by its byval
 * MonoType) from the gsharedvt info var.
 */
3475 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3477 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
/*
 * emit_generic_class_init:
 *
 * Emit a call to the generic class-init trampoline for KLASS, passing the
 * vtable either from the rgctx (shared code) or as a constant.
3481 * On return the caller must check @klass for load errors.
 */
3484 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
3486 MonoInst *vtable_arg;
3490 context_used = mini_class_check_context_used (cfg, klass);
3493 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3494 klass, MONO_RGCTX_INFO_VTABLE);
3496 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3500 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* LLVM and the native backend use different trampoline signatures. */
3503 if (COMPILE_LLVM (cfg))
3504 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3506 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3507 #ifdef MONO_ARCH_VTABLE_REG
3508 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3509 cfg->uses_vtable_reg = TRUE;
/*
 * emit_seq_point:
 *
 * Emit a debugger sequence point for IL offset IP when sequence points are
 * enabled and we are compiling METHOD itself (not an inlined callee).
 * INTR_LOC marks an interruptible location; NONEMPTY_STACK flags seq points
 * where the IL evaluation stack is not empty.
 */
3516 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3520 if (cfg->gen_seq_points && cfg->method == method) {
3521 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3523 ins->flags |= MONO_INST_NONEMPTY_STACK;
3524 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *
 * When --debug=casts is enabled, record the source class (from OBJ_REG's
 * vtable) and the target KLASS into the thread's MonoJitTlsData
 * (class_cast_from/class_cast_to) so a failing cast can report both types.
 * If NULL_CHECK, the recording is skipped for null objects via is_null_bb.
 * *OUT_BBLOCK (if given) receives the current bblock on exit, since this may
 * start a new one.
 */
3529 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check, MonoBasicBlock **out_bblock)
3531 if (mini_get_debug_options ()->better_cast_details) {
3532 int to_klass_reg = alloc_preg (cfg);
3533 int vtable_reg = alloc_preg (cfg);
3534 int klass_reg = alloc_preg (cfg);
3535 MonoBasicBlock *is_null_bb = NULL;
3539 NEW_BBLOCK (cfg, is_null_bb);
/* Skip detail recording for null objects. */
3541 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3542 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3545 tls_get = mono_get_jit_tls_intrinsic (cfg);
3547 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3551 MONO_ADD_INS (cfg->cbb, tls_get);
3552 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3553 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3555 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3556 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3557 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3560 MONO_START_BB (cfg, is_null_bb);
3562 *out_bblock = cfg->cbb;
/*
 * reset_cast_details:
 *
 * Clear the per-thread cast-failure details stored by save_cast_details,
 * so a later unrelated failure does not report stale types.
 */
3568 reset_cast_details (MonoCompile *cfg)
3570 /* Reset the variables holding the cast details */
3571 if (mini_get_debug_options ()->better_cast_details) {
3572 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3574 MONO_ADD_INS (cfg->cbb, tls_get);
3575 /* It is enough to reset the from field */
3576 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
/*
 * mini_emit_check_array_type:
 *
 *   Emit IR which checks that @obj's runtime type is exactly @array_class,
 * throwing ArrayTypeMismatchException otherwise (used for stores into
 * covariant arrays).  Three strategies depending on compilation mode:
 * MONO_OPT_SHARED compares MonoClass pointers, shared generic code compares
 * against an rgctx-fetched vtable, and the default path compares vtable
 * pointers (via a constant, or an AOT patch).
 */
3581 * On return the caller must check @array_class for load errors
3584 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3586 int vtable_reg = alloc_preg (cfg);
3589 context_used = mini_class_check_context_used (cfg, array_class);
3591 save_cast_details (cfg, array_class, obj->dreg, FALSE, NULL);
/* Faulting load: also acts as the null check on @obj */
3593 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3595 if (cfg->opt & MONO_OPT_SHARED) {
/* Shared code: vtables are per-domain, so compare the MonoClass instead */
3596 int class_reg = alloc_preg (cfg);
3597 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3598 if (cfg->compile_aot) {
3599 int klass_reg = alloc_preg (cfg);
3600 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3601 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3603 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3605 } else if (context_used) {
/* Generic sharing: fetch the expected vtable from the rgctx at runtime */
3606 MonoInst *vtable_ins;
3608 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3609 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3611 if (cfg->compile_aot) {
3615 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3617 vt_reg = alloc_preg (cfg);
3618 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3619 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3622 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3624 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3628 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3630 reset_cast_details (cfg);
/*
 * handle_unbox_nullable:
 *
 *   Unbox a Nullable<T> by calling the runtime-provided Nullable<T>.Unbox
 * method on @val.  With @context_used (shared generic code) the method
 * address is fetched from the rgctx and invoked via an indirect call;
 * otherwise a direct call is emitted, passing the vtable as the extra
 * argument when method sharing requires it.
 */
3634 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3635 * generic code is generated.
3638 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3640 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3643 MonoInst *rgctx, *addr;
3645 /* FIXME: What if the class is shared? We might not
3646 have to get the address of the method from the
3648 addr = emit_get_rgctx_method (cfg, context_used, method,
3649 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3651 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3653 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3655 gboolean pass_vtable, pass_mrgctx;
3656 MonoInst *rgctx_arg = NULL;
3658 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3659 g_assert (!pass_mrgctx);
3662 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3665 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3668 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit the IR for the CIL `unbox` of a non-nullable valuetype: verify the
 * boxed object's type (rank must be 0 and the vtable's element class must
 * match @klass's), then compute the address of the payload as
 * obj + sizeof (MonoObject).  Returns the address instruction (type STACK_MP).
 * With @context_used the expected element class is fetched from the rgctx.
 */
3673 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3677 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3678 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3679 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3680 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3682 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check */
3683 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3684 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3686 /* FIXME: generics */
3687 g_assert (klass->rank == 0);
/* An array can never unbox to a valuetype */
3690 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3691 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3693 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3694 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
3697 MonoInst *element_class;
3699 /* This assertion is from the unboxcast insn */
3700 g_assert (klass->rank == 0);
3702 element_class = emit_get_rgctx_klass (cfg, context_used,
3703 klass->element_class, MONO_RGCTX_INFO_KLASS);
3705 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3706 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3708 save_cast_details (cfg, klass->element_class, obj_reg, FALSE, NULL);
3709 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3710 reset_cast_details (cfg);
/* Address of the unboxed data: skip the MonoObject header */
3713 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3714 MONO_ADD_INS (cfg->cbb, add);
3715 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 *   Unbox in gsharedvt (generic-sharing-for-valuetypes) code, where the
 * concrete kind of @klass is only known at runtime.  Branches three ways on
 * the rgctx MONO_RGCTX_INFO_CLASS_BOX_TYPE value: vtype (fall-through),
 * reference type, or Nullable<T> (the constants 1 and 2 presumably map to
 * the ref/nullable box-type codes — confirm against the rgctx enum).
 * All paths produce an address in addr_reg from which the value is loaded;
 * the resulting bblock is returned via @out_cbb.
 */
3722 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj, MonoBasicBlock **out_cbb)
3724 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3725 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3729 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
3735 args [1] = klass_inst;
/* Runtime type check before unboxing */
3738 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
3740 NEW_BBLOCK (cfg, is_ref_bb);
3741 NEW_BBLOCK (cfg, is_nullable_bb);
3742 NEW_BBLOCK (cfg, end_bb);
3743 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3744 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3745 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3747 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3748 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3750 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3751 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Vtype case: payload lives right after the MonoObject header */
3755 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3756 MONO_ADD_INS (cfg->cbb, addr);
3758 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3761 MONO_START_BB (cfg, is_ref_bb);
3763 /* Save the ref to a temporary */
3764 dreg = alloc_ireg (cfg);
3765 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
3766 addr->dreg = addr_reg;
3767 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3768 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3771 MONO_START_BB (cfg, is_nullable_bb);
3774 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3775 MonoInst *unbox_call;
3776 MonoMethodSignature *unbox_sig;
3779 var = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
/* Nullable<T>.Unbox cannot be constructed at JIT time in gsharedvt, so build the signature by hand */
3781 unbox_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3782 unbox_sig->ret = &klass->byval_arg;
3783 unbox_sig->param_count = 1;
3784 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
3785 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3787 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3788 addr->dreg = addr_reg;
3791 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3794 MONO_START_BB (cfg, end_bb);
3797 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
3799 *out_cbb = cfg->cbb;
/*
 * handle_alloc:
 *
 *   Emit IR which allocates an instance of @klass (@for_box set when the
 * allocation is for a box operation).  Paths: shared-generic code fetches the
 * klass/vtable from the rgctx; MONO_OPT_SHARED calls mono_object_new with an
 * explicit domain; AOT out-of-line corlib allocations use a token-indexed
 * helper to avoid relocations; otherwise a managed allocator or the
 * class-specific allocation function is used.
 */
3805 * Returns NULL and set the cfg exception on error.
3808 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3810 MonoInst *iargs [2];
3816 MonoInst *iargs [2];
3818 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
3820 if (cfg->opt & MONO_OPT_SHARED)
3821 rgctx_info = MONO_RGCTX_INFO_KLASS;
3823 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3824 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3826 if (cfg->opt & MONO_OPT_SHARED) {
3827 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3829 alloc_ftn = mono_object_new;
3832 alloc_ftn = mono_object_new_specific;
/* Managed allocators are not compatible with MONO_OPT_SHARED */
3835 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED))
3836 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3838 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3841 if (cfg->opt & MONO_OPT_SHARED) {
3842 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3843 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3845 alloc_ftn = mono_object_new;
3846 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3847 /* This happens often in argument checking code, eg. throw new FooException... */
3848 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3849 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3850 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3852 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3853 MonoMethod *managed_alloc = NULL;
/* Class failed to load: surface as a TypeLoadException */
3857 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3858 cfg->exception_ptr = klass;
3862 #ifndef MONO_CROSS_COMPILE
3863 managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
3866 if (managed_alloc) {
3867 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3868 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3870 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* pass_lw path: the icall wants the instance size in pointer-sized words */
3872 guint32 lw = vtable->klass->instance_size;
3873 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3874 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3875 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3878 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3882 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_box:
 *
 *   Emit IR which boxes @val of type @klass.  Nullable<T> delegates to the
 * runtime Nullable<T>.Box method (via rgctx calli when @context_used);
 * gsharedvt code branches at runtime on the class box-type (vtype / ref /
 * nullable); the plain case allocates with handle_alloc () and stores the
 * value after the MonoObject header.  The final bblock is returned through
 * @out_cbb.  Returns NULL and sets the cfg exception on error.
 */
3889 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoBasicBlock **out_cbb)
3891 MonoInst *alloc, *ins;
3893 *out_cbb = cfg->cbb;
3895 if (mono_class_is_nullable (klass)) {
3896 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3899 /* FIXME: What if the class is shared? We might not
3900 have to get the method address from the RGCTX. */
3901 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3902 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3903 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3905 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3907 gboolean pass_vtable, pass_mrgctx;
3908 MonoInst *rgctx_arg = NULL;
3910 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3911 g_assert (!pass_mrgctx);
3914 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3917 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3920 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
3924 if (mini_is_gsharedvt_klass (cfg, klass)) {
3925 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3926 MonoInst *res, *is_ref, *src_var, *addr;
3929 dreg = alloc_ireg (cfg);
3931 NEW_BBLOCK (cfg, is_ref_bb);
3932 NEW_BBLOCK (cfg, is_nullable_bb);
3933 NEW_BBLOCK (cfg, end_bb);
/* Runtime dispatch on the box kind; 1/2 presumably the ref/nullable codes — same scheme as handle_unbox_gsharedvt */
3934 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3935 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3936 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3938 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3939 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Vtype case: allocate and copy the value into the object */
3942 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3945 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3946 ins->opcode = OP_STOREV_MEMBASE;
3948 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
3949 res->type = STACK_OBJ;
3951 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3954 MONO_START_BB (cfg, is_ref_bb);
3955 addr_reg = alloc_ireg (cfg);
3957 /* val is a vtype, so has to load the value manually */
3958 src_var = get_vreg_to_inst (cfg, val->dreg);
3960 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
3961 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
3962 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
3963 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3966 MONO_START_BB (cfg, is_nullable_bb);
3969 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
3970 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
3972 MonoMethodSignature *box_sig;
3975 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
3976 * construct that method at JIT time, so have to do things by hand.
3978 box_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3979 box_sig->ret = &mono_defaults.object_class->byval_arg;
3980 box_sig->param_count = 1;
3981 box_sig->params [0] = &klass->byval_arg;
3982 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
3983 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
3984 res->type = STACK_OBJ;
3988 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3990 MONO_START_BB (cfg, end_bb);
3992 *out_cbb = cfg->cbb;
/* Plain (non-shared, non-nullable) box */
3996 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4000 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return whether @klass is a generic instance (or, in shared code, an open
 * generic type) with at least one covariant/contravariant type argument that
 * is a reference type.  Such classes need the slow cast paths, since variance
 * makes the exact-class checks insufficient.
 */
4007 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
4010 MonoGenericContainer *container;
4011 MonoGenericInst *ginst;
4013 if (klass->generic_class) {
4014 container = klass->generic_class->container_class->generic_container;
4015 ginst = klass->generic_class->context.class_inst;
4016 } else if (klass->generic_container && context_used) {
4017 container = klass->generic_container;
4018 ginst = container->context.class_inst;
4023 for (i = 0; i < container->type_argc; ++i) {
/* Only variant parameters matter; invariant ones are handled by the fast path */
4025 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
4027 type = ginst->type_argv [i];
4028 if (mini_type_is_reference (cfg, type))
/*
 * is_complex_isinst:
 *   Intended to detect classes whose isinst/castclass needs the slow
 * cache-based path.  NOTE: the leading `TRUE ||` makes the macro always
 * evaluate to TRUE (the rest is dead), per the FIXME below — every cast
 * currently takes the with-cache path.
 */
4034 // FIXME: This doesn't work yet (class libs tests fail?)
4035 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/*
 * emit_castclass_with_cache:
 *
 *   Emit a call to the castclass-with-cache marshal wrapper for @klass,
 * bracketed by save/reset of the cast details so a failure produces a
 * detailed exception message.  @args are the wrapper's arguments (object,
 * klass, cache slot — assembled by the caller).
 */
4038 emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args, MonoBasicBlock **out_bblock)
4040 MonoMethod *mono_castclass;
4043 mono_castclass = mono_marshal_get_castclass_with_cache ();
4045 save_cast_details (cfg, klass, args [0]->dreg, TRUE, out_bblock);
4046 res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
4047 reset_cast_details (cfg);
/*
 * handle_castclass:
 *
 *   Emit IR for the CIL `castclass` of @src to @klass.  Complex cases
 * (variant generic args, or anything matching is_complex_isinst — currently
 * everything, see that macro) go through the cache-based wrapper.  Otherwise:
 * null passes, interfaces use the iface-cast helper, sealed non-array classes
 * get a direct klass compare, and the general case calls
 * mini_emit_castclass_inst ().  Returns NULL and sets the cfg exception on error.
 */
4056 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4058 MonoBasicBlock *is_null_bb;
4059 int obj_reg = src->dreg;
4060 int vtable_reg = alloc_preg (cfg);
4061 MonoInst *klass_inst = NULL;
4066 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4067 MonoInst *cache_ins;
4069 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4074 /* klass - it's the second element of the cache entry*/
4075 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4078 args [2] = cache_ins;
4080 return emit_castclass_with_cache (cfg, klass, args, NULL);
4083 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4086 NEW_BBLOCK (cfg, is_null_bb);
/* castclass on null succeeds and yields null */
4088 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4089 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
4091 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4093 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4094 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4095 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
4097 int klass_reg = alloc_preg (cfg);
4099 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed class: an exact pointer compare is sufficient */
4101 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4102 /* the remoting code is broken, access the class for now */
4103 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4104 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4106 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4107 cfg->exception_ptr = klass;
4110 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4112 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4113 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4115 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
4117 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4118 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4122 MONO_START_BB (cfg, is_null_bb);
4124 reset_cast_details (cfg);
/*
 * handle_isinst:
 *
 *   Emit IR for the CIL `isinst` of @src against @klass: the result register
 * holds the object on success and NULL on failure.  Complex cases go through
 * the isinst-with-cache wrapper; otherwise specialized sequences handle
 * interfaces, arrays (rank + element class checks, with special cases for
 * object/Enum element types), nullables, sealed classes, and the general
 * subclass walk.  Returns NULL and sets the cfg exception on error.
 */
4133 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4136 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
4137 int obj_reg = src->dreg;
4138 int vtable_reg = alloc_preg (cfg);
4139 int res_reg = alloc_ireg_ref (cfg);
4140 MonoInst *klass_inst = NULL;
4145 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4146 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4147 MonoInst *cache_ins;
4149 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4154 /* klass - it's the second element of the cache entry*/
4155 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4158 args [2] = cache_ins;
4160 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
4163 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4166 NEW_BBLOCK (cfg, is_null_bb);
4167 NEW_BBLOCK (cfg, false_bb);
4168 NEW_BBLOCK (cfg, end_bb);
4170 /* Do the assignment at the beginning, so the other assignment can be if converted */
4171 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4172 ins->type = STACK_OBJ;
/* isinst on null yields null */
4175 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4176 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4178 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4180 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4181 g_assert (!context_used);
4182 /* the is_null_bb target simply copies the input register to the output */
4183 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4185 int klass_reg = alloc_preg (cfg);
/* Array case: first the ranks must match, then the element classes */
4188 int rank_reg = alloc_preg (cfg);
4189 int eclass_reg = alloc_preg (cfg);
4191 g_assert (!context_used);
4192 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4193 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4194 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4195 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4196 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
4197 if (klass->cast_class == mono_defaults.object_class) {
/* object[] accepts any element except enums' underlying special-casing below */
4198 int parent_reg = alloc_preg (cfg);
4199 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
4200 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4201 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4202 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4203 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
4204 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4205 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4206 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4207 } else if (klass->cast_class == mono_defaults.enum_class) {
4208 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4209 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4210 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4211 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4213 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4214 /* Check that the object is a vector too */
4215 int bounds_reg = alloc_preg (cfg);
4216 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
4217 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4218 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4221 /* the is_null_bb target simply copies the input register to the output */
4222 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4224 } else if (mono_class_is_nullable (klass)) {
4225 g_assert (!context_used);
4226 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4227 /* the is_null_bb target simply copies the input register to the output */
4228 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
4230 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4231 g_assert (!context_used);
4232 /* the remoting code is broken, access the class for now */
4233 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4234 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4236 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4237 cfg->exception_ptr = klass;
4240 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4242 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4243 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4245 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4246 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
4248 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4249 /* the is_null_bb target simply copies the input register to the output */
4250 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
4255 MONO_START_BB (cfg, false_bb);
/* Failed check: result is null */
4257 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4258 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4260 MONO_START_BB (cfg, is_null_bb);
4262 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Remoting-aware isinst ("conditional isinst").  Produces an I4 result:
 * 0 = instance of @klass, 1 = not an instance, 2 = a transparent proxy whose
 * type cannot be determined at JIT time (the proxy paths are compiled out
 * when DISABLE_REMOTING is defined).
 */
4268 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4270 /* This opcode takes as input an object reference and a class, and returns:
4271 0) if the object is an instance of the class,
4272 1) if the object is not instance of the class,
4273 2) if the object is a proxy whose type cannot be determined */
4276 #ifndef DISABLE_REMOTING
4277 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4279 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4281 int obj_reg = src->dreg;
4282 int dreg = alloc_ireg (cfg);
4284 #ifndef DISABLE_REMOTING
4285 int klass_reg = alloc_preg (cfg);
4288 NEW_BBLOCK (cfg, true_bb);
4289 NEW_BBLOCK (cfg, false_bb);
4290 NEW_BBLOCK (cfg, end_bb);
4291 #ifndef DISABLE_REMOTING
4292 NEW_BBLOCK (cfg, false2_bb);
4293 NEW_BBLOCK (cfg, no_proxy_bb);
/* null is "not an instance" (result 1) */
4296 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4297 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
4299 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4300 #ifndef DISABLE_REMOTING
4301 NEW_BBLOCK (cfg, interface_fail_bb);
4304 tmp_reg = alloc_preg (cfg);
4305 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4306 #ifndef DISABLE_REMOTING
/* Interface test failed: it may still be a proxy with custom type info */
4307 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4308 MONO_START_BB (cfg, interface_fail_bb);
4309 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4311 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
4313 tmp_reg = alloc_preg (cfg);
4314 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4315 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4316 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4318 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
4321 #ifndef DISABLE_REMOTING
4322 tmp_reg = alloc_preg (cfg);
4323 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4324 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Non-proxy objects take the ordinary subclass check below */
4326 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4327 tmp_reg = alloc_preg (cfg);
4328 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4329 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4331 tmp_reg = alloc_preg (cfg);
4332 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4333 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4334 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4336 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4337 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4339 MONO_START_BB (cfg, no_proxy_bb);
4341 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4343 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
4347 MONO_START_BB (cfg, false_bb);
4349 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4350 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4352 #ifndef DISABLE_REMOTING
4353 MONO_START_BB (cfg, false2_bb);
/* Undecidable proxy: result 2 */
4355 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4356 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4359 MONO_START_BB (cfg, true_bb);
4361 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4363 MONO_START_BB (cfg, end_bb);
4366 MONO_INST_NEW (cfg, ins, OP_ICONST);
4368 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Remoting-aware castclass ("conditional castclass").  Produces an I4:
 * 0 = instance of @klass, 1 = a transparent proxy whose type cannot be
 * determined; otherwise InvalidCastException is thrown.  Proxy handling is
 * compiled out when DISABLE_REMOTING is defined.
 */
4374 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4376 /* This opcode takes as input an object reference and a class, and returns:
4377 0) if the object is an instance of the class,
4378 1) if the object is a proxy whose type cannot be determined
4379 an InvalidCastException exception is thrown otherwhise*/
4382 #ifndef DISABLE_REMOTING
4383 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4385 MonoBasicBlock *ok_result_bb;
4387 int obj_reg = src->dreg;
4388 int dreg = alloc_ireg (cfg);
4389 int tmp_reg = alloc_preg (cfg);
4391 #ifndef DISABLE_REMOTING
4392 int klass_reg = alloc_preg (cfg);
4393 NEW_BBLOCK (cfg, end_bb);
4396 NEW_BBLOCK (cfg, ok_result_bb);
/* null always casts successfully */
4398 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4399 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
4401 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4403 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4404 #ifndef DISABLE_REMOTING
4405 NEW_BBLOCK (cfg, interface_fail_bb);
4407 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4408 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4409 MONO_START_BB (cfg, interface_fail_bb);
4410 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Not an interface implementor: must be a proxy or the cast fails */
4412 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
4414 tmp_reg = alloc_preg (cfg);
4415 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4416 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4417 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Proxy with custom type info: undecidable, result 1 */
4419 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4420 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4422 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4423 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4424 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
4427 #ifndef DISABLE_REMOTING
4428 NEW_BBLOCK (cfg, no_proxy_bb);
4430 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4431 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4432 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4434 tmp_reg = alloc_preg (cfg);
4435 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4436 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4438 tmp_reg = alloc_preg (cfg);
4439 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4440 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4441 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4443 NEW_BBLOCK (cfg, fail_1_bb);
4445 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
4447 MONO_START_BB (cfg, fail_1_bb);
4449 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4450 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4452 MONO_START_BB (cfg, no_proxy_bb);
/* Ordinary object: normal castclass semantics (throws on failure) */
4454 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4456 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
4460 MONO_START_BB (cfg, ok_result_bb);
4462 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4464 #ifndef DISABLE_REMOTING
4465 MONO_START_BB (cfg, end_bb);
4469 MONO_INST_NEW (cfg, ins, OP_ICONST);
4471 ins->type = STACK_I4;
/*
 * handle_delegate_ctor:
 *
 *   Inline the work of mono_delegate_ctor (): allocate the delegate object,
 * store its target/method fields (with write barriers when required), set up
 * a per-domain code slot so the compiled target code can be cached, and set
 * invoke_impl to the delegate trampoline (an AOT patch or a concrete
 * trampoline address).  Returns NULL and sets the cfg exception on error.
 */
4479 static G_GNUC_UNUSED MonoInst*
4480 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
4484 gpointer *trampoline;
4485 MonoInst *obj, *method_ins, *tramp_ins;
4489 obj = handle_alloc (cfg, klass, FALSE, 0);
4493 /* Inline the contents of mono_delegate_ctor */
4495 /* Set target field */
4496 /* Optimize away setting of NULL target */
4497 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
4498 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
4499 if (cfg->gen_write_barriers) {
4500 dreg = alloc_preg (cfg);
4501 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target));
4502 emit_write_barrier (cfg, ptr, target);
4506 /* Set method field */
4507 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4508 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4509 if (cfg->gen_write_barriers) {
4510 dreg = alloc_preg (cfg);
4511 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method));
4512 emit_write_barrier (cfg, ptr, method_ins);
4515 * To avoid looking up the compiled code belonging to the target method
4516 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4517 * store it, and we fill it after the method has been compiled.
4519 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4520 MonoInst *code_slot_ins;
4523 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Lazily create the per-domain method -> code-slot hash under the domain lock */
4525 domain = mono_domain_get ();
4526 mono_domain_lock (domain);
4527 if (!domain_jit_info (domain)->method_code_hash)
4528 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4529 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4531 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
4532 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4534 mono_domain_unlock (domain);
4536 if (cfg->compile_aot)
4537 EMIT_NEW_AOTCONST (cfg, code_slot_ins, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
4539 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
4541 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
4544 /* Set invoke_impl field */
4545 if (cfg->compile_aot) {
4546 MonoClassMethodPair *del_tramp;
4548 del_tramp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoClassMethodPair));
4549 del_tramp->klass = klass;
4550 del_tramp->method = context_used ? NULL : method;
4551 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
4553 trampoline = mono_create_delegate_trampoline_with_method (cfg->domain, klass, context_used ? NULL : method);
4554 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4556 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
4558 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *   Emit a native call to the rank-specific mono_array_new_va () icall
 *   wrapper for a multi-dimensional array allocation; SP holds the call
 *   arguments.  Side effects: marks the method as MONO_CFG_HAS_VARARGS and
 *   disables LLVM for it, since the icall uses a vararg calling convention.
 */
4564 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4566 MonoJitICallInfo *info;
4568 /* Need to register the icall so it gets an icall wrapper */
4569 info = mono_get_array_new_va_icall (rank);
4571 cfg->flags |= MONO_CFG_HAS_VARARGS;
4573 /* mono_array_new_va () needs a vararg calling convention */
4574 cfg->disable_llvm = TRUE;
4576 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4577 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *   Materialize the method's GOT variable: emit an OP_LOAD_GOTADDR at the
 *   very start of the entry bblock writing into got_var's dreg, and add a
 *   dummy use in bb_exit so liveness keeps the variable alive for the whole
 *   method.  No-op if there is no got_var or it was already allocated.
 */
4581 mono_emit_load_got_addr (MonoCompile *cfg)
4583 MonoInst *getaddr, *dummy_use;
4585 if (!cfg->got_var || cfg->got_var_allocated)
4588 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
4589 getaddr->cil_code = cfg->header->code;
4590 getaddr->dreg = cfg->got_var->dreg;
4592 /* Add it to the start of the first bblock */
4593 if (cfg->bb_entry->code) {
4594 getaddr->next = cfg->bb_entry->code;
4595 cfg->bb_entry->code = getaddr;
4598 MONO_ADD_INS (cfg->bb_entry, getaddr);
4600 cfg->got_var_allocated = TRUE;
4603 * Add a dummy use to keep the got_var alive, since real uses might
4604 * only be generated by the back ends.
4605 * Add it to end_bblock, so the variable's lifetime covers the whole
4607 * It would be better to make the usage of the got var explicit in all
4608 * cases when the backend needs it (i.e. calls, throw etc.), so this
4609 * wouldn't be needed.
4611 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
4612 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* IL code-size threshold for inlining; lazily initialized in
 * mono_method_check_inlining () from the MONO_INLINELIMIT environment
 * variable, defaulting to INLINE_LENGTH_LIMIT. */
4615 static int inline_limit;
4616 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *   Decide whether METHOD may be inlined into the method CFG is compiling.
 *   Visible rejection reasons: generic sharing in effect, inline depth > 10,
 *   no obtainable header summary, NOINLINING / SYNCHRONIZED flags,
 *   MarshalByRef classes, IL size >= inline_limit (unless marked
 *   AggressiveInlining), class-initializer (cctor) ordering constraints,
 *   declarative security, and (on soft-float targets) R4 parameters or
 *   return values.
 */
4619 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
4621 MonoMethodHeaderSummary header;
4623 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4624 MonoMethodSignature *sig = mono_method_signature (method);
4628 if (cfg->generic_sharing_context)
4631 if (cfg->inline_depth > 10)
4634 #ifdef MONO_ARCH_HAVE_LMF_OPS
4635 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
4636 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
4637 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
4642 if (!mono_method_get_header_summary (method, &header))
4645 /*runtime, icall and pinvoke are checked by summary call*/
4646 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
4647 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
4648 (mono_class_is_marshalbyref (method->klass)) ||
4652 /* also consider num_locals? */
4653 /* Do the size check early to avoid creating vtables */
4654 if (!inline_limit_inited) {
4655 if (g_getenv ("MONO_INLINELIMIT"))
4656 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
4658 inline_limit = INLINE_LENGTH_LIMIT;
4659 inline_limit_inited = TRUE;
4661 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
4665 * if we can initialize the class of the method right away, we do,
4666 * otherwise we don't allow inlining if the class needs initialization,
4667 * since it would mean inserting a call to mono_runtime_class_init()
4668 * inside the inlined code
4670 if (!(cfg->opt & MONO_OPT_SHARED)) {
4671 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
4672 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
4673 vtable = mono_class_vtable (cfg->domain, method->klass);
4676 if (!cfg->compile_aot)
4677 mono_runtime_class_init (vtable);
4678 } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
4679 if (cfg->run_cctors && method->klass->has_cctor) {
4680 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
4681 if (!method->klass->runtime_info)
4682 /* No vtable created yet */
4684 vtable = mono_class_vtable (cfg->domain, method->klass);
4687 /* This makes so that inline cannot trigger */
4688 /* .cctors: too many apps depend on them */
4689 /* running with a specific order... */
4690 if (! vtable->initialized)
4692 mono_runtime_class_init (vtable);
4694 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
4695 if (!method->klass->runtime_info)
4696 /* No vtable created yet */
4698 vtable = mono_class_vtable (cfg->domain, method->klass);
4701 if (!vtable->initialized)
4706 * If we're compiling for shared code
4707 * the cctor will need to be run at aot method load time, for example,
4708 * or at the end of the compilation of the inlining method.
4710 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
4715 * CAS - do not inline methods with declarative security
4716 * Note: this has to be before any possible return TRUE;
4718 if (mono_security_method_has_declsec (method))
4721 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4722 if (mono_arch_is_soft_float ()) {
4724 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4726 for (i = 0; i < sig->param_count; ++i)
4727 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *   Whether a static-field access in METHOD against KLASS must be preceded
 *   by a class-init call.  Visible logic: JIT mode with an already
 *   initialized vtable needs nothing; beforefieldinit classes are handled
 *   specially when cfg->method == method; classes not needing a cctor run
 *   need nothing; instance methods of their own class are covered by the
 *   initialization done before the method is called.
 */
4736 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
4738 if (!cfg->compile_aot) {
4740 if (vtable->initialized)
4744 if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
4745 if (cfg->method == method)
4749 if (!mono_class_needs_cctor_run (klass, method))
4752 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
4753 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *   Emit IR computing the address of element INDEX in the one-dimensional
 *   array ARR with element class KLASS; BCHECK requests a bounds check.
 *   On 64-bit targets the 32-bit index is sign-extended first (LLVM handles
 *   this itself).  On x86/amd64 a single LEA covers element sizes
 *   1/2/4/8; otherwise the offset is index * element-size plus the
 *   MonoArray vector offset.  For gsharedvt variable-size classes the
 *   element size comes from the rgctx at run time.
 */
4760 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4764 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
4767 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
4770 mono_class_init (klass);
4771 size = mono_class_array_element_size (klass);
4774 mult_reg = alloc_preg (cfg);
4775 array_reg = arr->dreg;
4776 index_reg = index->dreg;
4778 #if SIZEOF_REGISTER == 8
4779 /* The array reg is 64 bits but the index reg is only 32 */
4780 if (COMPILE_LLVM (cfg)) {
4782 index2_reg = index_reg;
4784 index2_reg = alloc_preg (cfg);
4785 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
4788 if (index->type == STACK_I8) {
4789 index2_reg = alloc_preg (cfg);
4790 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4792 index2_reg = index_reg;
4797 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
4799 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4800 if (size == 1 || size == 2 || size == 4 || size == 8) {
4801 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4803 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
4804 ins->klass = mono_class_get_element_class (klass);
4805 ins->type = STACK_MP;
4811 add_reg = alloc_ireg_mp (cfg);
4814 MonoInst *rgctx_ins;
4817 g_assert (cfg->generic_sharing_context);
4818 context_used = mini_class_check_context_used (cfg, klass);
4819 g_assert (context_used);
4820 rgctx_ins = emit_get_gsharedvt_info (cfg, &klass->byval_arg, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
4821 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
4823 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
4825 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4826 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4827 ins->klass = mono_class_get_element_class (klass);
4828 ins->type = STACK_MP;
4829 MONO_ADD_INS (cfg->cbb, ins);
4834 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *   Emit IR computing the address of element [INDEX_INS1, INDEX_INS2] in a
 *   two-dimensional array of KLASS.  Loads the MonoArrayBounds block, range
 *   checks each index against its lower bound/length (throwing
 *   IndexOutOfRangeException on failure), then computes
 *   (realidx1 * dim2-length + realidx2) * element-size plus the MonoArray
 *   vector offset.  Only compiled when the arch has real mul/div ops.
 */
4836 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4838 int bounds_reg = alloc_preg (cfg);
4839 int add_reg = alloc_ireg_mp (cfg);
4840 int mult_reg = alloc_preg (cfg);
4841 int mult2_reg = alloc_preg (cfg);
4842 int low1_reg = alloc_preg (cfg);
4843 int low2_reg = alloc_preg (cfg);
4844 int high1_reg = alloc_preg (cfg);
4845 int high2_reg = alloc_preg (cfg);
4846 int realidx1_reg = alloc_preg (cfg);
4847 int realidx2_reg = alloc_preg (cfg);
4848 int sum_reg = alloc_preg (cfg);
4849 int index1, index2, tmpreg;
4853 mono_class_init (klass);
4854 size = mono_class_array_element_size (klass);
4856 index1 = index_ins1->dreg;
4857 index2 = index_ins2->dreg;
4859 #if SIZEOF_REGISTER == 8
4860 /* The array reg is 64 bits but the index reg is only 32 */
4861 if (COMPILE_LLVM (cfg)) {
4864 tmpreg = alloc_preg (cfg);
4865 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
4867 tmpreg = alloc_preg (cfg);
4868 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
4872 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
4876 /* range checking */
4877 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4878 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
4880 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4881 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4882 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4883 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4884 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4885 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4886 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
4888 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4889 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4890 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4891 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4892 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
4893 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4894 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
4896 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4897 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4898 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4899 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4900 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4902 ins->type = STACK_MP;
4904 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *   Dispatch element-address computation for the array Address/Get/Set
 *   methods by rank: rank 1 uses mini_emit_ldelema_1_ins, rank 2 (when the
 *   arch has real mul and MONO_OPT_INTRINS is on) uses
 *   mini_emit_ldelema_2_ins, otherwise a managed marshal helper obtained
 *   from mono_marshal_get_array_address is called.  For setters the last
 *   parameter is the value, hence the rank adjustment.
 */
4911 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4915 MonoMethod *addr_method;
4918 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4921 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
4923 #ifndef MONO_ARCH_EMULATE_MUL_DIV
4924 /* emit_ldelema_2 depends on OP_LMUL */
4925 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
4926 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
4930 element_size = mono_class_array_element_size (cmethod->klass->element_class);
4931 addr_method = mono_marshal_get_array_address (rank, element_size);
4932 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every break IL instruction / Debugger.Break (). */
4937 static MonoBreakPolicy
4938 always_insert_breakpoint (MonoMethod *method)
4940 return MONO_BREAK_POLICY_ALWAYS;
/* Current policy callback; replaced via mono_set_break_policy (). */
4943 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4946 * mono_set_break_policy:
4947 * policy_callback: the new callback function
4949 * Allow embedders to decide whether to actually obey breakpoint instructions
4950 * (both break IL instructions and Debugger.Break () method calls), for example
4951 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4952 * untrusted or semi-trusted code.
4954 * @policy_callback will be called every time a break point instruction needs to
4955 * be inserted with the method argument being the method that calls Debugger.Break()
4956 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4957 * if it wants the breakpoint to not be effective in the given method.
4958 * #MONO_BREAK_POLICY_ALWAYS is the default.
4961 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
4963 if (policy_callback)
4964 break_policy_func = policy_callback;
/* NULL resets to the default always-break policy */
4966 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:
 *   Consult the registered break policy for METHOD.  MONO_BREAK_POLICY_ON_DBG
 *   is no longer supported (mdb removed); unknown return values are warned
 *   about.  NOTE: the misspelled name ("brekpoint") is kept intentionally —
 *   callers elsewhere use it.
 */
4970 should_insert_brekpoint (MonoMethod *method) {
4971 switch (break_policy_func (method)) {
4972 case MONO_BREAK_POLICY_ALWAYS:
4974 case MONO_BREAK_POLICY_NEVER:
4976 case MONO_BREAK_POLICY_ON_DBG:
4977 g_warning ("mdb no longer supported");
4980 g_warning ("Incorrect value returned from break policy callback");
4985 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *   args [0] = array, args [1] = index, args [2] = pointer to the value.
 *   Computes the element address (no bounds check — callers already did it),
 *   then either stores through it (IS_SET, with a write barrier for
 *   reference elements) or loads from it into the value location.
 */
4987 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4989 MonoInst *addr, *store, *load;
4990 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4992 /* the bounds check is already done by the callers */
4993 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
4995 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4996 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
4997 if (mini_type_is_reference (cfg, fsig->params [2]))
4998 emit_write_barrier (cfg, addr, load);
5000 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
5001 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* Whether KLASS is a reference type under the current (possibly shared)
 * generic context; thin wrapper over mini_type_is_reference. */
5008 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5010 return mini_type_is_reference (cfg, &klass->byval_arg);
/*
 * emit_array_store:
 *   Emit a stelem: sp [0] = array, sp [1] = index, sp [2] = value.
 *   For reference elements with SAFETY_CHECKS (and a non-NULL constant
 *   value) the covariance-checking virtual stelemref marshal helper is
 *   called instead of an inline store.  gsharedvt variable-size elements
 *   use OP_STOREV_MEMBASE through a computed address; a constant index
 *   gets an inline bounds check plus direct offset store; the general case
 *   computes the address and adds a write barrier for reference elements.
 */
5014 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
5016 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
5017 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
5018 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
5019 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
5020 MonoInst *iargs [3];
5023 mono_class_setup_vtable (obj_array);
5024 g_assert (helper->slot);
5026 if (sp [0]->type != STACK_OBJ)
5028 if (sp [2]->type != STACK_OBJ)
5035 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
5039 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
5042 // FIXME-VT: OP_ICONST optimization
5043 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
5044 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5045 ins->opcode = OP_STOREV_MEMBASE;
5046 } else if (sp [1]->opcode == OP_ICONST) {
5047 int array_reg = sp [0]->dreg;
5048 int index_reg = sp [1]->dreg;
5049 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
5052 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5053 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
5055 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5056 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5057 if (generic_class_is_reference_type (cfg, klass))
5058 emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 *   Inline Array.UnsafeStore/UnsafeLoad: a store delegates to
 *   emit_array_store without safety checks; a load computes the element
 *   address (no bounds check) and loads through it.  The element class
 *   comes from the value parameter (store) or the return type (load).
 */
5065 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5070 eklass = mono_class_from_mono_type (fsig->params [2]);
5072 eklass = mono_class_from_mono_type (fsig->ret);
5075 return emit_array_store (cfg, eklass, args, FALSE);
5077 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5078 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
/*
 * is_unsafe_mov_compatible:
 *   Whether Array.UnsafeMov may reinterpret a PARAM_KLASS value as a
 *   RETURN_KLASS value.  Both must be valuetypes without GC references,
 *   both struct or both primitive/enum (the JIT handles the two shapes
 *   differently), neither may be R4/R8, and their value sizes must match.
 */
5084 is_unsafe_mov_compatible (MonoClass *param_klass, MonoClass *return_klass)
5088 //Only allow for valuetypes
5089 if (!param_klass->valuetype || !return_klass->valuetype)
5093 if (param_klass->has_references || return_klass->has_references)
5096 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5097 if ((MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5098 (!MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)))
5101 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5102 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8)
5105 //And have the same size
5106 if (mono_class_value_size (param_klass, &align) != mono_class_value_size (return_klass, &align))
/*
 * emit_array_unsafe_mov:
 *   Inline Array.UnsafeMov when the parameter and return types are
 *   semantically equivalent valuetypes (per is_unsafe_mov_compatible),
 *   either directly or as rank-1 arrays of such valuetypes.
 */
5112 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5114 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5115 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
5117 //Valuetypes that are semantically equivalent
5118 if (is_unsafe_mov_compatible (param_klass, return_klass))
5121 //Arrays of valuetypes that are semantically equivalent
5122 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (param_klass->element_class, return_klass->element_class))
/*
 * mini_emit_inst_for_ctor:
 *   Intrinsic expansion hook for constructor calls: tries SIMD intrinsics
 *   first (when MONO_OPT_SIMD is on and the arch supports them), then falls
 *   back to the native-types intrinsics.
 */
5129 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5131 #ifdef MONO_ARCH_SIMD_INTRINSICS
5132 MonoInst *ins = NULL;
5134 if (cfg->opt & MONO_OPT_SIMD) {
5135 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5141 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/*
 * emit_memory_barrier:
 *   Append an OP_MEMORY_BARRIER of the given KIND (e.g. FullBarrier) to
 *   the current bblock and return it.
 */
5145 emit_memory_barrier (MonoCompile *cfg, int kind)
5147 MonoInst *ins = NULL;
5148 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5149 MONO_ADD_INS (cfg->cbb, ins);
5150 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *   Intrinsics used only when compiling with the LLVM backend:
 *   Math.Sin/Cos/Sqrt and Math.Abs(double) become single float ops, and
 *   (under MONO_OPT_CMOV) Math.Min/Max on I4/U4/I8/U8 become the
 *   corresponding IMIN/IMAX/LMIN/LMAX (and _UN) ops.
 */
5156 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5158 MonoInst *ins = NULL;
5161 /* The LLVM backend supports these intrinsics */
5162 if (cmethod->klass == mono_defaults.math_class) {
5163 if (strcmp (cmethod->name, "Sin") == 0) {
5165 } else if (strcmp (cmethod->name, "Cos") == 0) {
5167 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
5169 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
5174 MONO_INST_NEW (cfg, ins, opcode);
5175 ins->type = STACK_R8;
5176 ins->dreg = mono_alloc_freg (cfg);
5177 ins->sreg1 = args [0]->dreg;
5178 MONO_ADD_INS (cfg->cbb, ins);
5182 if (cfg->opt & MONO_OPT_CMOV) {
5183 if (strcmp (cmethod->name, "Min") == 0) {
5184 if (fsig->params [0]->type == MONO_TYPE_I4)
5186 if (fsig->params [0]->type == MONO_TYPE_U4)
5187 opcode = OP_IMIN_UN;
5188 else if (fsig->params [0]->type == MONO_TYPE_I8)
5190 else if (fsig->params [0]->type == MONO_TYPE_U8)
5191 opcode = OP_LMIN_UN;
5192 } else if (strcmp (cmethod->name, "Max") == 0) {
5193 if (fsig->params [0]->type == MONO_TYPE_I4)
5195 if (fsig->params [0]->type == MONO_TYPE_U4)
5196 opcode = OP_IMAX_UN;
5197 else if (fsig->params [0]->type == MONO_TYPE_I8)
5199 else if (fsig->params [0]->type == MONO_TYPE_U8)
5200 opcode = OP_LMAX_UN;
5205 MONO_INST_NEW (cfg, ins, opcode);
5206 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5207 ins->dreg = mono_alloc_ireg (cfg);
5208 ins->sreg1 = args [0]->dreg;
5209 ins->sreg2 = args [1]->dreg;
5210 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *   Intrinsics that are safe even under generic sharing: the
 *   Array.UnsafeStore/UnsafeLoad/UnsafeMov helpers are expanded inline.
 */
5218 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5220 if (cmethod->klass == mono_defaults.array_class) {
5221 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5222 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5223 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5224 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5225 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5226 return emit_array_unsafe_mov (cfg, fsig, args);
5233 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5235 MonoInst *ins = NULL;
5237 static MonoClass *runtime_helpers_class = NULL;
5238 if (! runtime_helpers_class)
5239 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
5240 "System.Runtime.CompilerServices", "RuntimeHelpers");
5242 if (cmethod->klass == mono_defaults.string_class) {
5243 if (strcmp (cmethod->name, "get_Chars") == 0) {
5244 int dreg = alloc_ireg (cfg);
5245 int index_reg = alloc_preg (cfg);
5246 int mult_reg = alloc_preg (cfg);
5247 int add_reg = alloc_preg (cfg);
5249 #if SIZEOF_REGISTER == 8
5250 /* The array reg is 64 bits but the index reg is only 32 */
5251 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5253 index_reg = args [1]->dreg;
5255 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5257 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5258 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
5259 add_reg = ins->dreg;
5260 /* Avoid a warning */
5262 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5265 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5266 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5267 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5268 add_reg, G_STRUCT_OFFSET (MonoString, chars));
5270 type_from_op (ins, NULL, NULL);
5272 } else if (strcmp (cmethod->name, "get_Length") == 0) {
5273 int dreg = alloc_ireg (cfg);
5274 /* Decompose later to allow more optimizations */
5275 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5276 ins->type = STACK_I4;
5277 ins->flags |= MONO_INST_FAULT;
5278 cfg->cbb->has_array_access = TRUE;
5279 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5282 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
5283 int mult_reg = alloc_preg (cfg);
5284 int add_reg = alloc_preg (cfg);
5286 /* The corlib functions check for oob already. */
5287 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
5288 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5289 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
5290 return cfg->cbb->last_ins;
5293 } else if (cmethod->klass == mono_defaults.object_class) {
5295 if (strcmp (cmethod->name, "GetType") == 0) {
5296 int dreg = alloc_ireg_ref (cfg);
5297 int vt_reg = alloc_preg (cfg);
5298 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
5299 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
5300 type_from_op (ins, NULL, NULL);
5303 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
5304 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
5305 int dreg = alloc_ireg (cfg);
5306 int t1 = alloc_ireg (cfg);
5308 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5309 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5310 ins->type = STACK_I4;
5314 } else if (strcmp (cmethod->name, ".ctor") == 0) {
5315 MONO_INST_NEW (cfg, ins, OP_NOP);
5316 MONO_ADD_INS (cfg->cbb, ins);
5320 } else if (cmethod->klass == mono_defaults.array_class) {
5321 if (!cfg->gsharedvt && strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
5322 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
5324 #ifndef MONO_BIG_ARRAYS
5326 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5329 if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5330 int dreg = alloc_ireg (cfg);
5331 int bounds_reg = alloc_ireg_mp (cfg);
5332 MonoBasicBlock *end_bb, *szarray_bb;
5333 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5335 NEW_BBLOCK (cfg, end_bb);
5336 NEW_BBLOCK (cfg, szarray_bb);
5338 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5339 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
5340 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5341 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5342 /* Non-szarray case */
5344 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5345 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
5347 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5348 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5349 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5350 MONO_START_BB (cfg, szarray_bb);
5353 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5354 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
5356 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5357 MONO_START_BB (cfg, end_bb);
5359 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5360 ins->type = STACK_I4;
5366 if (cmethod->name [0] != 'g')
5369 if (strcmp (cmethod->name, "get_Rank") == 0) {
5370 int dreg = alloc_ireg (cfg);
5371 int vtable_reg = alloc_preg (cfg);
5372 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5373 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
5374 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5375 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
5376 type_from_op (ins, NULL, NULL);
5379 } else if (strcmp (cmethod->name, "get_Length") == 0) {
5380 int dreg = alloc_ireg (cfg);
5382 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5383 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
5384 type_from_op (ins, NULL, NULL);
5389 } else if (cmethod->klass == runtime_helpers_class) {
5391 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
5392 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
5396 } else if (cmethod->klass == mono_defaults.thread_class) {
5397 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
5398 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5399 MONO_ADD_INS (cfg->cbb, ins);
5401 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
5402 return emit_memory_barrier (cfg, FullBarrier);
5404 } else if (cmethod->klass == mono_defaults.monitor_class) {
5406 /* FIXME this should be integrated to the check below once we support the trampoline version */
5407 #if defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
5408 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) {
5409 MonoMethod *fast_method = NULL;
5411 /* Avoid infinite recursion */
5412 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN && !strcmp (cfg->method->name, "FastMonitorEnterV4"))
5415 fast_method = mono_monitor_get_fast_path (cmethod);
5419 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
5423 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
5424 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
5427 if (COMPILE_LLVM (cfg)) {
5429 * Pass the argument normally, the LLVM backend will handle the
5430 * calling convention problems.
5432 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5434 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
5435 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5436 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5437 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5440 return (MonoInst*)call;
5441 } else if (strcmp (cmethod->name, "Exit") == 0) {
5444 if (COMPILE_LLVM (cfg)) {
5445 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5447 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
5448 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5449 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5450 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5453 return (MonoInst*)call;
5455 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
5457 MonoMethod *fast_method = NULL;
5459 /* Avoid infinite recursion */
5460 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
5461 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
5462 strcmp (cfg->method->name, "FastMonitorExit") == 0))
5465 if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) ||
5466 strcmp (cmethod->name, "Exit") == 0)
5467 fast_method = mono_monitor_get_fast_path (cmethod);
5471 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
5474 } else if (cmethod->klass->image == mono_defaults.corlib &&
5475 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5476 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
5479 #if SIZEOF_REGISTER == 8
5480 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
5481 /* 64 bit reads are already atomic */
5482 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
5483 ins->dreg = mono_alloc_preg (cfg);
5484 ins->inst_basereg = args [0]->dreg;
5485 ins->inst_offset = 0;
5486 MONO_ADD_INS (cfg->cbb, ins);
5490 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
5491 if (strcmp (cmethod->name, "Increment") == 0) {
5492 MonoInst *ins_iconst;
5495 if (fsig->params [0]->type == MONO_TYPE_I4) {
5496 opcode = OP_ATOMIC_ADD_NEW_I4;
5497 cfg->has_atomic_add_new_i4 = TRUE;
5499 #if SIZEOF_REGISTER == 8
5500 else if (fsig->params [0]->type == MONO_TYPE_I8)
5501 opcode = OP_ATOMIC_ADD_NEW_I8;
5504 if (!mono_arch_opcode_supported (opcode))
5506 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5507 ins_iconst->inst_c0 = 1;
5508 ins_iconst->dreg = mono_alloc_ireg (cfg);
5509 MONO_ADD_INS (cfg->cbb, ins_iconst);
5511 MONO_INST_NEW (cfg, ins, opcode);
5512 ins->dreg = mono_alloc_ireg (cfg);
5513 ins->inst_basereg = args [0]->dreg;
5514 ins->inst_offset = 0;
5515 ins->sreg2 = ins_iconst->dreg;
5516 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5517 MONO_ADD_INS (cfg->cbb, ins);
5519 } else if (strcmp (cmethod->name, "Decrement") == 0) {
5520 MonoInst *ins_iconst;
5523 if (fsig->params [0]->type == MONO_TYPE_I4) {
5524 opcode = OP_ATOMIC_ADD_NEW_I4;
5525 cfg->has_atomic_add_new_i4 = TRUE;
5527 #if SIZEOF_REGISTER == 8
5528 else if (fsig->params [0]->type == MONO_TYPE_I8)
5529 opcode = OP_ATOMIC_ADD_NEW_I8;
5532 if (!mono_arch_opcode_supported (opcode))
5534 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5535 ins_iconst->inst_c0 = -1;
5536 ins_iconst->dreg = mono_alloc_ireg (cfg);
5537 MONO_ADD_INS (cfg->cbb, ins_iconst);
5539 MONO_INST_NEW (cfg, ins, opcode);
5540 ins->dreg = mono_alloc_ireg (cfg);
5541 ins->inst_basereg = args [0]->dreg;
5542 ins->inst_offset = 0;
5543 ins->sreg2 = ins_iconst->dreg;
5544 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5545 MONO_ADD_INS (cfg->cbb, ins);
5547 } else if (strcmp (cmethod->name, "Add") == 0) {
5550 if (fsig->params [0]->type == MONO_TYPE_I4) {
5551 opcode = OP_ATOMIC_ADD_NEW_I4;
5552 cfg->has_atomic_add_new_i4 = TRUE;
5554 #if SIZEOF_REGISTER == 8
5555 else if (fsig->params [0]->type == MONO_TYPE_I8)
5556 opcode = OP_ATOMIC_ADD_NEW_I8;
5559 if (!mono_arch_opcode_supported (opcode))
5561 MONO_INST_NEW (cfg, ins, opcode);
5562 ins->dreg = mono_alloc_ireg (cfg);
5563 ins->inst_basereg = args [0]->dreg;
5564 ins->inst_offset = 0;
5565 ins->sreg2 = args [1]->dreg;
5566 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5567 MONO_ADD_INS (cfg->cbb, ins);
5570 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
5572 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
5573 if (strcmp (cmethod->name, "Exchange") == 0) {
5575 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
5577 if (fsig->params [0]->type == MONO_TYPE_I4) {
5578 opcode = OP_ATOMIC_EXCHANGE_I4;
5579 cfg->has_atomic_exchange_i4 = TRUE;
5581 #if SIZEOF_REGISTER == 8
5582 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
5583 (fsig->params [0]->type == MONO_TYPE_I))
5584 opcode = OP_ATOMIC_EXCHANGE_I8;
5586 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I)) {
5587 opcode = OP_ATOMIC_EXCHANGE_I4;
5588 cfg->has_atomic_exchange_i4 = TRUE;
5594 if (!mono_arch_opcode_supported (opcode))
5597 MONO_INST_NEW (cfg, ins, opcode);
5598 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
5599 ins->inst_basereg = args [0]->dreg;
5600 ins->inst_offset = 0;
5601 ins->sreg2 = args [1]->dreg;
5602 MONO_ADD_INS (cfg->cbb, ins);
5604 switch (fsig->params [0]->type) {
5606 ins->type = STACK_I4;
5610 ins->type = STACK_I8;
5612 case MONO_TYPE_OBJECT:
5613 ins->type = STACK_OBJ;
5616 g_assert_not_reached ();
5619 if (cfg->gen_write_barriers && is_ref)
5620 emit_write_barrier (cfg, args [0], args [1]);
5622 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
5624 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
5625 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
5627 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
5628 if (fsig->params [1]->type == MONO_TYPE_I4)
5630 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
5631 size = sizeof (gpointer);
5632 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
5635 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
5637 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
5638 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5639 ins->sreg1 = args [0]->dreg;
5640 ins->sreg2 = args [1]->dreg;
5641 ins->sreg3 = args [2]->dreg;
5642 ins->type = STACK_I4;
5643 MONO_ADD_INS (cfg->cbb, ins);
5644 cfg->has_atomic_cas_i4 = TRUE;
5645 } else if (size == 8) {
5646 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I8))
5648 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
5649 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5650 ins->sreg1 = args [0]->dreg;
5651 ins->sreg2 = args [1]->dreg;
5652 ins->sreg3 = args [2]->dreg;
5653 ins->type = STACK_I8;
5654 MONO_ADD_INS (cfg->cbb, ins);
5656 /* g_assert_not_reached (); */
5658 if (cfg->gen_write_barriers && is_ref)
5659 emit_write_barrier (cfg, args [0], args [1]);
5661 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
5663 if (strcmp (cmethod->name, "MemoryBarrier") == 0)
5664 ins = emit_memory_barrier (cfg, FullBarrier);
5668 } else if (cmethod->klass->image == mono_defaults.corlib) {
5669 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
5670 && strcmp (cmethod->klass->name, "Debugger") == 0) {
5671 if (should_insert_brekpoint (cfg->method)) {
5672 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
5674 MONO_INST_NEW (cfg, ins, OP_NOP);
5675 MONO_ADD_INS (cfg->cbb, ins);
5679 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
5680 && strcmp (cmethod->klass->name, "Environment") == 0) {
5682 EMIT_NEW_ICONST (cfg, ins, 1);
5684 EMIT_NEW_ICONST (cfg, ins, 0);
5688 } else if (cmethod->klass == mono_defaults.math_class) {
5690 * There is general branches code for Min/Max, but it does not work for
5692 * http://everything2.com/?node_id=1051618
5694 } else if ((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") || !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) && !strcmp (cmethod->klass->name, "Selector") && !strcmp (cmethod->name, "GetHandle") && cfg->compile_aot && (args [0]->opcode == OP_GOT_ENTRY || args[0]->opcode == OP_AOTCONST)) {
5695 #ifdef MONO_ARCH_HAVE_OBJC_GET_SELECTOR
5697 MonoJumpInfoToken *ji;
5700 cfg->disable_llvm = TRUE;
5702 if (args [0]->opcode == OP_GOT_ENTRY) {
5703 pi = args [0]->inst_p1;
5704 g_assert (pi->opcode == OP_PATCH_INFO);
5705 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
5708 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
5709 ji = args [0]->inst_p0;
5712 NULLIFY_INS (args [0]);
5715 s = mono_ldstr (cfg->domain, ji->image, mono_metadata_token_index (ji->token));
5716 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
5717 ins->dreg = mono_alloc_ireg (cfg);
5719 ins->inst_p0 = mono_string_to_utf8 (s);
5720 MONO_ADD_INS (cfg->cbb, ins);
5725 #ifdef MONO_ARCH_SIMD_INTRINSICS
5726 if (cfg->opt & MONO_OPT_SIMD) {
5727 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5733 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
5737 if (COMPILE_LLVM (cfg)) {
5738 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
5743 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
5747 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect selected calls to faster JIT-emitted equivalents. Currently only
 * String.InternalAllocateStr is handled: when allocation profiling and shared
 * (domain-neutral) code are off, the call is rewritten into a direct call to
 * the GC's managed string allocator. Returns the emitted call MonoInst*, or
 * (per the comment above) falls through when no redirection applies.
 * NOTE(review): some lines of this body are elided in this view.
 */
5750 inline static MonoInst*
5751 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
5752 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
5754 if (method->klass == mono_defaults.string_class) {
5755 /* managed string allocation support */
5756 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
5757 MonoInst *iargs [2];
5758 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
5759 MonoMethod *managed_alloc = NULL;
5761 g_assert (vtable); /* Should not fail since it is System.String */
5762 #ifndef MONO_CROSS_COMPILE
5763 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE);
/* Call the managed allocator with (vtable, length) in place of the icall. */
5767 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
5768 iargs [1] = args [0];
5769 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   Store the call arguments in SP into newly created inline-argument vars
 * (cfg->args [i]) so the inlined body can reference them. For instance
 * methods, slot 0 is the receiver and its type is derived from the stack
 * entry rather than the signature.
 */
5776 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
5778 MonoInst *store, *temp;
5781 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
5782 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
5785 * FIXME: We should use *args++ = sp [0], but that would mean the arg
5786 * would be different than the MonoInst's used to represent arguments, and
5787 * the ldelema implementation can't deal with that.
5788 * Solution: When ldelema is used on an inline argument, create a var for
5789 * it, emit ldelema on that var, and emit the saving code below in
5790 * inline_method () if needed.
5792 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
5793 cfg->args [i] = temp;
5794 /* This uses cfg->args [i] which is set by the preceding line */
5795 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
5796 store->cil_code = sp [0]->cil_code;
5801 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
5802 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
5804 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debugging aid: only allow inlining of CALLED_METHOD when its full name
 * starts with the value of the MONO_INLINE_CALLED_METHOD_NAME_LIMIT env var.
 * The limit string is read once and cached in a function-static.
 */
5806 check_inline_called_method_name_limit (MonoMethod *called_method)
5809 static const char *limit = NULL;
5811 if (limit == NULL) {
5812 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
5814 if (limit_string != NULL)
5815 limit = limit_string;
5820 if (limit [0] != '\0') {
5821 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* Prefix match against the configured limit string. */
5823 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
5824 g_free (called_method_name);
5826 //return (strncmp_result <= 0);
5827 return (strncmp_result == 0);
5834 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debugging aid: only allow inlining into CALLER_METHOD when its full name
 * starts with the value of the MONO_INLINE_CALLER_METHOD_NAME_LIMIT env var.
 * Mirrors check_inline_called_method_name_limit () for the caller side.
 */
5836 check_inline_caller_method_name_limit (MonoMethod *caller_method)
5839 static const char *limit = NULL;
5841 if (limit == NULL) {
5842 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
5843 if (limit_string != NULL) {
5844 limit = limit_string;
5850 if (limit [0] != '\0') {
5851 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
/* Prefix match against the configured limit string. */
5853 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
5854 g_free (caller_method_name);
5856 //return (strncmp_result <= 0);
5857 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *
 *   Emit IR that initializes register DREG to the zero/default value of
 * RTYPE: NULL for pointers/references, 0 for integers, 0.0 for R4/R8
 * (loaded from a shared static constant), and VZERO for value types
 * (including generic instances and type variables constrained to be VTs).
 */
5865 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
5867 static double r8_0 = 0.0;
5871 rtype = mini_replace_type (rtype);
5875 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
5876 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
5877 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5878 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
5879 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
5880 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
/* Floating point zero is materialized by pointing at a static 0.0. */
5881 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5882 ins->type = STACK_R8;
5883 ins->inst_p0 = (void*)&r8_0;
5885 MONO_ADD_INS (cfg->cbb, ins);
5886 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
5887 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
5888 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
5889 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
5890 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* Everything else (object references etc.) defaults to NULL. */
5892 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *
 *   Like emit_init_rvar () but emits OP_DUMMY_* placeholder initializations,
 * which keep the IR valid (every vreg has a def) without generating real
 * code. Falls back to emit_init_rvar () for types with no dummy opcode.
 */
5897 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
5901 rtype = mini_replace_type (rtype);
5905 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
5906 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
5907 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
5908 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
5909 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
5910 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
5911 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
5912 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
5913 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
5914 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
5915 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
5916 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
/* No dummy opcode for this type: emit a real initialization instead. */
5918 emit_init_rvar (cfg, dreg, rtype);
5922 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
/*
 * emit_init_local:
 *
 *   Initialize IL local LOCAL of type TYPE. Under soft-float a fresh dreg is
 * initialized and then stored to the local so the soft-float decomposition
 * sees a proper store; otherwise the local's dreg is initialized directly,
 * using real (INIT) or dummy (!INIT) initialization.
 */
5924 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
5926 MonoInst *var = cfg->locals [local];
5927 if (COMPILE_SOFT_FLOAT (cfg)) {
5929 int reg = alloc_dreg (cfg, var->type);
5930 emit_init_rvar (cfg, reg, type);
5931 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
5934 emit_init_rvar (cfg, var->dreg, type);
5936 emit_dummy_init_rvar (cfg, var->dreg, type);
/*
 * inline_method:
 *
 *   Try to inline CMETHOD at the current emission point. Saves the pieces of
 * CFG state that mono_method_to_ir () will clobber (locals, args, cil/bblock
 * maps, current method, generic context, ...), compiles the callee's IL into
 * fresh start/end bblocks, then either links the new blocks into the graph
 * (inline accepted: cost < 60, or INLINE_ALWAYS) or restores cfg->cbb so the
 * new blocks are dropped (inline aborted).
 * NOTE(review): several lines of this body are elided in this view, including
 * the return statements; the visible code is documented as-is.
 */
5941 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
5942 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_always)
5944 MonoInst *ins, *rvar = NULL;
5945 MonoMethodHeader *cheader;
5946 MonoBasicBlock *ebblock, *sbblock;
5948 MonoMethod *prev_inlined_method;
5949 MonoInst **prev_locals, **prev_args;
5950 MonoType **prev_arg_types;
5951 guint prev_real_offset;
5952 GHashTable *prev_cbb_hash;
5953 MonoBasicBlock **prev_cil_offset_to_bb;
5954 MonoBasicBlock *prev_cbb;
5955 unsigned char* prev_cil_start;
5956 guint32 prev_cil_offset_to_bb_len;
5957 MonoMethod *prev_current_method;
5958 MonoGenericContext *prev_generic_context;
5959 gboolean ret_var_set, prev_ret_var_set, virtual = FALSE;
5961 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
5963 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
5964 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
5967 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
5968 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
5972 if (cfg->verbose_level > 2)
5973 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
/* Count each distinct method as inlineable only once. */
5975 if (!cmethod->inline_info) {
5976 cfg->stat_inlineable_methods++;
5977 cmethod->inline_info = 1;
5980 /* allocate local variables */
5981 cheader = mono_method_get_header (cmethod);
5983 if (cheader == NULL || mono_loader_get_last_error ()) {
5984 MonoLoaderError *error = mono_loader_get_last_error ();
5987 mono_metadata_free_mh (cheader);
5988 if (inline_always && error)
5989 mono_cfg_set_exception (cfg, error->exception_type);
5991 mono_loader_clear_error ();
5995 /*Must verify before creating locals as it can cause the JIT to assert.*/
5996 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
5997 mono_metadata_free_mh (cheader);
6001 /* allocate space to store the return value */
6002 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6003 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
6006 prev_locals = cfg->locals;
6007 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
6008 for (i = 0; i < cheader->num_locals; ++i)
6009 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
6011 /* allocate start and end blocks */
6012 /* This is needed so if the inline is aborted, we can clean up */
6013 NEW_BBLOCK (cfg, sbblock);
6014 sbblock->real_offset = real_offset;
6016 NEW_BBLOCK (cfg, ebblock);
6017 ebblock->block_num = cfg->num_bblocks++;
6018 ebblock->real_offset = real_offset;
/* Save the CFG state that mono_method_to_ir () below will overwrite. */
6020 prev_args = cfg->args;
6021 prev_arg_types = cfg->arg_types;
6022 prev_inlined_method = cfg->inlined_method;
6023 cfg->inlined_method = cmethod;
6024 cfg->ret_var_set = FALSE;
6025 cfg->inline_depth ++;
6026 prev_real_offset = cfg->real_offset;
6027 prev_cbb_hash = cfg->cbb_hash;
6028 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
6029 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
6030 prev_cil_start = cfg->cil_start;
6031 prev_cbb = cfg->cbb;
6032 prev_current_method = cfg->current_method;
6033 prev_generic_context = cfg->generic_context;
6034 prev_ret_var_set = cfg->ret_var_set;
6036 if (*ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
/* Compile the callee's IL into the new bblocks; COSTS < 0 means failure. */
6039 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, virtual);
6041 ret_var_set = cfg->ret_var_set;
/* Restore the saved CFG state. */
6043 cfg->inlined_method = prev_inlined_method;
6044 cfg->real_offset = prev_real_offset;
6045 cfg->cbb_hash = prev_cbb_hash;
6046 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
6047 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
6048 cfg->cil_start = prev_cil_start;
6049 cfg->locals = prev_locals;
6050 cfg->args = prev_args;
6051 cfg->arg_types = prev_arg_types;
6052 cfg->current_method = prev_current_method;
6053 cfg->generic_context = prev_generic_context;
6054 cfg->ret_var_set = prev_ret_var_set;
6055 cfg->inline_depth --;
6057 if ((costs >= 0 && costs < 60) || inline_always) {
6058 if (cfg->verbose_level > 2)
6059 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6061 cfg->stat_inlined_methods++;
6063 /* always add some code to avoid block split failures */
6064 MONO_INST_NEW (cfg, ins, OP_NOP);
6065 MONO_ADD_INS (prev_cbb, ins);
6067 prev_cbb->next_bb = sbblock;
6068 link_bblock (cfg, prev_cbb, sbblock);
6071 * Get rid of the begin and end bblocks if possible to aid local
6074 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
6076 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
6077 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
6079 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
6080 MonoBasicBlock *prev = ebblock->in_bb [0];
6081 mono_merge_basic_blocks (cfg, prev, ebblock);
6083 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
6084 mono_merge_basic_blocks (cfg, prev_cbb, prev);
6085 cfg->cbb = prev_cbb;
6089 * It's possible that the rvar is set in some prev bblock, but not in others.
/* Initialize rvar on predecessor paths that never reach a ret. */
6095 for (i = 0; i < ebblock->in_count; ++i) {
6096 bb = ebblock->in_bb [i];
6098 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
6101 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6111 * If the inlined method contains only a throw, then the ret var is not
6112 * set, so set it to a dummy value.
6115 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6117 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
6120 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Inline aborted: discard the freshly created bblocks. */
6123 if (cfg->verbose_level > 2)
6124 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
6125 cfg->exception_type = MONO_EXCEPTION_NONE;
6126 mono_loader_clear_error ();
6128 /* This gets rid of the newly added bblocks */
6129 cfg->cbb = prev_cbb;
6131 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6136 * Some of these comments may well be out-of-date.
6137 * Design decisions: we do a single pass over the IL code (and we do bblock
6138 * splitting/merging in the few cases when it's required: a back jump to an IL
6139 * address that was not already seen as bblock starting point).
6140 * Code is validated as we go (full verification is still better left to metadata/verify.c).
6141 * Complex operations are decomposed in simpler ones right away. We need to let the
6142 * arch-specific code peek and poke inside this process somehow (except when the
6143 * optimizations can take advantage of the full semantic info of coarse opcodes).
6144 * All the opcodes of the form opcode.s are 'normalized' to opcode.
6145 * MonoInst->opcode initially is the IL opcode or some simplification of that
6146 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
6147 * opcode with value bigger than OP_LAST.
6148 * At this point the IR can be handed over to an interpreter, a dumb code generator
6149 * or to the optimizing code generator that will translate it to SSA form.
6151 * Profiling directed optimizations.
6152 * We may compile by default with few or no optimizations and instrument the code
6153 * or the user may indicate what methods to optimize the most either in a config file
6154 * or through repeated runs where the compiler applies offline the optimizations to
6155 * each method and then decides if it was worth it.
/*
 * IL verification helper macros used throughout mono_method_to_ir ():
 * each one jumps to the UNVERIFIED / LOAD_ERROR handler when its check
 * fails (stack depth, argument/local index, opcode size, type load).
 */
6158 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
6159 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
6160 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
6161 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
6162 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
6163 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
6164 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
6165 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; LOAD_ERROR;}
6167 /* offset from br.s -> br like opcodes */
6168 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return whether the IL address IP falls inside basic block BB, i.e. no
 * other bblock starts at that offset in cfg->cil_offset_to_bb.
 */
6171 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
6173 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
6175 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   First pass over the IL in [START, END): decode each opcode's immediate
 * to find branch targets and create a basic block (GET_BBLOCK) at every
 * target and fall-through point. Also marks the bblock containing a
 * CEE_THROW as out-of-line so it can be moved to cold code.
 */
6179 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
6181 unsigned char *ip = start;
6182 unsigned char *target;
6185 MonoBasicBlock *bblock;
6186 const MonoOpcode *opcode;
6189 cli_addr = ip - start;
6190 i = mono_opcode_value ((const guint8 **)&ip, end);
6193 opcode = &mono_opcodes [i];
/* Advance past the operand based on its encoding class. */
6194 switch (opcode->argument) {
6195 case MonoInlineNone:
6198 case MonoInlineString:
6199 case MonoInlineType:
6200 case MonoInlineField:
6201 case MonoInlineMethod:
6204 case MonoShortInlineR:
6211 case MonoShortInlineVar:
6212 case MonoShortInlineI:
6215 case MonoShortInlineBrTarget:
6216 target = start + cli_addr + 2 + (signed char)ip [1];
6217 GET_BBLOCK (cfg, bblock, target);
6220 GET_BBLOCK (cfg, bblock, ip);
6222 case MonoInlineBrTarget:
6223 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
6224 GET_BBLOCK (cfg, bblock, target);
6227 GET_BBLOCK (cfg, bblock, ip);
6229 case MonoInlineSwitch: {
6230 guint32 n = read32 (ip + 1);
/* Switch operand: count word followed by N 32-bit relative targets. */
6233 cli_addr += 5 + 4 * n;
6234 target = start + cli_addr;
6235 GET_BBLOCK (cfg, bblock, target);
6237 for (j = 0; j < n; ++j) {
6238 target = start + cli_addr + (gint32)read32 (ip);
6239 GET_BBLOCK (cfg, bblock, target);
6249 g_assert_not_reached ();
6252 if (i == CEE_THROW) {
6253 unsigned char *bb_start = ip - 1;
6255 /* Find the start of the bblock containing the throw */
6257 while ((bb_start >= start) && !bblock) {
6258 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* Throwing blocks are cold: move them out of the hot path. */
6262 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN in the context of M to a MonoMethod, allowing open
 * constructed methods. For wrappers the method comes from the wrapper data
 * and is inflated with CONTEXT; otherwise it is looked up in M's image.
 */
6272 static inline MonoMethod *
6273 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6277 if (m->wrapper_type != MONO_WRAPPER_NONE) {
6278 method = mono_method_get_wrapper_data (m, token);
6280 method = mono_class_inflate_generic_method (method, context);
6282 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when not compiling with generic
 * sharing, a method on an open constructed type is rejected (treated as a
 * resolution failure) since it cannot be compiled directly.
 */
6288 static inline MonoMethod *
6289 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6291 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
6293 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass in the context of METHOD: wrapper data
 * (inflated with CONTEXT) for wrappers, normal metadata lookup otherwise.
 * The class is initialized before being returned.
 */
6299 static inline MonoClass*
6300 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
6304 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6305 klass = mono_method_get_wrapper_data (method, token);
6307 klass = mono_class_inflate_generic_class (klass, context);
6309 klass = mono_class_get_full (method->klass->image, token, context);
6312 mono_class_init (klass);
/*
 * mini_get_signature:
 *
 *   Resolve TOKEN to a MonoMethodSignature: from wrapper data (inflated with
 * CONTEXT, asserting on inflation errors) for wrappers, or parsed from
 * METHOD's image metadata otherwise.
 */
6316 static inline MonoMethodSignature*
6317 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
6319 MonoMethodSignature *fsig;
6321 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6324 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6326 fsig = mono_inflate_generic_signature (fsig, context, &error);
6328 g_assert (mono_error_ok (&error));
6331 fsig = mono_metadata_parse_signature (method->klass->image, token);
6337 * Returns TRUE if the JIT should abort inlining because "callee"
6338 * is influenced by security attributes.
/*
 * check_linkdemand:
 *
 *   Evaluate CAS link demands when CALLER invokes CALLEE. On an ECMA link
 * demand, code that throws a SecurityException is emitted before the call;
 * on other failures the compile is flagged with
 * MONO_EXCEPTION_SECURITY_LINKDEMAND (unless an earlier exception is set).
 */
6341 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
6345 if ((cfg->method != caller) && mono_security_method_has_declsec (callee)) {
6349 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
6350 if (result == MONO_JIT_SECURITY_OK)
6353 if (result == MONO_JIT_LINKDEMAND_ECMA) {
6354 /* Generate code to throw a SecurityException before the actual call/link */
6355 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6358 NEW_ICONST (cfg, args [0], 4);
6359 NEW_METHODCONST (cfg, args [1], caller);
6360 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
6361 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
6362 /* don't hide previous results */
6363 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
6364 cfg->exception_data = result;
/*
 * throw_exception:
 *
 *   Return the SecurityManager.ThrowException (exception) method, resolving
 * and caching it in a function-static on first use.
 */
6372 throw_exception (void)
6374 static MonoMethod *method = NULL;
6377 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6378 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit a call to SecurityManager.ThrowException with the pre-created
 * exception object EX as its argument.
 */
6385 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
6387 MonoMethod *thrower = throw_exception ();
6390 EMIT_NEW_PCONST (cfg, args [0], ex);
6391 mono_emit_method_call (cfg, thrower, args, NULL);
6395 * Return the original method if a wrapper is specified. We can only access
6396 * the custom attributes from the original method.
6399 get_original_method (MonoMethod *method)
6401 if (method->wrapper_type == MONO_WRAPPER_NONE)
6404 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
6405 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
6408 /* in other cases we need to find the original method */
6409 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: if CALLER (unwrapped via get_original_method ())
 * may not access FIELD, emit code that throws the returned exception.
 */
6413 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
6414 MonoBasicBlock *bblock, unsigned char *ip)
6416 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6417 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
6419 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: if CALLER (unwrapped via get_original_method ())
 * may not call CALLEE, emit code that throws the returned exception.
 */
6423 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
6424 MonoBasicBlock *bblock, unsigned char *ip)
6426 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6427 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
6429 emit_throw_exception (cfg, ex);
6433 * Check that the IL instructions at ip are the array initialization
6434 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *
 *   Pattern-match the dup/ldtoken/call RuntimeHelpers.InitializeArray IL
 * sequence following a newarr, validate the field and element type, and
 * return the raw RVA-mapped field data (or the RVA itself for AOT) plus its
 * size/field token via the out parameters.
 */
6437 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
6440 * newarr[System.Int32]
6442 * ldtoken field valuetype ...
6443 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
6445 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
6446 guint32 token = read32 (ip + 7);
6447 guint32 field_token = read32 (ip + 2);
6448 guint32 field_index = field_token & 0xffffff;
6450 const char *data_ptr;
6452 MonoMethod *cmethod;
6453 MonoClass *dummy_class;
6454 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
6460 *out_field_token = field_token;
6462 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Only the corlib RuntimeHelpers.InitializeArray call qualifies. */
6465 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
6467 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
6468 case MONO_TYPE_BOOLEAN:
6472 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
6473 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
6474 case MONO_TYPE_CHAR:
/* Reject if the array needs more data than the RVA field provides. */
6491 if (size > mono_type_size (field->type, &dummy_align))
6494 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
6495 if (!method->klass->image->dynamic) {
6496 field_index = read32 (ip + 2) & 0xffffff;
6497 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
6498 data_ptr = mono_image_rva_map (method->klass->image, rva);
6499 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
6500 /* for aot code we do the lookup on load */
6501 if (aot && data_ptr)
6502 return GUINT_TO_POINTER (rva);
6504 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
6506 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Flag the compile as MONO_EXCEPTION_INVALID_PROGRAM, building a message
 * from the method's full name and a disassembly of the offending IL at IP
 * (or "method body is empty." when there is no code).
 */
6514 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
6516 char *method_fname = mono_method_full_name (method, TRUE);
6518 MonoMethodHeader *header = mono_method_get_header (method);
6520 if (header->code_size == 0)
6521 method_code = g_strdup ("method body is empty.");
6523 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
6524 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
6525 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
6526 g_free (method_fname);
6527 g_free (method_code);
/* Defer freeing the header until the cfg itself is destroyed. */
6528 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *
 *   Flag the compile with a pre-built exception object, GC-registering
 * cfg->exception_ptr as a root so the object stays alive.
 */
6532 set_exception_object (MonoCompile *cfg, MonoException *exception)
6534 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
6535 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
6536 cfg->exception_ptr = exception;
/*
 * emit_stloc_ir:
 *
 *   Emit a store of the top-of-stack value *SP into IL local N. When the
 * store is a plain reg-reg move of a just-emitted constant, retarget that
 * constant's dreg to the local instead of emitting a separate store.
 */
6540 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
6543 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
6544 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
6545 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
6546 /* Optimize reg-reg moves away */
6548 * Can't optimize other opcodes, since sp[0] might point to
6549 * the last ins of a decomposed opcode.
6551 sp [0]->dreg = (cfg)->locals [n]->dreg;
6553 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
6558 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *
 *   Peephole for ldloca: when the address is only consumed by a following
 * initobj in the same bblock, emit a direct local initialization instead of
 * taking the local's address. Returns the advanced ip on success
 * (NOTE(review): the return statements are elided in this view).
 */
6561 static inline unsigned char *
6562 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
6572 local = read16 (ip + 2);
6576 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
6577 /* From the INITOBJ case */
6578 token = read32 (ip + 2);
6579 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
6580 CHECK_TYPELOAD (klass);
6581 type = mini_replace_type (&klass->byval_arg);
6582 emit_init_local (cfg, local, type, TRUE);
/*
 * is_exception_class:
 *
 *   Walk CLASS's parent chain and report whether it derives from
 * System.Exception.
 */
6590 is_exception_class (MonoClass *class)
6593 if (class == mono_defaults.exception_class)
6595 class = class->parent;
6601 * is_jit_optimizer_disabled:
6603 * Determine whenever M's assembly has a DebuggableAttribute with the
6604 * IsJITOptimizerDisabled flag set.
6607 is_jit_optimizer_disabled (MonoMethod *m)
6609 MonoAssembly *ass = m->klass->image->assembly;
6610 MonoCustomAttrInfo* attrs;
6611 static MonoClass *klass;
6613 gboolean val = FALSE;
/* Fast path: result is cached per assembly behind a memory barrier. */
6616 if (ass->jit_optimizer_disabled_inited)
6617 return ass->jit_optimizer_disabled;
6620 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
6623 ass->jit_optimizer_disabled = FALSE;
6624 mono_memory_barrier ();
6625 ass->jit_optimizer_disabled_inited = TRUE;
6629 attrs = mono_custom_attrs_from_assembly (ass);
6631 for (i = 0; i < attrs->num_attrs; ++i) {
6632 MonoCustomAttrEntry *attr = &attrs->attrs [i];
6635 MonoMethodSignature *sig;
6637 if (!attr->ctor || attr->ctor->klass != klass)
6639 /* Decode the attribute. See reflection.c */
6640 len = attr->data_size;
6641 p = (const char*)attr->data;
/* Custom attribute blobs start with prolog 0x0001. */
6642 g_assert (read16 (p) == 0x0001);
6645 // FIXME: Support named parameters
6646 sig = mono_method_signature (attr->ctor);
6647 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
6649 /* Two boolean arguments */
6653 mono_custom_attrs_free (attrs);
/* Publish the result, then mark it initialized (barrier orders the two). */
6656 ass->jit_optimizer_disabled = val;
6657 mono_memory_barrier ();
6658 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Decide whether a tail. call from METHOD to CMETHOD can actually be
 * emitted as a tail call: signatures must be compatible (arch hook or
 * structural equality), no argument may point into the caller's stack
 * frame (byref/pointer/fnptr params, valuetype this), and various method
 * properties (pinvoke, save_lmf, wrappers, non-CEE_CALL opcodes) rule it out.
 */
6664 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
6666 gboolean supported_tail_call;
6669 #ifdef MONO_ARCH_HAVE_OP_TAIL_CALL
6670 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
6672 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6675 for (i = 0; i < fsig->param_count; ++i) {
6676 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
6677 /* These can point to the current method's stack */
6678 supported_tail_call = FALSE;
6680 if (fsig->hasthis && cmethod->klass->valuetype)
6681 /* this might point to the current method's stack */
6682 supported_tail_call = FALSE;
6683 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
6684 supported_tail_call = FALSE;
6685 if (cfg->method->save_lmf)
6686 supported_tail_call = FALSE;
6687 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
6688 supported_tail_call = FALSE;
6689 if (call_opcode != CEE_CALL)
6690 supported_tail_call = FALSE;
6692 /* Debugging support */
6694 if (supported_tail_call) {
6695 if (!mono_debug_count ())
6696 supported_tail_call = FALSE;
6700 return supported_tail_call;
6703 /* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
6704 * it to the thread local value based on the tls_offset field. Every other kind of access to
6705 * the field causes an assert.
6708 is_magic_tls_access (MonoClassField *field)
6710 if (strcmp (field->name, "tlsdata"))
6712 if (strcmp (field->parent->name, "ThreadLocal`1"))
6714 return field->parent->image == mono_defaults.corlib;
6717 /* emits the code needed to access a managed tls var (like ThreadStatic)
6718 * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
6719 * pointer for the current thread.
6720 * Returns the MonoInst* representing the address of the tls var.
6723 emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
6726 int static_data_reg, array_reg, dreg;
6727 int offset2_reg, idx_reg;
6728 // inlined access to the tls data
6729 // idx = (offset >> 24) - 1;
6730 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
6731 static_data_reg = alloc_ireg (cfg);
6732 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
6733 idx_reg = alloc_ireg (cfg);
6734 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
6735 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
6736 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
6737 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
6738 array_reg = alloc_ireg (cfg);
6739 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
6740 offset2_reg = alloc_ireg (cfg);
6741 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
6742 dreg = alloc_ireg (cfg);
6743 EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
6748 * redirect access to the tlsdata field to the tls var given by the tls_offset field.
6749 * this address is cached per-method in cached_tls_addr.
6752 create_magic_tls_access (MonoCompile *cfg, MonoClassField *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
6754 MonoInst *load, *addr, *temp, *store, *thread_ins;
6755 MonoClassField *offset_field;
6757 if (*cached_tls_addr) {
6758 EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
6761 thread_ins = mono_get_thread_intrinsic (cfg);
6762 offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");
6764 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
6766 MONO_ADD_INS (cfg->cbb, thread_ins);
6768 MonoMethod *thread_method;
6769 thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
6770 thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
6772 addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
6773 addr->klass = mono_class_from_mono_type (tls_field->type);
6774 addr->type = STACK_MP;
6775 *cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
6776 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);
6778 EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
6783 * mono_method_to_ir:
6785 * Translate the .net IL into linear IR.
6788 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
6789 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
6790 guint inline_offset, gboolean is_virtual_call)
6793 MonoInst *ins, **sp, **stack_start;
6794 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
6795 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
6796 MonoMethod *cmethod, *method_definition;
6797 MonoInst **arg_array;
6798 MonoMethodHeader *header;
6800 guint32 token, ins_flag;
6802 MonoClass *constrained_call = NULL;
6803 unsigned char *ip, *end, *target, *err_pos;
6804 MonoMethodSignature *sig;
6805 MonoGenericContext *generic_context = NULL;
6806 MonoGenericContainer *generic_container = NULL;
6807 MonoType **param_types;
6808 int i, n, start_new_bblock, dreg;
6809 int num_calls = 0, inline_costs = 0;
6810 int breakpoint_id = 0;
6812 MonoBoolean security, pinvoke;
6813 MonoSecurityManager* secman = NULL;
6814 MonoDeclSecurityActions actions;
6815 GSList *class_inits = NULL;
6816 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
6818 gboolean init_locals, seq_points, skip_dead_blocks;
6819 gboolean disable_inline, sym_seq_points = FALSE;
6820 MonoInst *cached_tls_addr = NULL;
6821 MonoDebugMethodInfo *minfo;
6822 MonoBitSet *seq_point_locs = NULL;
6823 MonoBitSet *seq_point_set_locs = NULL;
6825 disable_inline = is_jit_optimizer_disabled (method);
6827 /* serialization and xdomain stuff may need access to private fields and methods */
6828 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
6829 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
6830 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
6831 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
6832 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
6833 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
6835 dont_verify |= mono_security_smcs_hack_enabled ();
6837 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
6838 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
6839 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
6840 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
6841 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
6843 image = method->klass->image;
6844 header = mono_method_get_header (method);
6846 MonoLoaderError *error;
6848 if ((error = mono_loader_get_last_error ())) {
6849 mono_cfg_set_exception (cfg, error->exception_type);
6851 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
6852 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
6854 goto exception_exit;
6856 generic_container = mono_method_get_generic_container (method);
6857 sig = mono_method_signature (method);
6858 num_args = sig->hasthis + sig->param_count;
6859 ip = (unsigned char*)header->code;
6860 cfg->cil_start = ip;
6861 end = ip + header->code_size;
6862 cfg->stat_cil_code_size += header->code_size;
6864 seq_points = cfg->gen_seq_points && cfg->method == method;
6865 #ifdef PLATFORM_ANDROID
6866 seq_points &= cfg->method->wrapper_type == MONO_WRAPPER_NONE;
6869 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
6870 /* We could hit a seq point before attaching to the JIT (#8338) */
6874 if (cfg->gen_seq_points && cfg->method == method) {
6875 minfo = mono_debug_lookup_method (method);
6877 int i, n_il_offsets;
6881 mono_debug_symfile_get_line_numbers_full (minfo, NULL, NULL, &n_il_offsets, &il_offsets, &line_numbers, NULL, NULL);
6882 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6883 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6884 sym_seq_points = TRUE;
6885 for (i = 0; i < n_il_offsets; ++i) {
6886 if (il_offsets [i] < header->code_size)
6887 mono_bitset_set_fast (seq_point_locs, il_offsets [i]);
6889 g_free (il_offsets);
6890 g_free (line_numbers);
6895 * Methods without init_locals set could cause asserts in various passes
6896 * (#497220). To work around this, we emit dummy initialization opcodes
6897 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
6898 * on some platforms.
6900 if ((cfg->opt & MONO_OPT_UNSAFE) && ARCH_HAVE_DUMMY_INIT)
6901 init_locals = header->init_locals;
6905 method_definition = method;
6906 while (method_definition->is_inflated) {
6907 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
6908 method_definition = imethod->declaring;
6911 /* SkipVerification is not allowed if core-clr is enabled */
6912 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
6914 dont_verify_stloc = TRUE;
6917 if (sig->is_inflated)
6918 generic_context = mono_method_get_context (method);
6919 else if (generic_container)
6920 generic_context = &generic_container->context;
6921 cfg->generic_context = generic_context;
6923 if (!cfg->generic_sharing_context)
6924 g_assert (!sig->has_type_parameters);
6926 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
6927 g_assert (method->is_inflated);
6928 g_assert (mono_method_get_context (method)->method_inst);
6930 if (method->is_inflated && mono_method_get_context (method)->method_inst)
6931 g_assert (sig->generic_param_count);
6933 if (cfg->method == method) {
6934 cfg->real_offset = 0;
6936 cfg->real_offset = inline_offset;
6939 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
6940 cfg->cil_offset_to_bb_len = header->code_size;
6942 cfg->current_method = method;
6944 if (cfg->verbose_level > 2)
6945 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
6947 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
6949 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
6950 for (n = 0; n < sig->param_count; ++n)
6951 param_types [n + sig->hasthis] = sig->params [n];
6952 cfg->arg_types = param_types;
6954 dont_inline = g_list_prepend (dont_inline, method);
6955 if (cfg->method == method) {
6957 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
6958 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
6961 NEW_BBLOCK (cfg, start_bblock);
6962 cfg->bb_entry = start_bblock;
6963 start_bblock->cil_code = NULL;
6964 start_bblock->cil_length = 0;
6965 #if defined(__native_client_codegen__)
6966 MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
6967 ins->dreg = alloc_dreg (cfg, STACK_I4);
6968 MONO_ADD_INS (start_bblock, ins);
6972 NEW_BBLOCK (cfg, end_bblock);
6973 cfg->bb_exit = end_bblock;
6974 end_bblock->cil_code = NULL;
6975 end_bblock->cil_length = 0;
6976 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
6977 g_assert (cfg->num_bblocks == 2);
6979 arg_array = cfg->args;
6981 if (header->num_clauses) {
6982 cfg->spvars = g_hash_table_new (NULL, NULL);
6983 cfg->exvars = g_hash_table_new (NULL, NULL);
6985 /* handle exception clauses */
6986 for (i = 0; i < header->num_clauses; ++i) {
6987 MonoBasicBlock *try_bb;
6988 MonoExceptionClause *clause = &header->clauses [i];
6989 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
6990 try_bb->real_offset = clause->try_offset;
6991 try_bb->try_start = TRUE;
6992 try_bb->region = ((i + 1) << 8) | clause->flags;
6993 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
6994 tblock->real_offset = clause->handler_offset;
6995 tblock->flags |= BB_EXCEPTION_HANDLER;
6998 * Linking the try block with the EH block hinders inlining as we won't be able to
6999 * merge the bblocks from inlining and produce an artificial hole for no good reason.
7001 if (COMPILE_LLVM (cfg))
7002 link_bblock (cfg, try_bb, tblock);
7004 if (*(ip + clause->handler_offset) == CEE_POP)
7005 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
7007 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
7008 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
7009 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
7010 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7011 MONO_ADD_INS (tblock, ins);
7013 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) {
7014 /* finally clauses already have a seq point */
7015 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7016 MONO_ADD_INS (tblock, ins);
7019 /* todo: is a fault block unsafe to optimize? */
7020 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
7021 tblock->flags |= BB_EXCEPTION_UNSAFE;
7025 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
7027 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
7029 /* catch and filter blocks get the exception object on the stack */
7030 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
7031 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7032 MonoInst *dummy_use;
7034 /* mostly like handle_stack_args (), but just sets the input args */
7035 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
7036 tblock->in_scount = 1;
7037 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7038 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7041 * Add a dummy use for the exvar so its liveness info will be
7045 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
7047 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7048 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
7049 tblock->flags |= BB_EXCEPTION_HANDLER;
7050 tblock->real_offset = clause->data.filter_offset;
7051 tblock->in_scount = 1;
7052 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7053 /* The filter block shares the exvar with the handler block */
7054 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7055 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7056 MONO_ADD_INS (tblock, ins);
7060 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
7061 clause->data.catch_class &&
7062 cfg->generic_sharing_context &&
7063 mono_class_check_context_used (clause->data.catch_class)) {
7065 * In shared generic code with catch
7066 * clauses containing type variables
7067 * the exception handling code has to
7068 * be able to get to the rgctx.
7069 * Therefore we have to make sure that
7070 * the vtable/mrgctx argument (for
7071 * static or generic methods) or the
7072 * "this" argument (for non-static
7073 * methods) are live.
7075 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7076 mini_method_get_context (method)->method_inst ||
7077 method->klass->valuetype) {
7078 mono_get_vtable_var (cfg);
7080 MonoInst *dummy_use;
7082 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
7087 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
7088 cfg->cbb = start_bblock;
7089 cfg->args = arg_array;
7090 mono_save_args (cfg, sig, inline_args);
7093 /* FIRST CODE BLOCK */
7094 NEW_BBLOCK (cfg, bblock);
7095 bblock->cil_code = ip;
7099 ADD_BBLOCK (cfg, bblock);
7101 if (cfg->method == method) {
7102 breakpoint_id = mono_debugger_method_has_breakpoint (method);
7103 if (breakpoint_id) {
7104 MONO_INST_NEW (cfg, ins, OP_BREAK);
7105 MONO_ADD_INS (bblock, ins);
7109 if (mono_security_cas_enabled ())
7110 secman = mono_security_manager_get_methods ();
7112 security = (secman && mono_security_method_has_declsec (method));
7113 /* at this point having security doesn't mean we have any code to generate */
7114 if (security && (cfg->method == method)) {
7115 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
7116 * And we do not want to enter the next section (with allocation) if we
7117 * have nothing to generate */
7118 security = mono_declsec_get_demands (method, &actions);
7121 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
7122 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
7124 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7125 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
7126 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
7128 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
7129 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
7133 mono_custom_attrs_free (custom);
7136 custom = mono_custom_attrs_from_class (wrapped->klass);
7137 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
7141 mono_custom_attrs_free (custom);
7144 /* not a P/Invoke after all */
7149 /* we use a separate basic block for the initialization code */
7150 NEW_BBLOCK (cfg, init_localsbb);
7151 cfg->bb_init = init_localsbb;
7152 init_localsbb->real_offset = cfg->real_offset;
7153 start_bblock->next_bb = init_localsbb;
7154 init_localsbb->next_bb = bblock;
7155 link_bblock (cfg, start_bblock, init_localsbb);
7156 link_bblock (cfg, init_localsbb, bblock);
7158 cfg->cbb = init_localsbb;
7160 if (cfg->gsharedvt && cfg->method == method) {
7161 MonoGSharedVtMethodInfo *info;
7162 MonoInst *var, *locals_var;
7165 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
7166 info->method = cfg->method;
7167 info->count_entries = 16;
7168 info->entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
7169 cfg->gsharedvt_info = info;
7171 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7172 /* prevent it from being register allocated */
7173 //var->flags |= MONO_INST_VOLATILE;
7174 cfg->gsharedvt_info_var = var;
7176 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
7177 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
7179 /* Allocate locals */
7180 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7181 /* prevent it from being register allocated */
7182 //locals_var->flags |= MONO_INST_VOLATILE;
7183 cfg->gsharedvt_locals_var = locals_var;
7185 dreg = alloc_ireg (cfg);
7186 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
7188 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
7189 ins->dreg = locals_var->dreg;
7191 MONO_ADD_INS (cfg->cbb, ins);
7192 cfg->gsharedvt_locals_var_ins = ins;
7194 cfg->flags |= MONO_CFG_HAS_ALLOCA;
7197 ins->flags |= MONO_INST_INIT;
7201 /* at this point we know, if security is TRUE, that some code needs to be generated */
7202 if (security && (cfg->method == method)) {
7205 cfg->stat_cas_demand_generation++;
7207 if (actions.demand.blob) {
7208 /* Add code for SecurityAction.Demand */
7209 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
7210 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
7211 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
7212 mono_emit_method_call (cfg, secman->demand, args, NULL);
7214 if (actions.noncasdemand.blob) {
7215 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
7216 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
7217 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
7218 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
7219 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
7220 mono_emit_method_call (cfg, secman->demand, args, NULL);
7222 if (actions.demandchoice.blob) {
7223 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
7224 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
7225 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
7226 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
7227 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
7231 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
7233 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
7236 if (mono_security_core_clr_enabled ()) {
7237 /* check if this is native code, e.g. an icall or a p/invoke */
7238 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
7239 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7241 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
7242 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
7244 /* if this ia a native call then it can only be JITted from platform code */
7245 if ((icall || pinvk) && method->klass && method->klass->image) {
7246 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
7247 MonoException *ex = icall ? mono_get_exception_security () :
7248 mono_get_exception_method_access ();
7249 emit_throw_exception (cfg, ex);
7256 CHECK_CFG_EXCEPTION;
7258 if (header->code_size == 0)
7261 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
7266 if (cfg->method == method)
7267 mono_debug_init_method (cfg, bblock, breakpoint_id);
7269 for (n = 0; n < header->num_locals; ++n) {
7270 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
7275 /* We force the vtable variable here for all shared methods
7276 for the possibility that they might show up in a stack
7277 trace where their exact instantiation is needed. */
7278 if (cfg->generic_sharing_context && method == cfg->method) {
7279 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7280 mini_method_get_context (method)->method_inst ||
7281 method->klass->valuetype) {
7282 mono_get_vtable_var (cfg);
7284 /* FIXME: Is there a better way to do this?
7285 We need the variable live for the duration
7286 of the whole method. */
7287 cfg->args [0]->flags |= MONO_INST_VOLATILE;
7291 /* add a check for this != NULL to inlined methods */
7292 if (is_virtual_call) {
7295 NEW_ARGLOAD (cfg, arg_ins, 0);
7296 MONO_ADD_INS (cfg->cbb, arg_ins);
7297 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
7300 skip_dead_blocks = !dont_verify;
7301 if (skip_dead_blocks) {
7302 original_bb = bb = mono_basic_block_split (method, &error);
7303 if (!mono_error_ok (&error)) {
7304 mono_error_cleanup (&error);
7310 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
7311 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
7314 start_new_bblock = 0;
7317 if (cfg->method == method)
7318 cfg->real_offset = ip - header->code;
7320 cfg->real_offset = inline_offset;
7325 if (start_new_bblock) {
7326 bblock->cil_length = ip - bblock->cil_code;
7327 if (start_new_bblock == 2) {
7328 g_assert (ip == tblock->cil_code);
7330 GET_BBLOCK (cfg, tblock, ip);
7332 bblock->next_bb = tblock;
7335 start_new_bblock = 0;
7336 for (i = 0; i < bblock->in_scount; ++i) {
7337 if (cfg->verbose_level > 3)
7338 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
7339 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
7343 g_slist_free (class_inits);
7346 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
7347 link_bblock (cfg, bblock, tblock);
7348 if (sp != stack_start) {
7349 handle_stack_args (cfg, stack_start, sp - stack_start);
7351 CHECK_UNVERIFIABLE (cfg);
7353 bblock->next_bb = tblock;
7356 for (i = 0; i < bblock->in_scount; ++i) {
7357 if (cfg->verbose_level > 3)
7358 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
7359 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
7362 g_slist_free (class_inits);
7367 if (skip_dead_blocks) {
7368 int ip_offset = ip - header->code;
7370 if (ip_offset == bb->end)
7374 int op_size = mono_opcode_size (ip, end);
7375 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
7377 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
7379 if (ip_offset + op_size == bb->end) {
7380 MONO_INST_NEW (cfg, ins, OP_NOP);
7381 MONO_ADD_INS (bblock, ins);
7382 start_new_bblock = 1;
7390 * Sequence points are points where the debugger can place a breakpoint.
7391 * Currently, we generate these automatically at points where the IL
7394 if (seq_points && ((sp == stack_start) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
7396 * Make methods interruptable at the beginning, and at the targets of
7397 * backward branches.
7398 * Also, do this at the start of every bblock in methods with clauses too,
7399 * to be able to handle instructions with inprecise control flow like
7401 * Backward branches are handled at the end of method-to-ir ().
7403 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
7405 /* Avoid sequence points on empty IL like .volatile */
7406 // FIXME: Enable this
7407 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
7408 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
7409 if (sp != stack_start)
7410 ins->flags |= MONO_INST_NONEMPTY_STACK;
7411 MONO_ADD_INS (cfg->cbb, ins);
7414 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
7417 bblock->real_offset = cfg->real_offset;
7419 if ((cfg->method == method) && cfg->coverage_info) {
7420 guint32 cil_offset = ip - header->code;
7421 cfg->coverage_info->data [cil_offset].cil_code = ip;
7423 /* TODO: Use an increment here */
7424 #if defined(TARGET_X86)
7425 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
7426 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
7428 MONO_ADD_INS (cfg->cbb, ins);
7430 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
7431 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
7435 if (cfg->verbose_level > 3)
7436 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
7440 if (seq_points && !sym_seq_points && sp != stack_start) {
7442 * The C# compiler uses these nops to notify the JIT that it should
7443 * insert seq points.
7445 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
7446 MONO_ADD_INS (cfg->cbb, ins);
7448 if (cfg->keep_cil_nops)
7449 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
7451 MONO_INST_NEW (cfg, ins, OP_NOP);
7453 MONO_ADD_INS (bblock, ins);
7456 if (should_insert_brekpoint (cfg->method)) {
7457 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
7459 MONO_INST_NEW (cfg, ins, OP_NOP);
7462 MONO_ADD_INS (bblock, ins);
7468 CHECK_STACK_OVF (1);
7469 n = (*ip)-CEE_LDARG_0;
7471 EMIT_NEW_ARGLOAD (cfg, ins, n);
7479 CHECK_STACK_OVF (1);
7480 n = (*ip)-CEE_LDLOC_0;
7482 EMIT_NEW_LOCLOAD (cfg, ins, n);
7491 n = (*ip)-CEE_STLOC_0;
7494 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
7496 emit_stloc_ir (cfg, sp, header, n);
7503 CHECK_STACK_OVF (1);
7506 EMIT_NEW_ARGLOAD (cfg, ins, n);
7512 CHECK_STACK_OVF (1);
7515 NEW_ARGLOADA (cfg, ins, n);
7516 MONO_ADD_INS (cfg->cbb, ins);
7526 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
7528 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
7533 CHECK_STACK_OVF (1);
7536 EMIT_NEW_LOCLOAD (cfg, ins, n);
7540 case CEE_LDLOCA_S: {
7541 unsigned char *tmp_ip;
7543 CHECK_STACK_OVF (1);
7544 CHECK_LOCAL (ip [1]);
7546 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
7552 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
7561 CHECK_LOCAL (ip [1]);
7562 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
7564 emit_stloc_ir (cfg, sp, header, ip [1]);
7569 CHECK_STACK_OVF (1);
7570 EMIT_NEW_PCONST (cfg, ins, NULL);
7571 ins->type = STACK_OBJ;
7576 CHECK_STACK_OVF (1);
7577 EMIT_NEW_ICONST (cfg, ins, -1);
7590 CHECK_STACK_OVF (1);
7591 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
7597 CHECK_STACK_OVF (1);
7599 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
7605 CHECK_STACK_OVF (1);
7606 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
7612 CHECK_STACK_OVF (1);
7613 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7614 ins->type = STACK_I8;
7615 ins->dreg = alloc_dreg (cfg, STACK_I8);
7617 ins->inst_l = (gint64)read64 (ip);
7618 MONO_ADD_INS (bblock, ins);
7624 gboolean use_aotconst = FALSE;
7626 #ifdef TARGET_POWERPC
7627 /* FIXME: Clean this up */
7628 if (cfg->compile_aot)
7629 use_aotconst = TRUE;
7632 /* FIXME: we should really allocate this only late in the compilation process */
7633 f = mono_domain_alloc (cfg->domain, sizeof (float));
7635 CHECK_STACK_OVF (1);
7641 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
7643 dreg = alloc_freg (cfg);
7644 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
7645 ins->type = STACK_R8;
7647 MONO_INST_NEW (cfg, ins, OP_R4CONST);
7648 ins->type = STACK_R8;
7649 ins->dreg = alloc_dreg (cfg, STACK_R8);
7651 MONO_ADD_INS (bblock, ins);
7661 gboolean use_aotconst = FALSE;
7663 #ifdef TARGET_POWERPC
7664 /* FIXME: Clean this up */
7665 if (cfg->compile_aot)
7666 use_aotconst = TRUE;
7669 /* FIXME: we should really allocate this only late in the compilation process */
7670 d = mono_domain_alloc (cfg->domain, sizeof (double));
7672 CHECK_STACK_OVF (1);
7678 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
7680 dreg = alloc_freg (cfg);
7681 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
7682 ins->type = STACK_R8;
7684 MONO_INST_NEW (cfg, ins, OP_R8CONST);
7685 ins->type = STACK_R8;
7686 ins->dreg = alloc_dreg (cfg, STACK_R8);
7688 MONO_ADD_INS (bblock, ins);
7697 MonoInst *temp, *store;
7699 CHECK_STACK_OVF (1);
7703 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
7704 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
7706 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
7709 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
7722 if (sp [0]->type == STACK_R8)
7723 /* we need to pop the value from the x86 FP stack */
7724 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
7730 INLINE_FAILURE ("jmp");
7731 GSHAREDVT_FAILURE (*ip);
7734 if (stack_start != sp)
7736 token = read32 (ip + 1);
7737 /* FIXME: check the signature matches */
7738 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7740 if (!cmethod || mono_loader_get_last_error ())
7743 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
7744 GENERIC_SHARING_FAILURE (CEE_JMP);
7746 if (mono_security_cas_enabled ())
7747 CHECK_CFG_EXCEPTION;
7749 if (ARCH_HAVE_OP_TAIL_CALL) {
7750 MonoMethodSignature *fsig = mono_method_signature (cmethod);
7753 /* Handle tail calls similarly to calls */
7754 n = fsig->param_count + fsig->hasthis;
7758 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
7759 call->method = cmethod;
7760 call->tail_call = TRUE;
7761 call->signature = mono_method_signature (cmethod);
7762 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
7763 call->inst.inst_p0 = cmethod;
7764 for (i = 0; i < n; ++i)
7765 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
7767 mono_arch_emit_call (cfg, call);
7768 MONO_ADD_INS (bblock, (MonoInst*)call);
7770 for (i = 0; i < num_args; ++i)
7771 /* Prevent arguments from being optimized away */
7772 arg_array [i]->flags |= MONO_INST_VOLATILE;
7774 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
7775 ins = (MonoInst*)call;
7776 ins->inst_p0 = cmethod;
7777 MONO_ADD_INS (bblock, ins);
7781 start_new_bblock = 1;
7786 case CEE_CALLVIRT: {
7787 MonoInst *addr = NULL;
7788 MonoMethodSignature *fsig = NULL;
7790 int virtual = *ip == CEE_CALLVIRT;
7791 int calli = *ip == CEE_CALLI;
7792 gboolean pass_imt_from_rgctx = FALSE;
7793 MonoInst *imt_arg = NULL;
7794 MonoInst *keep_this_alive = NULL;
7795 gboolean pass_vtable = FALSE;
7796 gboolean pass_mrgctx = FALSE;
7797 MonoInst *vtable_arg = NULL;
7798 gboolean check_this = FALSE;
7799 gboolean supported_tail_call = FALSE;
7800 gboolean tail_call = FALSE;
7801 gboolean need_seq_point = FALSE;
7802 guint32 call_opcode = *ip;
7803 gboolean emit_widen = TRUE;
7804 gboolean push_res = TRUE;
7805 gboolean skip_ret = FALSE;
7806 gboolean delegate_invoke = FALSE;
7809 token = read32 (ip + 1);
7814 //GSHAREDVT_FAILURE (*ip);
7819 fsig = mini_get_signature (method, token, generic_context);
7820 n = fsig->param_count + fsig->hasthis;
7822 if (method->dynamic && fsig->pinvoke) {
7826 * This is a call through a function pointer using a pinvoke
7827 * signature. Have to create a wrapper and call that instead.
7828 * FIXME: This is very slow, need to create a wrapper at JIT time
7829 * instead based on the signature.
7831 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
7832 EMIT_NEW_PCONST (cfg, args [1], fsig);
7834 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
7837 MonoMethod *cil_method;
7839 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7840 cil_method = cmethod;
7842 if (constrained_call) {
7843 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7844 if (cfg->verbose_level > 2)
7845 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_call));
7846 if (!((constrained_call->byval_arg.type == MONO_TYPE_VAR ||
7847 constrained_call->byval_arg.type == MONO_TYPE_MVAR) &&
7848 cfg->generic_sharing_context)) {
7849 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_call, generic_context);
7852 if (cfg->verbose_level > 2)
7853 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_call));
7855 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
7857 * This is needed since get_method_constrained can't find
7858 * the method in klass representing a type var.
7859 * The type var is guaranteed to be a reference type in this
7862 if (!mini_is_gsharedvt_klass (cfg, constrained_call))
7863 g_assert (!cmethod->klass->valuetype);
7865 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
7870 if (!cmethod || mono_loader_get_last_error ())
7872 if (!dont_verify && !cfg->skip_visibility) {
7873 MonoMethod *target_method = cil_method;
7874 if (method->is_inflated) {
7875 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
7877 if (!mono_method_can_access_method (method_definition, target_method) &&
7878 !mono_method_can_access_method (method, cil_method))
7879 METHOD_ACCESS_FAILURE;
7882 if (mono_security_core_clr_enabled ())
7883 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
7885 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
7886 /* MS.NET seems to silently convert this to a callvirt */
7891 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
7892 * converts to a callvirt.
7894 * tests/bug-515884.il is an example of this behavior
7896 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
7897 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
7898 if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
7902 if (!cmethod->klass->inited)
7903 if (!mono_class_init (cmethod->klass))
7904 TYPE_LOAD_ERROR (cmethod->klass);
7906 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
7907 mini_class_is_system_array (cmethod->klass)) {
7908 array_rank = cmethod->klass->rank;
7909 fsig = mono_method_signature (cmethod);
7911 fsig = mono_method_signature (cmethod);
7916 if (fsig->pinvoke) {
7917 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
7918 check_for_pending_exc, cfg->compile_aot);
7919 fsig = mono_method_signature (wrapper);
7920 } else if (constrained_call) {
7921 fsig = mono_method_signature (cmethod);
7923 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
7927 mono_save_token_info (cfg, image, token, cil_method);
7929 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7931 * Need to emit an implicit seq point after every non-void call so single stepping through nested calls like
7932 * foo (bar (), baz ())
7933 * works correctly. MS does this also:
7934 * http://stackoverflow.com/questions/6937198/making-your-net-language-step-correctly-in-the-debugger
7935 * The problem with this approach is that the debugger will stop after all calls returning a value,
7936 * even for simple cases, like:
7939 /* Special case a few common successor opcodes */
7940 if (!(ip + 5 < end && (ip [5] == CEE_POP || ip [5] == CEE_NOP)) && !(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
7941 need_seq_point = TRUE;
7944 n = fsig->param_count + fsig->hasthis;
7946 /* Don't support calls made using type arguments for now */
7948 if (cfg->gsharedvt) {
7949 if (mini_is_gsharedvt_signature (cfg, fsig))
7950 GSHAREDVT_FAILURE (*ip);
7954 if (mono_security_cas_enabled ()) {
7955 if (check_linkdemand (cfg, method, cmethod))
7956 INLINE_FAILURE ("linkdemand");
7957 CHECK_CFG_EXCEPTION;
7960 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
7961 g_assert_not_reached ();
7964 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
7967 if (!cfg->generic_sharing_context && cmethod)
7968 g_assert (!mono_method_check_context_used (cmethod));
7972 //g_assert (!virtual || fsig->hasthis);
7976 if (constrained_call) {
7977 if (mini_is_gsharedvt_klass (cfg, constrained_call)) {
7979 * Constrained calls need to behave differently at runtime dependending on whenever the receiver is instantiated as ref type or as a vtype.
7981 if ((cmethod->klass != mono_defaults.object_class) && constrained_call->valuetype && cmethod->klass->valuetype) {
7982 /* The 'Own method' case below */
7983 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
7984 /* 'The type parameter is instantiated as a reference type' case below. */
7985 } else if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
7986 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || mini_is_gsharedvt_type (cfg, fsig->ret)) &&
7987 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || mini_is_gsharedvt_type (cfg, fsig->params [0]))))) {
7988 MonoInst *args [16];
7991 * This case handles calls to
7992 * - object:ToString()/Equals()/GetHashCode(),
7993 * - System.IComparable<T>:CompareTo()
7994 * - System.IEquatable<T>:Equals ()
7995 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
7999 if (mono_method_check_context_used (cmethod))
8000 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
8002 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
8003 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_call), constrained_call, MONO_RGCTX_INFO_KLASS);
8005 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
8006 if (fsig->hasthis && fsig->param_count) {
8007 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
8008 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
8009 ins->dreg = alloc_preg (cfg);
8010 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
8011 MONO_ADD_INS (cfg->cbb, ins);
8014 if (mini_is_gsharedvt_type (cfg, fsig->params [0])) {
8017 args [3] = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
8019 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
8020 addr_reg = ins->dreg;
8021 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
8023 EMIT_NEW_ICONST (cfg, args [3], 0);
8024 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
8027 EMIT_NEW_ICONST (cfg, args [3], 0);
8028 EMIT_NEW_ICONST (cfg, args [4], 0);
8030 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
8033 if (mini_is_gsharedvt_type (cfg, fsig->ret)) {
8034 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins, &bblock);
8035 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret)) {
8039 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
8040 MONO_ADD_INS (cfg->cbb, add);
8042 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
8043 MONO_ADD_INS (cfg->cbb, ins);
8044 /* ins represents the call result */
8049 GSHAREDVT_FAILURE (*ip);
8053 * We have the `constrained.' prefix opcode.
8055 if (constrained_call->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8057 * The type parameter is instantiated as a valuetype,
8058 * but that type doesn't override the method we're
8059 * calling, so we need to box `this'.
8061 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
8062 ins->klass = constrained_call;
8063 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
8064 CHECK_CFG_EXCEPTION;
8065 } else if (!constrained_call->valuetype) {
8066 int dreg = alloc_ireg_ref (cfg);
8069 * The type parameter is instantiated as a reference
8070 * type. We have a managed pointer on the stack, so
8071 * we need to dereference it here.
8073 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
8074 ins->type = STACK_OBJ;
8077 if (cmethod->klass->valuetype) {
8080 /* Interface method */
8083 mono_class_setup_vtable (constrained_call);
8084 CHECK_TYPELOAD (constrained_call);
8085 ioffset = mono_class_interface_offset (constrained_call, cmethod->klass);
8087 TYPE_LOAD_ERROR (constrained_call);
8088 slot = mono_method_get_vtable_slot (cmethod);
8090 TYPE_LOAD_ERROR (cmethod->klass);
8091 cmethod = constrained_call->vtable [ioffset + slot];
8093 if (cmethod->klass == mono_defaults.enum_class) {
8094 /* Enum implements some interfaces, so treat this as the first case */
8095 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
8096 ins->klass = constrained_call;
8097 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
8098 CHECK_CFG_EXCEPTION;
8103 constrained_call = NULL;
8106 if (!calli && check_call_signature (cfg, fsig, sp))
8109 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
8110 if (cmethod && (cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
8111 delegate_invoke = TRUE;
8114 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
8116 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8117 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8125 * If the callee is a shared method, then its static cctor
8126 * might not get called after the call was patched.
8128 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8129 emit_generic_class_init (cfg, cmethod->klass);
8130 CHECK_TYPELOAD (cmethod->klass);
8134 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
8136 if (cfg->generic_sharing_context && cmethod) {
8137 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
8139 context_used = mini_method_check_context_used (cfg, cmethod);
8141 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8142 /* Generic method interface
8143 calls are resolved via a
8144 helper function and don't
8146 if (!cmethod_context || !cmethod_context->method_inst)
8147 pass_imt_from_rgctx = TRUE;
8151 * If a shared method calls another
8152 * shared method then the caller must
8153 * have a generic sharing context
8154 * because the magic trampoline
8155 * requires it. FIXME: We shouldn't
8156 * have to force the vtable/mrgctx
8157 * variable here. Instead there
8158 * should be a flag in the cfg to
8159 * request a generic sharing context.
8162 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
8163 mono_get_vtable_var (cfg);
8168 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8170 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8172 CHECK_TYPELOAD (cmethod->klass);
8173 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8178 g_assert (!vtable_arg);
8180 if (!cfg->compile_aot) {
8182 * emit_get_rgctx_method () calls mono_class_vtable () so check
8183 * for type load errors before.
8185 mono_class_setup_vtable (cmethod->klass);
8186 CHECK_TYPELOAD (cmethod->klass);
8189 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8191 /* !marshalbyref is needed to properly handle generic methods + remoting */
8192 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
8193 MONO_METHOD_IS_FINAL (cmethod)) &&
8194 !mono_class_is_marshalbyref (cmethod->klass)) {
8201 if (pass_imt_from_rgctx) {
8202 g_assert (!pass_vtable);
8205 imt_arg = emit_get_rgctx_method (cfg, context_used,
8206 cmethod, MONO_RGCTX_INFO_METHOD);
8210 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8212 /* Calling virtual generic methods */
8213 if (cmethod && virtual &&
8214 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
8215 !(MONO_METHOD_IS_FINAL (cmethod) &&
8216 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
8217 fsig->generic_param_count &&
8218 !(cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))) {
8219 MonoInst *this_temp, *this_arg_temp, *store;
8220 MonoInst *iargs [4];
8221 gboolean use_imt = FALSE;
8223 g_assert (fsig->is_inflated);
8225 /* Prevent inlining of methods that contain indirect calls */
8226 INLINE_FAILURE ("virtual generic call");
8228 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
8229 GSHAREDVT_FAILURE (*ip);
8231 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
8232 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt)
8237 g_assert (!imt_arg);
8239 g_assert (cmethod->is_inflated);
8240 imt_arg = emit_get_rgctx_method (cfg, context_used,
8241 cmethod, MONO_RGCTX_INFO_METHOD);
8242 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
8244 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
8245 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
8246 MONO_ADD_INS (bblock, store);
8248 /* FIXME: This should be a managed pointer */
8249 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8251 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
8252 iargs [1] = emit_get_rgctx_method (cfg, context_used,
8253 cmethod, MONO_RGCTX_INFO_METHOD);
8254 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
8255 addr = mono_emit_jit_icall (cfg,
8256 mono_helper_compile_generic_method, iargs);
8258 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
8260 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8267 * Implement a workaround for the inherent races involved in locking:
8273 * If a thread abort happens between the call to Monitor.Enter () and the start of the
8274 * try block, the Exit () won't be executed, see:
8275 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
8276 * To work around this, we extend such try blocks to include the last x bytes
8277 * of the Monitor.Enter () call.
8279 if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8280 MonoBasicBlock *tbb;
8282 GET_BBLOCK (cfg, tbb, ip + 5);
8284 * Only extend try blocks with a finally, to avoid catching exceptions thrown
8285 * from Monitor.Enter like ArgumentNullException.
8287 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
8288 /* Mark this bblock as needing to be extended */
8289 tbb->extend_try_block = TRUE;
8293 /* Conversion to a JIT intrinsic */
8294 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
8296 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8297 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8304 if (cmethod && (cfg->opt & MONO_OPT_INLINE) &&
8305 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
8306 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
8307 !g_list_find (dont_inline, cmethod)) {
8309 gboolean always = FALSE;
8311 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
8312 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
8313 /* Prevent inlining of methods that call wrappers */
8314 INLINE_FAILURE ("wrapper call");
8315 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
8319 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, always);
8321 cfg->real_offset += 5;
8324 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8325 /* *sp is already set by inline_method */
8330 inline_costs += costs;
8336 /* Tail recursion elimination */
8337 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
8338 gboolean has_vtargs = FALSE;
8341 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8342 INLINE_FAILURE ("tail call");
8344 /* keep it simple */
8345 for (i = fsig->param_count - 1; i >= 0; i--) {
8346 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
8351 for (i = 0; i < n; ++i)
8352 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8353 MONO_INST_NEW (cfg, ins, OP_BR);
8354 MONO_ADD_INS (bblock, ins);
8355 tblock = start_bblock->out_bb [0];
8356 link_bblock (cfg, bblock, tblock);
8357 ins->inst_target_bb = tblock;
8358 start_new_bblock = 1;
8360 /* skip the CEE_RET, too */
8361 if (ip_in_bb (cfg, bblock, ip + 5))
8368 inline_costs += 10 * num_calls++;
8371 * Making generic calls out of gsharedvt methods.
8373 if (cmethod && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
8374 MonoRgctxInfoType info_type;
8377 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
8378 //GSHAREDVT_FAILURE (*ip);
8379 // disable for possible remoting calls
8380 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
8381 GSHAREDVT_FAILURE (*ip);
8382 if (fsig->generic_param_count) {
8383 /* virtual generic call */
8384 g_assert (mono_use_imt);
8385 g_assert (!imt_arg);
8386 /* Same as the virtual generic case above */
8387 imt_arg = emit_get_rgctx_method (cfg, context_used,
8388 cmethod, MONO_RGCTX_INFO_METHOD);
8389 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
8394 if (cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)
8395 /* test_0_multi_dim_arrays () in gshared.cs */
8396 GSHAREDVT_FAILURE (*ip);
8398 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
8399 keep_this_alive = sp [0];
8401 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
8402 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
8404 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
8405 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
8407 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8409 } else if (calli && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
8411 * We pass the address to the gsharedvt trampoline in the rgctx reg
8413 MonoInst *callee = addr;
8415 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8417 GSHAREDVT_FAILURE (*ip);
8419 addr = emit_get_rgctx_sig (cfg, context_used,
8420 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8421 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8425 /* Generic sharing */
8426 /* FIXME: only do this for generic methods if
8427 they are not shared! */
8428 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
8429 (!mono_method_is_generic_sharable (cmethod, TRUE) ||
8430 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
8431 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
8432 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
8433 INLINE_FAILURE ("gshared");
8435 g_assert (cfg->generic_sharing_context && cmethod);
8439 * We are compiling a call to a
8440 * generic method from shared code,
8441 * which means that we have to look up
8442 * the method in the rgctx and do an
8446 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8448 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8449 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8453 /* Indirect calls */
8455 if (call_opcode == CEE_CALL)
8456 g_assert (context_used);
8457 else if (call_opcode == CEE_CALLI)
8458 g_assert (!vtable_arg);
8460 /* FIXME: what the hell is this??? */
8461 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
8462 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
8464 /* Prevent inlining of methods with indirect calls */
8465 INLINE_FAILURE ("indirect call");
8467 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8472 * Instead of emitting an indirect call, emit a direct call
8473 * with the contents of the aotconst as the patch info.
8475 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8476 info_type = addr->inst_c1;
8477 info_data = addr->inst_p0;
8479 info_type = addr->inst_right->inst_c1;
8480 info_data = addr->inst_right->inst_left;
8483 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8484 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8489 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8497 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
8498 MonoInst *val = sp [fsig->param_count];
8500 if (val->type == STACK_OBJ) {
8501 MonoInst *iargs [2];
8506 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
8509 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
8510 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
8511 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
8512 emit_write_barrier (cfg, addr, val);
8513 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
8514 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8516 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
8517 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
8518 if (!cmethod->klass->element_class->valuetype && !readonly)
8519 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
8520 CHECK_TYPELOAD (cmethod->klass);
8523 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8526 g_assert_not_reached ();
8533 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
8537 /* Tail prefix / tail call optimization */
8539 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
8540 /* FIXME: runtime generic context pointer for jumps? */
8541 /* FIXME: handle this for generic sharing eventually */
8542 if (cmethod && (ins_flag & MONO_INST_TAILCALL) &&
8543 !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
8544 supported_tail_call = TRUE;
8546 if (supported_tail_call) {
8549 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8550 INLINE_FAILURE ("tail call");
8552 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
8554 if (ARCH_HAVE_OP_TAIL_CALL) {
8555 /* Handle tail calls similarly to normal calls */
8558 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8559 call->tail_call = TRUE;
8560 call->method = cmethod;
8561 call->signature = mono_method_signature (cmethod);
8564 * We implement tail calls by storing the actual arguments into the
8565 * argument variables, then emitting a CEE_JMP.
8567 for (i = 0; i < n; ++i) {
8568 /* Prevent argument from being register allocated */
8569 arg_array [i]->flags |= MONO_INST_VOLATILE;
8570 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8572 ins = (MonoInst*)call;
8573 ins->inst_p0 = cmethod;
8574 ins->inst_p1 = arg_array [0];
8575 MONO_ADD_INS (bblock, ins);
8576 link_bblock (cfg, bblock, end_bblock);
8577 start_new_bblock = 1;
8579 // FIXME: Eliminate unreachable epilogs
8582 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
8583 * only reachable from this call.
8585 GET_BBLOCK (cfg, tblock, ip + 5);
8586 if (tblock == bblock || tblock->in_count == 0)
8595 * Synchronized wrappers.
8596 * Its hard to determine where to replace a method with its synchronized
8597 * wrapper without causing an infinite recursion. The current solution is
8598 * to add the synchronized wrapper in the trampolines, and to
8599 * change the called method to a dummy wrapper, and resolve that wrapper
8600 * to the real method in mono_jit_compile_method ().
8602 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8603 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
8604 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
8605 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
8609 INLINE_FAILURE ("call");
8610 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual ? sp [0] : NULL,
8611 imt_arg, vtable_arg);
8614 link_bblock (cfg, bblock, end_bblock);
8615 start_new_bblock = 1;
8617 // FIXME: Eliminate unreachable epilogs
8620 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
8621 * only reachable from this call.
8623 GET_BBLOCK (cfg, tblock, ip + 5);
8624 if (tblock == bblock || tblock->in_count == 0)
8631 /* End of call, INS should contain the result of the call, if any */
8633 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
8636 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8641 if (keep_this_alive) {
8642 MonoInst *dummy_use;
8644 /* See mono_emit_method_call_full () */
8645 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
8648 CHECK_CFG_EXCEPTION;
8652 g_assert (*ip == CEE_RET);
8656 constrained_call = NULL;
8658 emit_seq_point (cfg, method, ip, FALSE, TRUE);
8662 if (cfg->method != method) {
8663 /* return from inlined method */
8665 * If in_count == 0, that means the ret is unreachable due to
8666 * being preceeded by a throw. In that case, inline_method () will
8667 * handle setting the return value
8668 * (test case: test_0_inline_throw ()).
8670 if (return_var && cfg->cbb->in_count) {
8671 MonoType *ret_type = mono_method_signature (method)->ret;
8677 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
8680 //g_assert (returnvar != -1);
8681 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
8682 cfg->ret_var_set = TRUE;
8685 if (cfg->lmf_var && cfg->cbb->in_count)
8689 MonoType *ret_type = mini_replace_type (mono_method_signature (method)->ret);
8691 if (seq_points && !sym_seq_points) {
8693 * Place a seq point here too even through the IL stack is not
8694 * empty, so a step over on
8697 * will work correctly.
8699 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
8700 MONO_ADD_INS (cfg->cbb, ins);
8703 g_assert (!return_var);
8707 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
8710 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
8713 if (!cfg->vret_addr) {
8716 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
8718 EMIT_NEW_RETLOADA (cfg, ret_addr);
8720 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
8721 ins->klass = mono_class_from_mono_type (ret_type);
8724 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
8725 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
8726 MonoInst *iargs [1];
8730 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
8731 mono_arch_emit_setret (cfg, method, conv);
8733 mono_arch_emit_setret (cfg, method, *sp);
8736 mono_arch_emit_setret (cfg, method, *sp);
8741 if (sp != stack_start)
8743 MONO_INST_NEW (cfg, ins, OP_BR);
8745 ins->inst_target_bb = end_bblock;
8746 MONO_ADD_INS (bblock, ins);
8747 link_bblock (cfg, bblock, end_bblock);
8748 start_new_bblock = 1;
8752 MONO_INST_NEW (cfg, ins, OP_BR);
8754 target = ip + 1 + (signed char)(*ip);
8756 GET_BBLOCK (cfg, tblock, target);
8757 link_bblock (cfg, bblock, tblock);
8758 ins->inst_target_bb = tblock;
8759 if (sp != stack_start) {
8760 handle_stack_args (cfg, stack_start, sp - stack_start);
8762 CHECK_UNVERIFIABLE (cfg);
8764 MONO_ADD_INS (bblock, ins);
8765 start_new_bblock = 1;
8766 inline_costs += BRANCH_COST;
8780 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
8782 target = ip + 1 + *(signed char*)ip;
8788 inline_costs += BRANCH_COST;
8792 MONO_INST_NEW (cfg, ins, OP_BR);
8795 target = ip + 4 + (gint32)read32(ip);
8797 GET_BBLOCK (cfg, tblock, target);
8798 link_bblock (cfg, bblock, tblock);
8799 ins->inst_target_bb = tblock;
8800 if (sp != stack_start) {
8801 handle_stack_args (cfg, stack_start, sp - stack_start);
8803 CHECK_UNVERIFIABLE (cfg);
8806 MONO_ADD_INS (bblock, ins);
8808 start_new_bblock = 1;
8809 inline_costs += BRANCH_COST;
8816 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
8817 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
8818 guint32 opsize = is_short ? 1 : 4;
8820 CHECK_OPSIZE (opsize);
8822 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
8825 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
8830 GET_BBLOCK (cfg, tblock, target);
8831 link_bblock (cfg, bblock, tblock);
8832 GET_BBLOCK (cfg, tblock, ip);
8833 link_bblock (cfg, bblock, tblock);
8835 if (sp != stack_start) {
8836 handle_stack_args (cfg, stack_start, sp - stack_start);
8837 CHECK_UNVERIFIABLE (cfg);
8840 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
8841 cmp->sreg1 = sp [0]->dreg;
8842 type_from_op (cmp, sp [0], NULL);
8845 #if SIZEOF_REGISTER == 4
8846 if (cmp->opcode == OP_LCOMPARE_IMM) {
8847 /* Convert it to OP_LCOMPARE */
8848 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8849 ins->type = STACK_I8;
8850 ins->dreg = alloc_dreg (cfg, STACK_I8);
8852 MONO_ADD_INS (bblock, ins);
8853 cmp->opcode = OP_LCOMPARE;
8854 cmp->sreg2 = ins->dreg;
8857 MONO_ADD_INS (bblock, cmp);
8859 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
8860 type_from_op (ins, sp [0], NULL);
8861 MONO_ADD_INS (bblock, ins);
8862 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
8863 GET_BBLOCK (cfg, tblock, target);
8864 ins->inst_true_bb = tblock;
8865 GET_BBLOCK (cfg, tblock, ip);
8866 ins->inst_false_bb = tblock;
8867 start_new_bblock = 2;
8870 inline_costs += BRANCH_COST;
8885 MONO_INST_NEW (cfg, ins, *ip);
8887 target = ip + 4 + (gint32)read32(ip);
8893 inline_costs += BRANCH_COST;
8897 MonoBasicBlock **targets;
8898 MonoBasicBlock *default_bblock;
8899 MonoJumpInfoBBTable *table;
8900 int offset_reg = alloc_preg (cfg);
8901 int target_reg = alloc_preg (cfg);
8902 int table_reg = alloc_preg (cfg);
8903 int sum_reg = alloc_preg (cfg);
8904 gboolean use_op_switch;
8908 n = read32 (ip + 1);
8911 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
8915 CHECK_OPSIZE (n * sizeof (guint32));
8916 target = ip + n * sizeof (guint32);
8918 GET_BBLOCK (cfg, default_bblock, target);
8919 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
8921 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
8922 for (i = 0; i < n; ++i) {
8923 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
8924 targets [i] = tblock;
8925 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
8929 if (sp != stack_start) {
8931 * Link the current bb with the targets as well, so handle_stack_args
8932 * will set their in_stack correctly.
8934 link_bblock (cfg, bblock, default_bblock);
8935 for (i = 0; i < n; ++i)
8936 link_bblock (cfg, bblock, targets [i]);
8938 handle_stack_args (cfg, stack_start, sp - stack_start);
8940 CHECK_UNVERIFIABLE (cfg);
8943 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
8944 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
8947 for (i = 0; i < n; ++i)
8948 link_bblock (cfg, bblock, targets [i]);
8950 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
8951 table->table = targets;
8952 table->table_size = n;
8954 use_op_switch = FALSE;
8956 /* ARM implements SWITCH statements differently */
8957 /* FIXME: Make it use the generic implementation */
8958 if (!cfg->compile_aot)
8959 use_op_switch = TRUE;
8962 if (COMPILE_LLVM (cfg))
8963 use_op_switch = TRUE;
8965 cfg->cbb->has_jump_table = 1;
8967 if (use_op_switch) {
8968 MONO_INST_NEW (cfg, ins, OP_SWITCH);
8969 ins->sreg1 = src1->dreg;
8970 ins->inst_p0 = table;
8971 ins->inst_many_bb = targets;
8972 ins->klass = GUINT_TO_POINTER (n);
8973 MONO_ADD_INS (cfg->cbb, ins);
8975 if (sizeof (gpointer) == 8)
8976 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
8978 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
8980 #if SIZEOF_REGISTER == 8
8981 /* The upper word might not be zero, and we add it to a 64 bit address later */
8982 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
8985 if (cfg->compile_aot) {
8986 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
8988 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
8989 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
8990 ins->inst_p0 = table;
8991 ins->dreg = table_reg;
8992 MONO_ADD_INS (cfg->cbb, ins);
8995 /* FIXME: Use load_memindex */
8996 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
8997 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
8998 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
9000 start_new_bblock = 1;
9001 inline_costs += (BRANCH_COST * 2);
9021 dreg = alloc_freg (cfg);
9024 dreg = alloc_lreg (cfg);
9027 dreg = alloc_ireg_ref (cfg);
9030 dreg = alloc_preg (cfg);
9033 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
9034 ins->type = ldind_type [*ip - CEE_LDIND_I1];
9035 ins->flags |= ins_flag;
9036 MONO_ADD_INS (bblock, ins);
9038 if (ins_flag & MONO_INST_VOLATILE) {
9039 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9040 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
9041 emit_memory_barrier (cfg, FullBarrier);
9057 if (ins_flag & MONO_INST_VOLATILE) {
9058 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9059 /* FIXME it's questionable if release semantics require a full barrier or just StoreStore (+LoadStore) */
9060 emit_memory_barrier (cfg, FullBarrier);
9063 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
9064 ins->flags |= ins_flag;
9067 MONO_ADD_INS (bblock, ins);
9069 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
9070 emit_write_barrier (cfg, sp [0], sp [1]);
9079 MONO_INST_NEW (cfg, ins, (*ip));
9081 ins->sreg1 = sp [0]->dreg;
9082 ins->sreg2 = sp [1]->dreg;
9083 type_from_op (ins, sp [0], sp [1]);
9085 ins->dreg = alloc_dreg ((cfg), (ins)->type);
9087 /* Use the immediate opcodes if possible */
9088 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
9089 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9090 if (imm_opcode != -1) {
9091 ins->opcode = imm_opcode;
9092 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
9095 sp [1]->opcode = OP_NOP;
9099 MONO_ADD_INS ((cfg)->cbb, (ins));
9101 *sp++ = mono_decompose_opcode (cfg, ins);
9118 MONO_INST_NEW (cfg, ins, (*ip));
9120 ins->sreg1 = sp [0]->dreg;
9121 ins->sreg2 = sp [1]->dreg;
9122 type_from_op (ins, sp [0], sp [1]);
9124 ADD_WIDEN_OP (ins, sp [0], sp [1]);
9125 ins->dreg = alloc_dreg ((cfg), (ins)->type);
9127 /* FIXME: Pass opcode to is_inst_imm */
9129 /* Use the immediate opcodes if possible */
9130 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
9133 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9134 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
9135 /* Keep emulated opcodes which are optimized away later */
9136 if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
9137 imm_opcode = mono_op_to_op_imm (ins->opcode);
9140 if (imm_opcode != -1) {
9141 ins->opcode = imm_opcode;
9142 if (sp [1]->opcode == OP_I8CONST) {
9143 #if SIZEOF_REGISTER == 8
9144 ins->inst_imm = sp [1]->inst_l;
9146 ins->inst_ls_word = sp [1]->inst_ls_word;
9147 ins->inst_ms_word = sp [1]->inst_ms_word;
9151 ins->inst_imm = (gssize)(sp [1]->inst_c0);
9154 /* Might be followed by an instruction added by ADD_WIDEN_OP */
9155 if (sp [1]->next == NULL)
9156 sp [1]->opcode = OP_NOP;
9159 MONO_ADD_INS ((cfg)->cbb, (ins));
9161 *sp++ = mono_decompose_opcode (cfg, ins);
9174 case CEE_CONV_OVF_I8:
9175 case CEE_CONV_OVF_U8:
9179 /* Special case this earlier so we have long constants in the IR */
9180 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
9181 int data = sp [-1]->inst_c0;
9182 sp [-1]->opcode = OP_I8CONST;
9183 sp [-1]->type = STACK_I8;
9184 #if SIZEOF_REGISTER == 8
9185 if ((*ip) == CEE_CONV_U8)
9186 sp [-1]->inst_c0 = (guint32)data;
9188 sp [-1]->inst_c0 = data;
9190 sp [-1]->inst_ls_word = data;
9191 if ((*ip) == CEE_CONV_U8)
9192 sp [-1]->inst_ms_word = 0;
9194 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
9196 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
9203 case CEE_CONV_OVF_I4:
9204 case CEE_CONV_OVF_I1:
9205 case CEE_CONV_OVF_I2:
9206 case CEE_CONV_OVF_I:
9207 case CEE_CONV_OVF_U:
9210 if (sp [-1]->type == STACK_R8) {
9211 ADD_UNOP (CEE_CONV_OVF_I8);
9218 case CEE_CONV_OVF_U1:
9219 case CEE_CONV_OVF_U2:
9220 case CEE_CONV_OVF_U4:
9223 if (sp [-1]->type == STACK_R8) {
9224 ADD_UNOP (CEE_CONV_OVF_U8);
9231 case CEE_CONV_OVF_I1_UN:
9232 case CEE_CONV_OVF_I2_UN:
9233 case CEE_CONV_OVF_I4_UN:
9234 case CEE_CONV_OVF_I8_UN:
9235 case CEE_CONV_OVF_U1_UN:
9236 case CEE_CONV_OVF_U2_UN:
9237 case CEE_CONV_OVF_U4_UN:
9238 case CEE_CONV_OVF_U8_UN:
9239 case CEE_CONV_OVF_I_UN:
9240 case CEE_CONV_OVF_U_UN:
9247 CHECK_CFG_EXCEPTION;
9251 case CEE_ADD_OVF_UN:
9253 case CEE_MUL_OVF_UN:
9255 case CEE_SUB_OVF_UN:
9261 GSHAREDVT_FAILURE (*ip);
9264 token = read32 (ip + 1);
9265 klass = mini_get_class (method, token, generic_context);
9266 CHECK_TYPELOAD (klass);
9268 if (generic_class_is_reference_type (cfg, klass)) {
9269 MonoInst *store, *load;
9270 int dreg = alloc_ireg_ref (cfg);
9272 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
9273 load->flags |= ins_flag;
9274 MONO_ADD_INS (cfg->cbb, load);
9276 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
9277 store->flags |= ins_flag;
9278 MONO_ADD_INS (cfg->cbb, store);
9280 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
9281 emit_write_barrier (cfg, sp [0], sp [1]);
9283 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
9295 token = read32 (ip + 1);
9296 klass = mini_get_class (method, token, generic_context);
9297 CHECK_TYPELOAD (klass);
9299 /* Optimize the common ldobj+stloc combination */
9309 loc_index = ip [5] - CEE_STLOC_0;
9316 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
9317 CHECK_LOCAL (loc_index);
9319 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9320 ins->dreg = cfg->locals [loc_index]->dreg;
9321 ins->flags |= ins_flag;
9324 if (ins_flag & MONO_INST_VOLATILE) {
9325 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9326 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
9327 emit_memory_barrier (cfg, FullBarrier);
9333 /* Optimize the ldobj+stobj combination */
9334 /* The reference case ends up being a load+store anyway */
9335 /* Skip this if the operation is volatile. */
9336 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
9341 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
9348 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9349 ins->flags |= ins_flag;
9352 if (ins_flag & MONO_INST_VOLATILE) {
9353 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9354 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
9355 emit_memory_barrier (cfg, FullBarrier);
9364 CHECK_STACK_OVF (1);
9366 n = read32 (ip + 1);
9368 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
9369 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
9370 ins->type = STACK_OBJ;
9373 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
9374 MonoInst *iargs [1];
9376 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
9377 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
9379 if (cfg->opt & MONO_OPT_SHARED) {
9380 MonoInst *iargs [3];
9382 if (cfg->compile_aot) {
9383 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
9385 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9386 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
9387 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
9388 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
9389 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
9391 if (bblock->out_of_line) {
9392 MonoInst *iargs [2];
9394 if (image == mono_defaults.corlib) {
9396 * Avoid relocations in AOT and save some space by using a
9397 * version of helper_ldstr specialized to mscorlib.
9399 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
9400 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
9402 /* Avoid creating the string object */
9403 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9404 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
9405 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
9409 if (cfg->compile_aot) {
9410 NEW_LDSTRCONST (cfg, ins, image, n);
9412 MONO_ADD_INS (bblock, ins);
9415 NEW_PCONST (cfg, ins, NULL);
9416 ins->type = STACK_OBJ;
9417 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
9419 OUT_OF_MEMORY_FAILURE;
9422 MONO_ADD_INS (bblock, ins);
9431 MonoInst *iargs [2];
9432 MonoMethodSignature *fsig;
9435 MonoInst *vtable_arg = NULL;
9438 token = read32 (ip + 1);
9439 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
9440 if (!cmethod || mono_loader_get_last_error ())
9442 fsig = mono_method_get_signature (cmethod, image, token);
9446 mono_save_token_info (cfg, image, token, cmethod);
9448 if (!mono_class_init (cmethod->klass))
9449 TYPE_LOAD_ERROR (cmethod->klass);
9451 context_used = mini_method_check_context_used (cfg, cmethod);
9453 if (mono_security_cas_enabled ()) {
9454 if (check_linkdemand (cfg, method, cmethod))
9455 INLINE_FAILURE ("linkdemand");
9456 CHECK_CFG_EXCEPTION;
9457 } else if (mono_security_core_clr_enabled ()) {
9458 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9461 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9462 emit_generic_class_init (cfg, cmethod->klass);
9463 CHECK_TYPELOAD (cmethod->klass);
9467 if (cfg->gsharedvt) {
9468 if (mini_is_gsharedvt_variable_signature (sig))
9469 GSHAREDVT_FAILURE (*ip);
9473 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
9474 mono_method_is_generic_sharable (cmethod, TRUE)) {
9475 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
9476 mono_class_vtable (cfg->domain, cmethod->klass);
9477 CHECK_TYPELOAD (cmethod->klass);
9479 vtable_arg = emit_get_rgctx_method (cfg, context_used,
9480 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9483 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
9484 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9486 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9488 CHECK_TYPELOAD (cmethod->klass);
9489 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9494 n = fsig->param_count;
9498 * Generate smaller code for the common newobj <exception> instruction in
9499 * argument checking code.
9501 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
9502 is_exception_class (cmethod->klass) && n <= 2 &&
9503 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
9504 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
9505 MonoInst *iargs [3];
9507 g_assert (!vtable_arg);
9511 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
9514 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
9518 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
9523 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
9526 g_assert_not_reached ();
9534 /* move the args to allow room for 'this' in the first position */
9540 /* check_call_signature () requires sp[0] to be set */
9541 this_ins.type = STACK_OBJ;
9543 if (check_call_signature (cfg, fsig, sp))
9548 if (mini_class_is_system_array (cmethod->klass)) {
9549 g_assert (!vtable_arg);
9551 *sp = emit_get_rgctx_method (cfg, context_used,
9552 cmethod, MONO_RGCTX_INFO_METHOD);
9554 /* Avoid varargs in the common case */
9555 if (fsig->param_count == 1)
9556 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
9557 else if (fsig->param_count == 2)
9558 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
9559 else if (fsig->param_count == 3)
9560 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
9561 else if (fsig->param_count == 4)
9562 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
9564 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
9565 } else if (cmethod->string_ctor) {
9566 g_assert (!context_used);
9567 g_assert (!vtable_arg);
9568 /* we simply pass a null pointer */
9569 EMIT_NEW_PCONST (cfg, *sp, NULL);
9570 /* now call the string ctor */
9571 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
9573 MonoInst* callvirt_this_arg = NULL;
9575 if (cmethod->klass->valuetype) {
9576 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
9577 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
9578 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
9583 * The code generated by mini_emit_virtual_call () expects
9584 * iargs [0] to be a boxed instance, but luckily the vcall
9585 * will be transformed into a normal call there.
9587 } else if (context_used) {
9588 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
9591 MonoVTable *vtable = NULL;
9593 if (!cfg->compile_aot)
9594 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9595 CHECK_TYPELOAD (cmethod->klass);
9598 * TypeInitializationExceptions thrown from the mono_runtime_class_init
9599 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
9600 * As a workaround, we call class cctors before allocating objects.
9602 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
9603 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, cmethod->klass, helper_sig_class_init_trampoline, NULL);
9604 if (cfg->verbose_level > 2)
9605 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
9606 class_inits = g_slist_prepend (class_inits, cmethod->klass);
9609 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
9612 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
9615 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
9617 /* Now call the actual ctor */
9618 /* Avoid virtual calls to ctors if possible */
9619 if (mono_class_is_marshalbyref (cmethod->klass))
9620 callvirt_this_arg = sp [0];
9623 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
9624 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9625 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9630 CHECK_CFG_EXCEPTION;
9631 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
9632 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
9633 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
9634 !g_list_find (dont_inline, cmethod)) {
9637 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
9638 cfg->real_offset += 5;
9641 inline_costs += costs - 5;
9643 INLINE_FAILURE ("inline failure");
9644 // FIXME-VT: Clean this up
9645 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
9646 GSHAREDVT_FAILURE(*ip);
9647 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
9649 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
9652 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
9653 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
9654 } else if (context_used &&
9655 ((!mono_method_is_generic_sharable (cmethod, TRUE) ||
9656 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
9657 MonoInst *cmethod_addr;
9659 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
9661 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
9662 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9664 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
9666 INLINE_FAILURE ("ctor call");
9667 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
9668 callvirt_this_arg, NULL, vtable_arg);
9672 if (alloc == NULL) {
9674 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
9675 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
9689 token = read32 (ip + 1);
9690 klass = mini_get_class (method, token, generic_context);
9691 CHECK_TYPELOAD (klass);
9692 if (sp [0]->type != STACK_OBJ)
9695 context_used = mini_class_check_context_used (cfg, klass);
9697 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9704 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9707 if (cfg->compile_aot)
9708 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9710 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9712 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
9714 *sp++ = emit_castclass_with_cache (cfg, klass, args, &bblock);
9717 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9718 MonoMethod *mono_castclass;
9719 MonoInst *iargs [1];
9722 mono_castclass = mono_marshal_get_castclass (klass);
9725 save_cast_details (cfg, klass, sp [0]->dreg, TRUE, &bblock);
9726 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
9727 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9728 reset_cast_details (cfg);
9729 CHECK_CFG_EXCEPTION;
9730 g_assert (costs > 0);
9733 cfg->real_offset += 5;
9738 inline_costs += costs;
9741 ins = handle_castclass (cfg, klass, *sp, context_used);
9742 CHECK_CFG_EXCEPTION;
9752 token = read32 (ip + 1);
9753 klass = mini_get_class (method, token, generic_context);
9754 CHECK_TYPELOAD (klass);
9755 if (sp [0]->type != STACK_OBJ)
9758 context_used = mini_class_check_context_used (cfg, klass);
9760 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9761 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
9768 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9771 if (cfg->compile_aot)
9772 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9774 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9776 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
9779 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9780 MonoMethod *mono_isinst;
9781 MonoInst *iargs [1];
9784 mono_isinst = mono_marshal_get_isinst (klass);
9787 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
9788 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9789 CHECK_CFG_EXCEPTION;
9790 g_assert (costs > 0);
9793 cfg->real_offset += 5;
9798 inline_costs += costs;
9801 ins = handle_isinst (cfg, klass, *sp, context_used);
9802 CHECK_CFG_EXCEPTION;
9809 case CEE_UNBOX_ANY: {
9813 token = read32 (ip + 1);
9814 klass = mini_get_class (method, token, generic_context);
9815 CHECK_TYPELOAD (klass);
9817 mono_save_token_info (cfg, image, token, klass);
9819 context_used = mini_class_check_context_used (cfg, klass);
9821 if (mini_is_gsharedvt_klass (cfg, klass)) {
9822 *sp = handle_unbox_gsharedvt (cfg, klass, *sp, &bblock);
9830 if (generic_class_is_reference_type (cfg, klass)) {
9831 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
9832 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9839 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9842 /*FIXME AOT support*/
9843 if (cfg->compile_aot)
9844 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9846 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9848 /* The wrapper doesn't inline well so the bloat of inlining doesn't pay off. */
9849 *sp++ = emit_castclass_with_cache (cfg, klass, args, &bblock);
9852 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9853 MonoMethod *mono_castclass;
9854 MonoInst *iargs [1];
9857 mono_castclass = mono_marshal_get_castclass (klass);
9860 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
9861 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9862 CHECK_CFG_EXCEPTION;
9863 g_assert (costs > 0);
9866 cfg->real_offset += 5;
9870 inline_costs += costs;
9872 ins = handle_castclass (cfg, klass, *sp, context_used);
9873 CHECK_CFG_EXCEPTION;
9881 if (mono_class_is_nullable (klass)) {
9882 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
9889 ins = handle_unbox (cfg, klass, sp, context_used);
9895 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9908 token = read32 (ip + 1);
9909 klass = mini_get_class (method, token, generic_context);
9910 CHECK_TYPELOAD (klass);
9912 mono_save_token_info (cfg, image, token, klass);
9914 context_used = mini_class_check_context_used (cfg, klass);
9916 if (generic_class_is_reference_type (cfg, klass)) {
9922 if (klass == mono_defaults.void_class)
9924 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
9926 /* frequent check in generic code: box (struct), brtrue */
9928 // FIXME: LLVM can't handle the inconsistent bb linking
9929 if (!mono_class_is_nullable (klass) &&
9930 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
9931 (ip [5] == CEE_BRTRUE ||
9932 ip [5] == CEE_BRTRUE_S ||
9933 ip [5] == CEE_BRFALSE ||
9934 ip [5] == CEE_BRFALSE_S)) {
9935 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
9937 MonoBasicBlock *true_bb, *false_bb;
9941 if (cfg->verbose_level > 3) {
9942 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9943 printf ("<box+brtrue opt>\n");
9951 target = ip + 1 + (signed char)(*ip);
9958 target = ip + 4 + (gint)(read32 (ip));
9962 g_assert_not_reached ();
9966 * We need to link both bblocks, since it is needed for handling stack
9967 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
9968 * Branching to only one of them would lead to inconsistencies, so
9969 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
9971 GET_BBLOCK (cfg, true_bb, target);
9972 GET_BBLOCK (cfg, false_bb, ip);
9974 mono_link_bblock (cfg, cfg->cbb, true_bb);
9975 mono_link_bblock (cfg, cfg->cbb, false_bb);
9977 if (sp != stack_start) {
9978 handle_stack_args (cfg, stack_start, sp - stack_start);
9980 CHECK_UNVERIFIABLE (cfg);
9983 if (COMPILE_LLVM (cfg)) {
9984 dreg = alloc_ireg (cfg);
9985 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
9986 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
9988 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
9990 /* The JIT can't eliminate the iconst+compare */
9991 MONO_INST_NEW (cfg, ins, OP_BR);
9992 ins->inst_target_bb = is_true ? true_bb : false_bb;
9993 MONO_ADD_INS (cfg->cbb, ins);
9996 start_new_bblock = 1;
10000 *sp++ = handle_box (cfg, val, klass, context_used, &bblock);
10002 CHECK_CFG_EXCEPTION;
10011 token = read32 (ip + 1);
10012 klass = mini_get_class (method, token, generic_context);
10013 CHECK_TYPELOAD (klass);
10015 mono_save_token_info (cfg, image, token, klass);
10017 context_used = mini_class_check_context_used (cfg, klass);
10019 if (mono_class_is_nullable (klass)) {
10022 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
10023 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
10027 ins = handle_unbox (cfg, klass, sp, context_used);
10040 MonoClassField *field;
10041 #ifndef DISABLE_REMOTING
10045 gboolean is_instance;
10047 gpointer addr = NULL;
10048 gboolean is_special_static;
10050 MonoInst *store_val = NULL;
10051 MonoInst *thread_ins;
10054 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
10056 if (op == CEE_STFLD) {
10059 store_val = sp [1];
10064 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
10066 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
10069 if (op == CEE_STSFLD) {
10072 store_val = sp [0];
10077 token = read32 (ip + 1);
10078 if (method->wrapper_type != MONO_WRAPPER_NONE) {
10079 field = mono_method_get_wrapper_data (method, token);
10080 klass = field->parent;
10083 field = mono_field_from_token (image, token, &klass, generic_context);
10087 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
10088 FIELD_ACCESS_FAILURE;
10089 mono_class_init (klass);
10091 if (is_instance && *ip != CEE_LDFLDA && is_magic_tls_access (field))
10094 /* if the class is Critical then transparent code cannot access its fields */
10095 if (!is_instance && mono_security_core_clr_enabled ())
10096 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
10098 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
10099 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
10100 if (mono_security_core_clr_enabled ())
10101 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
10105 * LDFLD etc. is usable on static fields as well, so convert those cases to
10108 if (is_instance && field->type->attrs & FIELD_ATTRIBUTE_STATIC) {
10120 g_assert_not_reached ();
10122 is_instance = FALSE;
10125 context_used = mini_class_check_context_used (cfg, klass);
10127 /* INSTANCE CASE */
10129 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
10130 if (op == CEE_STFLD) {
10131 if (target_type_is_incompatible (cfg, field->type, sp [1]))
10133 #ifndef DISABLE_REMOTING
10134 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
10135 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
10136 MonoInst *iargs [5];
10138 GSHAREDVT_FAILURE (op);
10140 iargs [0] = sp [0];
10141 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10142 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10143 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
10145 iargs [4] = sp [1];
10147 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10148 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
10149 iargs, ip, cfg->real_offset, dont_inline, TRUE);
10150 CHECK_CFG_EXCEPTION;
10151 g_assert (costs > 0);
10153 cfg->real_offset += 5;
10156 inline_costs += costs;
10158 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
10165 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10167 if (mini_is_gsharedvt_klass (cfg, klass)) {
10168 MonoInst *offset_ins;
10170 context_used = mini_class_check_context_used (cfg, klass);
10172 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10173 dreg = alloc_ireg_mp (cfg);
10174 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10175 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
10176 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
10178 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
10180 if (sp [0]->opcode != OP_LDADDR)
10181 store->flags |= MONO_INST_FAULT;
10183 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
10184 /* insert call to write barrier */
10188 dreg = alloc_ireg_mp (cfg);
10189 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10190 emit_write_barrier (cfg, ptr, sp [1]);
10193 store->flags |= ins_flag;
10200 #ifndef DISABLE_REMOTING
10201 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
10202 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
10203 MonoInst *iargs [4];
10205 GSHAREDVT_FAILURE (op);
10207 iargs [0] = sp [0];
10208 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10209 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10210 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
10211 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10212 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
10213 iargs, ip, cfg->real_offset, dont_inline, TRUE);
10214 CHECK_CFG_EXCEPTION;
10216 g_assert (costs > 0);
10218 cfg->real_offset += 5;
10222 inline_costs += costs;
10224 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
10230 if (sp [0]->type == STACK_VTYPE) {
10233 /* Have to compute the address of the variable */
10235 var = get_vreg_to_inst (cfg, sp [0]->dreg);
10237 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
10239 g_assert (var->klass == klass);
10241 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
10245 if (op == CEE_LDFLDA) {
10246 if (is_magic_tls_access (field)) {
10247 GSHAREDVT_FAILURE (*ip);
10249 *sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
10251 if (sp [0]->type == STACK_OBJ) {
10252 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10253 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
10256 dreg = alloc_ireg_mp (cfg);
10258 if (mini_is_gsharedvt_klass (cfg, klass)) {
10259 MonoInst *offset_ins;
10261 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10262 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10264 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10266 ins->klass = mono_class_from_mono_type (field->type);
10267 ins->type = STACK_MP;
10273 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10275 if (mini_is_gsharedvt_klass (cfg, klass)) {
10276 MonoInst *offset_ins;
10278 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10279 dreg = alloc_ireg_mp (cfg);
10280 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10281 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
10283 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
10285 load->flags |= ins_flag;
10286 if (sp [0]->opcode != OP_LDADDR)
10287 load->flags |= MONO_INST_FAULT;
10301 * We can only support shared generic static
10302 * field access on architectures where the
10303 * trampoline code has been extended to handle
10304 * the generic class init.
10306 #ifndef MONO_ARCH_VTABLE_REG
10307 GENERIC_SHARING_FAILURE (op);
10310 context_used = mini_class_check_context_used (cfg, klass);
10312 ftype = mono_field_get_type (field);
10314 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
10317 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
10318 * to be called here.
10320 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
10321 mono_class_vtable (cfg->domain, klass);
10322 CHECK_TYPELOAD (klass);
10324 mono_domain_lock (cfg->domain);
10325 if (cfg->domain->special_static_fields)
10326 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
10327 mono_domain_unlock (cfg->domain);
10329 is_special_static = mono_class_field_is_special_static (field);
10331 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
10332 thread_ins = mono_get_thread_intrinsic (cfg);
10336 /* Generate IR to compute the field address */
10337 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
10339 * Fast access to TLS data
10340 * Inline version of get_thread_static_data () in
10344 int idx, static_data_reg, array_reg, dreg;
10346 GSHAREDVT_FAILURE (op);
10348 // offset &= 0x7fffffff;
10349 // idx = (offset >> 24) - 1;
10350 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
10351 MONO_ADD_INS (cfg->cbb, thread_ins);
10352 static_data_reg = alloc_ireg (cfg);
10353 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
10355 if (cfg->compile_aot) {
10356 int offset_reg, offset2_reg, idx_reg;
10358 /* For TLS variables, this will return the TLS offset */
10359 EMIT_NEW_SFLDACONST (cfg, ins, field);
10360 offset_reg = ins->dreg;
10361 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
10362 idx_reg = alloc_ireg (cfg);
10363 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
10364 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
10365 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
10366 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
10367 array_reg = alloc_ireg (cfg);
10368 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
10369 offset2_reg = alloc_ireg (cfg);
10370 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
10371 dreg = alloc_ireg (cfg);
10372 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
10374 offset = (gsize)addr & 0x7fffffff;
10375 idx = (offset >> 24) - 1;
10377 array_reg = alloc_ireg (cfg);
10378 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
10379 dreg = alloc_ireg (cfg);
10380 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
10382 } else if ((cfg->opt & MONO_OPT_SHARED) ||
10383 (cfg->compile_aot && is_special_static) ||
10384 (context_used && is_special_static)) {
10385 MonoInst *iargs [2];
10387 g_assert (field->parent);
10388 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10389 if (context_used) {
10390 iargs [1] = emit_get_rgctx_field (cfg, context_used,
10391 field, MONO_RGCTX_INFO_CLASS_FIELD);
10393 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10395 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10396 } else if (context_used) {
10397 MonoInst *static_data;
10400 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
10401 method->klass->name_space, method->klass->name, method->name,
10402 depth, field->offset);
10405 if (mono_class_needs_cctor_run (klass, method))
10406 emit_generic_class_init (cfg, klass);
10409 * The pointer we're computing here is
10411 * super_info.static_data + field->offset
10413 static_data = emit_get_rgctx_klass (cfg, context_used,
10414 klass, MONO_RGCTX_INFO_STATIC_DATA);
10416 if (mini_is_gsharedvt_klass (cfg, klass)) {
10417 MonoInst *offset_ins;
10419 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10420 dreg = alloc_ireg_mp (cfg);
10421 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
10422 } else if (field->offset == 0) {
10425 int addr_reg = mono_alloc_preg (cfg);
10426 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
10428 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
10429 MonoInst *iargs [2];
10431 g_assert (field->parent);
10432 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10433 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10434 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10436 MonoVTable *vtable = NULL;
10438 if (!cfg->compile_aot)
10439 vtable = mono_class_vtable (cfg->domain, klass);
10440 CHECK_TYPELOAD (klass);
10443 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
10444 if (!(g_slist_find (class_inits, klass))) {
10445 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, klass, helper_sig_class_init_trampoline, NULL);
10446 if (cfg->verbose_level > 2)
10447 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
10448 class_inits = g_slist_prepend (class_inits, klass);
10451 if (cfg->run_cctors) {
10453 /* This makes it so that inlining cannot trigger */
10454 /* .cctors: too many apps depend on them */
10455 /* running with a specific order... */
10457 if (! vtable->initialized)
10458 INLINE_FAILURE ("class init");
10459 ex = mono_runtime_class_init_full (vtable, FALSE);
10461 set_exception_object (cfg, ex);
10462 goto exception_exit;
10466 if (cfg->compile_aot)
10467 EMIT_NEW_SFLDACONST (cfg, ins, field);
10470 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10472 EMIT_NEW_PCONST (cfg, ins, addr);
10475 MonoInst *iargs [1];
10476 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
10477 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
10481 /* Generate IR to do the actual load/store operation */
10483 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
10484 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10485 /* FIXME it's questionable if release semantics require a full barrier or just StoreStore */
10486 emit_memory_barrier (cfg, FullBarrier);
10489 if (op == CEE_LDSFLDA) {
10490 ins->klass = mono_class_from_mono_type (ftype);
10491 ins->type = STACK_PTR;
10493 } else if (op == CEE_STSFLD) {
10496 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
10497 store->flags |= ins_flag;
10499 gboolean is_const = FALSE;
10500 MonoVTable *vtable = NULL;
10501 gpointer addr = NULL;
10503 if (!context_used) {
10504 vtable = mono_class_vtable (cfg->domain, klass);
10505 CHECK_TYPELOAD (klass);
10507 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
10508 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
10509 int ro_type = ftype->type;
10511 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10512 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
10513 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
10516 GSHAREDVT_FAILURE (op);
10518 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
10521 case MONO_TYPE_BOOLEAN:
10523 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
10527 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
10530 case MONO_TYPE_CHAR:
10532 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
10536 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
10541 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
10545 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
10550 case MONO_TYPE_PTR:
10551 case MONO_TYPE_FNPTR:
10552 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10553 type_to_eval_stack_type ((cfg), field->type, *sp);
10556 case MONO_TYPE_STRING:
10557 case MONO_TYPE_OBJECT:
10558 case MONO_TYPE_CLASS:
10559 case MONO_TYPE_SZARRAY:
10560 case MONO_TYPE_ARRAY:
10561 if (!mono_gc_is_moving ()) {
10562 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10563 type_to_eval_stack_type ((cfg), field->type, *sp);
10571 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
10576 case MONO_TYPE_VALUETYPE:
10586 CHECK_STACK_OVF (1);
10588 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
10589 load->flags |= ins_flag;
10595 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
10596 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10597 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
10598 emit_memory_barrier (cfg, FullBarrier);
10609 token = read32 (ip + 1);
10610 klass = mini_get_class (method, token, generic_context);
10611 CHECK_TYPELOAD (klass);
10612 if (ins_flag & MONO_INST_VOLATILE) {
10613 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10614 /* FIXME it's questionable if release semantics require a full barrier or just StoreStore */
10615 emit_memory_barrier (cfg, FullBarrier);
10617 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
10618 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
10619 ins->flags |= ins_flag;
10620 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
10621 generic_class_is_reference_type (cfg, klass)) {
10622 /* insert call to write barrier */
10623 emit_write_barrier (cfg, sp [0], sp [1]);
10635 const char *data_ptr;
10637 guint32 field_token;
10643 token = read32 (ip + 1);
10645 klass = mini_get_class (method, token, generic_context);
10646 CHECK_TYPELOAD (klass);
10648 context_used = mini_class_check_context_used (cfg, klass);
10650 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
10651 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
10652 ins->sreg1 = sp [0]->dreg;
10653 ins->type = STACK_I4;
10654 ins->dreg = alloc_ireg (cfg);
10655 MONO_ADD_INS (cfg->cbb, ins);
10656 *sp = mono_decompose_opcode (cfg, ins);
10659 if (context_used) {
10660 MonoInst *args [3];
10661 MonoClass *array_class = mono_array_class_get (klass, 1);
10662 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
10664 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
10667 args [0] = emit_get_rgctx_klass (cfg, context_used,
10668 array_class, MONO_RGCTX_INFO_VTABLE);
10673 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
10675 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
10677 if (cfg->opt & MONO_OPT_SHARED) {
10678 /* Decompose now to avoid problems with references to the domainvar */
10679 MonoInst *iargs [3];
10681 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10682 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10683 iargs [2] = sp [0];
10685 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
10687 /* Decompose later since it is needed by abcrem */
10688 MonoClass *array_type = mono_array_class_get (klass, 1);
10689 mono_class_vtable (cfg->domain, array_type);
10690 CHECK_TYPELOAD (array_type);
10692 MONO_INST_NEW (cfg, ins, OP_NEWARR);
10693 ins->dreg = alloc_ireg_ref (cfg);
10694 ins->sreg1 = sp [0]->dreg;
10695 ins->inst_newa_class = klass;
10696 ins->type = STACK_OBJ;
10697 ins->klass = array_type;
10698 MONO_ADD_INS (cfg->cbb, ins);
10699 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10700 cfg->cbb->has_array_access = TRUE;
10702 /* Needed so mono_emit_load_get_addr () gets called */
10703 mono_get_got_var (cfg);
10713 * we inline/optimize the initialization sequence if possible.
10714 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
10715 * for small sizes open code the memcpy
10716 * ensure the rva field is big enough
10718 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
10719 MonoMethod *memcpy_method = get_memcpy_method ();
10720 MonoInst *iargs [3];
10721 int add_reg = alloc_ireg_mp (cfg);
10723 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
10724 if (cfg->compile_aot) {
10725 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
10727 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
10729 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
10730 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
10739 if (sp [0]->type != STACK_OBJ)
10742 MONO_INST_NEW (cfg, ins, OP_LDLEN);
10743 ins->dreg = alloc_preg (cfg);
10744 ins->sreg1 = sp [0]->dreg;
10745 ins->type = STACK_I4;
10746 /* This flag will be inherited by the decomposition */
10747 ins->flags |= MONO_INST_FAULT;
10748 MONO_ADD_INS (cfg->cbb, ins);
10749 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10750 cfg->cbb->has_array_access = TRUE;
10758 if (sp [0]->type != STACK_OBJ)
10761 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10763 klass = mini_get_class (method, read32 (ip + 1), generic_context);
10764 CHECK_TYPELOAD (klass);
10765 /* we need to make sure that this array is exactly the type it needs
10766 * to be for correctness. the wrappers are lax with their usage
10767 * so we need to ignore them here
10769 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
10770 MonoClass *array_class = mono_array_class_get (klass, 1);
10771 mini_emit_check_array_type (cfg, sp [0], array_class);
10772 CHECK_TYPELOAD (array_class);
10776 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10781 case CEE_LDELEM_I1:
10782 case CEE_LDELEM_U1:
10783 case CEE_LDELEM_I2:
10784 case CEE_LDELEM_U2:
10785 case CEE_LDELEM_I4:
10786 case CEE_LDELEM_U4:
10787 case CEE_LDELEM_I8:
10789 case CEE_LDELEM_R4:
10790 case CEE_LDELEM_R8:
10791 case CEE_LDELEM_REF: {
10797 if (*ip == CEE_LDELEM) {
10799 token = read32 (ip + 1);
10800 klass = mini_get_class (method, token, generic_context);
10801 CHECK_TYPELOAD (klass);
10802 mono_class_init (klass);
10805 klass = array_access_to_klass (*ip);
10807 if (sp [0]->type != STACK_OBJ)
10810 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10812 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
10813 // FIXME-VT: OP_ICONST optimization
10814 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10815 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10816 ins->opcode = OP_LOADV_MEMBASE;
10817 } else if (sp [1]->opcode == OP_ICONST) {
10818 int array_reg = sp [0]->dreg;
10819 int index_reg = sp [1]->dreg;
10820 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
10822 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
10823 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
10825 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10826 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10829 if (*ip == CEE_LDELEM)
10836 case CEE_STELEM_I1:
10837 case CEE_STELEM_I2:
10838 case CEE_STELEM_I4:
10839 case CEE_STELEM_I8:
10840 case CEE_STELEM_R4:
10841 case CEE_STELEM_R8:
10842 case CEE_STELEM_REF:
10847 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10849 if (*ip == CEE_STELEM) {
10851 token = read32 (ip + 1);
10852 klass = mini_get_class (method, token, generic_context);
10853 CHECK_TYPELOAD (klass);
10854 mono_class_init (klass);
10857 klass = array_access_to_klass (*ip);
10859 if (sp [0]->type != STACK_OBJ)
10862 emit_array_store (cfg, klass, sp, TRUE);
10864 if (*ip == CEE_STELEM)
10871 case CEE_CKFINITE: {
10875 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
10876 ins->sreg1 = sp [0]->dreg;
10877 ins->dreg = alloc_freg (cfg);
10878 ins->type = STACK_R8;
10879 MONO_ADD_INS (bblock, ins);
10881 *sp++ = mono_decompose_opcode (cfg, ins);
10886 case CEE_REFANYVAL: {
10887 MonoInst *src_var, *src;
10889 int klass_reg = alloc_preg (cfg);
10890 int dreg = alloc_preg (cfg);
10892 GSHAREDVT_FAILURE (*ip);
10895 MONO_INST_NEW (cfg, ins, *ip);
10898 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
10899 CHECK_TYPELOAD (klass);
10900 mono_class_init (klass);
10902 context_used = mini_class_check_context_used (cfg, klass);
10905 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10907 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
10908 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
10909 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
10911 if (context_used) {
10912 MonoInst *klass_ins;
10914 klass_ins = emit_get_rgctx_klass (cfg, context_used,
10915 klass, MONO_RGCTX_INFO_KLASS);
10918 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
10919 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
10921 mini_emit_class_check (cfg, klass_reg, klass);
10923 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
10924 ins->type = STACK_MP;
10929 case CEE_MKREFANY: {
10930 MonoInst *loc, *addr;
10932 GSHAREDVT_FAILURE (*ip);
10935 MONO_INST_NEW (cfg, ins, *ip);
10938 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
10939 CHECK_TYPELOAD (klass);
10940 mono_class_init (klass);
10942 context_used = mini_class_check_context_used (cfg, klass);
10944 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
10945 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
10947 if (context_used) {
10948 MonoInst *const_ins;
10949 int type_reg = alloc_preg (cfg);
10951 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
10952 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
10953 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
10954 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
10955 } else if (cfg->compile_aot) {
10956 int const_reg = alloc_preg (cfg);
10957 int type_reg = alloc_preg (cfg);
10959 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
10960 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
10961 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
10962 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
10964 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
10965 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
10967 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
10969 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
10970 ins->type = STACK_VTYPE;
10971 ins->klass = mono_defaults.typed_reference_class;
10976 case CEE_LDTOKEN: {
10978 MonoClass *handle_class;
10980 CHECK_STACK_OVF (1);
10983 n = read32 (ip + 1);
10985 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
10986 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
10987 handle = mono_method_get_wrapper_data (method, n);
10988 handle_class = mono_method_get_wrapper_data (method, n + 1);
10989 if (handle_class == mono_defaults.typehandle_class)
10990 handle = &((MonoClass*)handle)->byval_arg;
10993 handle = mono_ldtoken (image, n, &handle_class, generic_context);
10997 mono_class_init (handle_class);
10998 if (cfg->generic_sharing_context) {
10999 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
11000 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
11001 /* This case handles ldtoken
11002 of an open type, like for
11005 } else if (handle_class == mono_defaults.typehandle_class) {
11006 /* If we get a MONO_TYPE_CLASS
11007 then we need to provide the
11009 instantiation of it. */
11010 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
11013 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type (handle));
11014 } else if (handle_class == mono_defaults.fieldhandle_class)
11015 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
11016 else if (handle_class == mono_defaults.methodhandle_class)
11017 context_used = mini_method_check_context_used (cfg, handle);
11019 g_assert_not_reached ();
11022 if ((cfg->opt & MONO_OPT_SHARED) &&
11023 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
11024 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
11025 MonoInst *addr, *vtvar, *iargs [3];
11026 int method_context_used;
11028 method_context_used = mini_method_check_context_used (cfg, method);
11030 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11032 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
11033 EMIT_NEW_ICONST (cfg, iargs [1], n);
11034 if (method_context_used) {
11035 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
11036 method, MONO_RGCTX_INFO_METHOD);
11037 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
11039 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
11040 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
11042 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11044 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11046 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11048 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
11049 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
11050 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
11051 (cmethod->klass == mono_defaults.systemtype_class) &&
11052 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
11053 MonoClass *tclass = mono_class_from_mono_type (handle);
11055 mono_class_init (tclass);
11056 if (context_used) {
11057 ins = emit_get_rgctx_klass (cfg, context_used,
11058 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
11059 } else if (cfg->compile_aot) {
11060 if (method->wrapper_type) {
11061 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
11062 /* Special case for static synchronized wrappers */
11063 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
11065 /* FIXME: n is not a normal token */
11067 EMIT_NEW_PCONST (cfg, ins, NULL);
11070 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
11073 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
11075 ins->type = STACK_OBJ;
11076 ins->klass = cmethod->klass;
11079 MonoInst *addr, *vtvar;
11081 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11083 if (context_used) {
11084 if (handle_class == mono_defaults.typehandle_class) {
11085 ins = emit_get_rgctx_klass (cfg, context_used,
11086 mono_class_from_mono_type (handle),
11087 MONO_RGCTX_INFO_TYPE);
11088 } else if (handle_class == mono_defaults.methodhandle_class) {
11089 ins = emit_get_rgctx_method (cfg, context_used,
11090 handle, MONO_RGCTX_INFO_METHOD);
11091 } else if (handle_class == mono_defaults.fieldhandle_class) {
11092 ins = emit_get_rgctx_field (cfg, context_used,
11093 handle, MONO_RGCTX_INFO_CLASS_FIELD);
11095 g_assert_not_reached ();
11097 } else if (cfg->compile_aot) {
11098 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
11100 EMIT_NEW_PCONST (cfg, ins, handle);
11102 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11103 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11104 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11114 MONO_INST_NEW (cfg, ins, OP_THROW);
11116 ins->sreg1 = sp [0]->dreg;
11118 bblock->out_of_line = TRUE;
11119 MONO_ADD_INS (bblock, ins);
11120 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11121 MONO_ADD_INS (bblock, ins);
11124 link_bblock (cfg, bblock, end_bblock);
11125 start_new_bblock = 1;
11127 case CEE_ENDFINALLY:
11128 /* mono_save_seq_point_info () depends on this */
11129 if (sp != stack_start)
11130 emit_seq_point (cfg, method, ip, FALSE, FALSE);
11131 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
11132 MONO_ADD_INS (bblock, ins);
11134 start_new_bblock = 1;
11137 * Control will leave the method so empty the stack, otherwise
11138 * the next basic block will start with a nonempty stack.
11140 while (sp != stack_start) {
11145 case CEE_LEAVE_S: {
11148 if (*ip == CEE_LEAVE) {
11150 target = ip + 5 + (gint32)read32(ip + 1);
11153 target = ip + 2 + (signed char)(ip [1]);
11156 /* empty the stack */
11157 while (sp != stack_start) {
11162 * If this leave statement is in a catch block, check for a
11163 * pending exception, and rethrow it if necessary.
11164 * We avoid doing this in runtime invoke wrappers, since those are called
11165 * by native code which expects the wrapper to catch all exceptions.
11167 for (i = 0; i < header->num_clauses; ++i) {
11168 MonoExceptionClause *clause = &header->clauses [i];
11171 * Use <= in the final comparison to handle clauses with multiple
11172 * leave statements, like in bug #78024.
11173 * The ordering of the exception clauses guarantees that we find the
11174 * innermost clause.
11176 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
11178 MonoBasicBlock *dont_throw;
11183 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
11186 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
11188 NEW_BBLOCK (cfg, dont_throw);
11191 * Currently, we always rethrow the abort exception, despite the
11192 * fact that this is not correct. See thread6.cs for an example.
11193 * But propagating the abort exception is more important than
11194 * getting the semantics right.
11196 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
11197 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
11198 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
11200 MONO_START_BB (cfg, dont_throw);
11205 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
11207 MonoExceptionClause *clause;
11209 for (tmp = handlers; tmp; tmp = tmp->next) {
11210 clause = tmp->data;
11211 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
11213 link_bblock (cfg, bblock, tblock);
11214 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
11215 ins->inst_target_bb = tblock;
11216 ins->inst_eh_block = clause;
11217 MONO_ADD_INS (bblock, ins);
11218 bblock->has_call_handler = 1;
11219 if (COMPILE_LLVM (cfg)) {
11220 MonoBasicBlock *target_bb;
11223 * Link the finally bblock with the target, since it will
11224 * conceptually branch there.
11225 * FIXME: Have to link the bblock containing the endfinally.
11227 GET_BBLOCK (cfg, target_bb, target);
11228 link_bblock (cfg, tblock, target_bb);
11231 g_list_free (handlers);
11234 MONO_INST_NEW (cfg, ins, OP_BR);
11235 MONO_ADD_INS (bblock, ins);
11236 GET_BBLOCK (cfg, tblock, target);
11237 link_bblock (cfg, bblock, tblock);
11238 ins->inst_target_bb = tblock;
11239 start_new_bblock = 1;
11241 if (*ip == CEE_LEAVE)
11250 * Mono specific opcodes
11252 case MONO_CUSTOM_PREFIX: {
11254 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
11258 case CEE_MONO_ICALL: {
11260 MonoJitICallInfo *info;
11262 token = read32 (ip + 2);
11263 func = mono_method_get_wrapper_data (method, token);
11264 info = mono_find_jit_icall_by_addr (func);
11266 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
11269 CHECK_STACK (info->sig->param_count);
11270 sp -= info->sig->param_count;
11272 ins = mono_emit_jit_icall (cfg, info->func, sp);
11273 if (!MONO_TYPE_IS_VOID (info->sig->ret))
11277 inline_costs += 10 * num_calls++;
11281 case CEE_MONO_LDPTR: {
11284 CHECK_STACK_OVF (1);
11286 token = read32 (ip + 2);
11288 ptr = mono_method_get_wrapper_data (method, token);
11289 /* FIXME: Generalize this */
11290 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
11291 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
11296 EMIT_NEW_PCONST (cfg, ins, ptr);
11299 inline_costs += 10 * num_calls++;
11300 /* Can't embed random pointers into AOT code */
11304 case CEE_MONO_JIT_ICALL_ADDR: {
11305 MonoJitICallInfo *callinfo;
11308 CHECK_STACK_OVF (1);
11310 token = read32 (ip + 2);
11312 ptr = mono_method_get_wrapper_data (method, token);
11313 callinfo = mono_find_jit_icall_by_addr (ptr);
11314 g_assert (callinfo);
11315 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
11318 inline_costs += 10 * num_calls++;
11321 case CEE_MONO_ICALL_ADDR: {
11322 MonoMethod *cmethod;
11325 CHECK_STACK_OVF (1);
11327 token = read32 (ip + 2);
11329 cmethod = mono_method_get_wrapper_data (method, token);
11331 if (cfg->compile_aot) {
11332 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
11334 ptr = mono_lookup_internal_call (cmethod);
11336 EMIT_NEW_PCONST (cfg, ins, ptr);
11342 case CEE_MONO_VTADDR: {
11343 MonoInst *src_var, *src;
11349 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11350 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
11355 case CEE_MONO_NEWOBJ: {
11356 MonoInst *iargs [2];
11358 CHECK_STACK_OVF (1);
11360 token = read32 (ip + 2);
11361 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11362 mono_class_init (klass);
11363 NEW_DOMAINCONST (cfg, iargs [0]);
11364 MONO_ADD_INS (cfg->cbb, iargs [0]);
11365 NEW_CLASSCONST (cfg, iargs [1], klass);
11366 MONO_ADD_INS (cfg->cbb, iargs [1]);
11367 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
11369 inline_costs += 10 * num_calls++;
11372 case CEE_MONO_OBJADDR:
11375 MONO_INST_NEW (cfg, ins, OP_MOVE);
11376 ins->dreg = alloc_ireg_mp (cfg);
11377 ins->sreg1 = sp [0]->dreg;
11378 ins->type = STACK_MP;
11379 MONO_ADD_INS (cfg->cbb, ins);
11383 case CEE_MONO_LDNATIVEOBJ:
11385 * Similar to LDOBJ, but instead load the unmanaged
11386 * representation of the vtype to the stack.
11391 token = read32 (ip + 2);
11392 klass = mono_method_get_wrapper_data (method, token);
11393 g_assert (klass->valuetype);
11394 mono_class_init (klass);
11397 MonoInst *src, *dest, *temp;
11400 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
11401 temp->backend.is_pinvoke = 1;
11402 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
11403 mini_emit_stobj (cfg, dest, src, klass, TRUE);
11405 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
11406 dest->type = STACK_VTYPE;
11407 dest->klass = klass;
11413 case CEE_MONO_RETOBJ: {
11415 * Same as RET, but return the native representation of a vtype
11418 g_assert (cfg->ret);
11419 g_assert (mono_method_signature (method)->pinvoke);
11424 token = read32 (ip + 2);
11425 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11427 if (!cfg->vret_addr) {
11428 g_assert (cfg->ret_var_is_local);
11430 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
11432 EMIT_NEW_RETLOADA (cfg, ins);
11434 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
11436 if (sp != stack_start)
11439 MONO_INST_NEW (cfg, ins, OP_BR);
11440 ins->inst_target_bb = end_bblock;
11441 MONO_ADD_INS (bblock, ins);
11442 link_bblock (cfg, bblock, end_bblock);
11443 start_new_bblock = 1;
11447 case CEE_MONO_CISINST:
11448 case CEE_MONO_CCASTCLASS: {
11453 token = read32 (ip + 2);
11454 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11455 if (ip [1] == CEE_MONO_CISINST)
11456 ins = handle_cisinst (cfg, klass, sp [0]);
11458 ins = handle_ccastclass (cfg, klass, sp [0]);
11464 case CEE_MONO_SAVE_LMF:
11465 case CEE_MONO_RESTORE_LMF:
11466 #ifdef MONO_ARCH_HAVE_LMF_OPS
11467 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
11468 MONO_ADD_INS (bblock, ins);
11469 cfg->need_lmf_area = TRUE;
11473 case CEE_MONO_CLASSCONST:
11474 CHECK_STACK_OVF (1);
11476 token = read32 (ip + 2);
11477 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
11480 inline_costs += 10 * num_calls++;
11482 case CEE_MONO_NOT_TAKEN:
11483 bblock->out_of_line = TRUE;
11486 case CEE_MONO_TLS: {
11489 CHECK_STACK_OVF (1);
11491 key = (gint32)read32 (ip + 2);
11492 g_assert (key < TLS_KEY_NUM);
11494 ins = mono_create_tls_get (cfg, key);
11496 if (cfg->compile_aot) {
11498 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
11499 ins->dreg = alloc_preg (cfg);
11500 ins->type = STACK_PTR;
11502 g_assert_not_reached ();
11505 ins->type = STACK_PTR;
11506 MONO_ADD_INS (bblock, ins);
11511 case CEE_MONO_DYN_CALL: {
11512 MonoCallInst *call;
11514 /* It would be easier to call a trampoline, but that would put an
11515 * extra frame on the stack, confusing exception handling. So
11516 * implement it inline using an opcode for now.
11519 if (!cfg->dyn_call_var) {
11520 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11521 /* prevent it from being register allocated */
11522 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
11525 /* Has to use a call inst since it local regalloc expects it */
11526 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
11527 ins = (MonoInst*)call;
11529 ins->sreg1 = sp [0]->dreg;
11530 ins->sreg2 = sp [1]->dreg;
11531 MONO_ADD_INS (bblock, ins);
11533 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
11536 inline_costs += 10 * num_calls++;
11540 case CEE_MONO_MEMORY_BARRIER: {
11542 emit_memory_barrier (cfg, (int)read32 (ip + 1));
11546 case CEE_MONO_JIT_ATTACH: {
11547 MonoInst *args [16];
11548 MonoInst *ad_ins, *lmf_ins;
11549 MonoBasicBlock *next_bb = NULL;
11551 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11553 EMIT_NEW_PCONST (cfg, ins, NULL);
11554 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11560 ad_ins = mono_get_domain_intrinsic (cfg);
11561 lmf_ins = mono_get_lmf_intrinsic (cfg);
11564 if (MONO_ARCH_HAVE_TLS_GET && ad_ins && lmf_ins) {
11565 NEW_BBLOCK (cfg, next_bb);
11567 MONO_ADD_INS (cfg->cbb, ad_ins);
11568 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ad_ins->dreg, 0);
11569 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
11571 MONO_ADD_INS (cfg->cbb, lmf_ins);
11572 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, lmf_ins->dreg, 0);
11573 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
11576 if (cfg->compile_aot) {
11577 /* AOT code is only used in the root domain */
11578 EMIT_NEW_PCONST (cfg, args [0], NULL);
11580 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
11582 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
11583 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11586 MONO_START_BB (cfg, next_bb);
11592 case CEE_MONO_JIT_DETACH: {
11593 MonoInst *args [16];
11595 /* Restore the original domain */
11596 dreg = alloc_ireg (cfg);
11597 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
11598 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
11603 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
11609 case CEE_PREFIX1: {
11612 case CEE_ARGLIST: {
11613 /* somewhat similar to LDTOKEN */
11614 MonoInst *addr, *vtvar;
11615 CHECK_STACK_OVF (1);
11616 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
11618 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11619 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
11621 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11622 ins->type = STACK_VTYPE;
11623 ins->klass = mono_defaults.argumenthandle_class;
11636 * The following transforms:
11637 * CEE_CEQ into OP_CEQ
11638 * CEE_CGT into OP_CGT
11639 * CEE_CGT_UN into OP_CGT_UN
11640 * CEE_CLT into OP_CLT
11641 * CEE_CLT_UN into OP_CLT_UN
11643 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
11645 MONO_INST_NEW (cfg, ins, cmp->opcode);
11647 cmp->sreg1 = sp [0]->dreg;
11648 cmp->sreg2 = sp [1]->dreg;
11649 type_from_op (cmp, sp [0], sp [1]);
11651 if ((sp [0]->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
11652 cmp->opcode = OP_LCOMPARE;
11653 else if (sp [0]->type == STACK_R8)
11654 cmp->opcode = OP_FCOMPARE;
11656 cmp->opcode = OP_ICOMPARE;
11657 MONO_ADD_INS (bblock, cmp);
11658 ins->type = STACK_I4;
11659 ins->dreg = alloc_dreg (cfg, ins->type);
11660 type_from_op (ins, sp [0], sp [1]);
11662 if (cmp->opcode == OP_FCOMPARE) {
11664 * The backends expect the fceq opcodes to do the
11667 cmp->opcode = OP_NOP;
11668 ins->sreg1 = cmp->sreg1;
11669 ins->sreg2 = cmp->sreg2;
11671 MONO_ADD_INS (bblock, ins);
11677 MonoInst *argconst;
11678 MonoMethod *cil_method;
11680 CHECK_STACK_OVF (1);
11682 n = read32 (ip + 2);
11683 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
11684 if (!cmethod || mono_loader_get_last_error ())
11686 mono_class_init (cmethod->klass);
11688 mono_save_token_info (cfg, image, n, cmethod);
11690 context_used = mini_method_check_context_used (cfg, cmethod);
11692 cil_method = cmethod;
11693 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
11694 METHOD_ACCESS_FAILURE;
11696 if (mono_security_cas_enabled ()) {
11697 if (check_linkdemand (cfg, method, cmethod))
11698 INLINE_FAILURE ("linkdemand");
11699 CHECK_CFG_EXCEPTION;
11700 } else if (mono_security_core_clr_enabled ()) {
11701 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
11705 * Optimize the common case of ldftn+delegate creation
11707 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
11708 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
11709 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
11710 MonoInst *target_ins;
11711 MonoMethod *invoke;
11712 int invoke_context_used;
11714 invoke = mono_get_delegate_invoke (ctor_method->klass);
11715 if (!invoke || !mono_method_signature (invoke))
11718 invoke_context_used = mini_method_check_context_used (cfg, invoke);
11720 target_ins = sp [-1];
11722 if (mono_security_core_clr_enabled ())
11723 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
11725 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
11726 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
11727 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
11728 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
11729 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
11733 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
11734 /* FIXME: SGEN support */
11735 if (invoke_context_used == 0) {
11737 if (cfg->verbose_level > 3)
11738 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
11740 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
11741 CHECK_CFG_EXCEPTION;
11750 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
11751 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
11755 inline_costs += 10 * num_calls++;
11758 case CEE_LDVIRTFTN: {
11759 MonoInst *args [2];
11763 n = read32 (ip + 2);
11764 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
11765 if (!cmethod || mono_loader_get_last_error ())
11767 mono_class_init (cmethod->klass);
11769 context_used = mini_method_check_context_used (cfg, cmethod);
11771 if (mono_security_cas_enabled ()) {
11772 if (check_linkdemand (cfg, method, cmethod))
11773 INLINE_FAILURE ("linkdemand");
11774 CHECK_CFG_EXCEPTION;
11775 } else if (mono_security_core_clr_enabled ()) {
11776 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
11782 args [1] = emit_get_rgctx_method (cfg, context_used,
11783 cmethod, MONO_RGCTX_INFO_METHOD);
11786 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
11788 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
11791 inline_costs += 10 * num_calls++;
11795 CHECK_STACK_OVF (1);
11797 n = read16 (ip + 2);
11799 EMIT_NEW_ARGLOAD (cfg, ins, n);
11804 CHECK_STACK_OVF (1);
11806 n = read16 (ip + 2);
11808 NEW_ARGLOADA (cfg, ins, n);
11809 MONO_ADD_INS (cfg->cbb, ins);
11817 n = read16 (ip + 2);
11819 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
11821 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
11825 CHECK_STACK_OVF (1);
11827 n = read16 (ip + 2);
11829 EMIT_NEW_LOCLOAD (cfg, ins, n);
11834 unsigned char *tmp_ip;
11835 CHECK_STACK_OVF (1);
11837 n = read16 (ip + 2);
11840 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
11846 EMIT_NEW_LOCLOADA (cfg, ins, n);
11855 n = read16 (ip + 2);
11857 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
11859 emit_stloc_ir (cfg, sp, header, n);
11866 if (sp != stack_start)
11868 if (cfg->method != method)
11870 * Inlining this into a loop in a parent could lead to
11871 * stack overflows which is different behavior than the
11872 * non-inlined case, thus disable inlining in this case.
11874 goto inline_failure;
11876 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
11877 ins->dreg = alloc_preg (cfg);
11878 ins->sreg1 = sp [0]->dreg;
11879 ins->type = STACK_PTR;
11880 MONO_ADD_INS (cfg->cbb, ins);
11882 cfg->flags |= MONO_CFG_HAS_ALLOCA;
11884 ins->flags |= MONO_INST_INIT;
11889 case CEE_ENDFILTER: {
11890 MonoExceptionClause *clause, *nearest;
11891 int cc, nearest_num;
11895 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
11897 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
11898 ins->sreg1 = (*sp)->dreg;
11899 MONO_ADD_INS (bblock, ins);
11900 start_new_bblock = 1;
11905 for (cc = 0; cc < header->num_clauses; ++cc) {
11906 clause = &header->clauses [cc];
11907 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
11908 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
11909 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
11914 g_assert (nearest);
11915 if ((ip - header->code) != nearest->handler_offset)
11920 case CEE_UNALIGNED_:
11921 ins_flag |= MONO_INST_UNALIGNED;
11922 /* FIXME: record alignment? we can assume 1 for now */
11926 case CEE_VOLATILE_:
11927 ins_flag |= MONO_INST_VOLATILE;
11931 ins_flag |= MONO_INST_TAILCALL;
11932 cfg->flags |= MONO_CFG_HAS_TAIL;
11933 /* Can't inline tail calls at this time */
11934 inline_costs += 100000;
11941 token = read32 (ip + 2);
11942 klass = mini_get_class (method, token, generic_context);
11943 CHECK_TYPELOAD (klass);
11944 if (generic_class_is_reference_type (cfg, klass))
11945 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
11947 mini_emit_initobj (cfg, *sp, NULL, klass);
11951 case CEE_CONSTRAINED_:
11953 token = read32 (ip + 2);
11954 constrained_call = mini_get_class (method, token, generic_context);
11955 CHECK_TYPELOAD (constrained_call);
11959 case CEE_INITBLK: {
11960 MonoInst *iargs [3];
11964 /* Skip optimized paths for volatile operations. */
11965 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
11966 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
11967 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
11968 /* emit_memset only works when val == 0 */
11969 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
11972 iargs [0] = sp [0];
11973 iargs [1] = sp [1];
11974 iargs [2] = sp [2];
11975 if (ip [1] == CEE_CPBLK) {
11977 * FIXME: It's unclear whether we should be emitting both the acquire
11978 * and release barriers for cpblk. It is technically both a load and
11979 * store operation, so it seems like that's the sensible thing to do.
11981 MonoMethod *memcpy_method = get_memcpy_method ();
11982 if (ins_flag & MONO_INST_VOLATILE) {
11983 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11984 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
11985 emit_memory_barrier (cfg, FullBarrier);
11987 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11988 call->flags |= ins_flag;
11989 if (ins_flag & MONO_INST_VOLATILE) {
11990 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
11991 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
11992 emit_memory_barrier (cfg, FullBarrier);
11995 MonoMethod *memset_method = get_memset_method ();
11996 if (ins_flag & MONO_INST_VOLATILE) {
11997 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11998 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
11999 emit_memory_barrier (cfg, FullBarrier);
12001 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
12002 call->flags |= ins_flag;
12013 ins_flag |= MONO_INST_NOTYPECHECK;
12015 ins_flag |= MONO_INST_NORANGECHECK;
12016 /* we ignore the no-nullcheck for now since we
12017 * really do it explicitly only when doing callvirt->call
12021 case CEE_RETHROW: {
12023 int handler_offset = -1;
12025 for (i = 0; i < header->num_clauses; ++i) {
12026 MonoExceptionClause *clause = &header->clauses [i];
12027 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
12028 handler_offset = clause->handler_offset;
12033 bblock->flags |= BB_EXCEPTION_UNSAFE;
12035 g_assert (handler_offset != -1);
12037 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
12038 MONO_INST_NEW (cfg, ins, OP_RETHROW);
12039 ins->sreg1 = load->dreg;
12040 MONO_ADD_INS (bblock, ins);
12042 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12043 MONO_ADD_INS (bblock, ins);
12046 link_bblock (cfg, bblock, end_bblock);
12047 start_new_bblock = 1;
12055 GSHAREDVT_FAILURE (*ip);
12057 CHECK_STACK_OVF (1);
12059 token = read32 (ip + 2);
12060 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic && !generic_context) {
12061 MonoType *type = mono_type_create_from_typespec (image, token);
12062 val = mono_type_size (type, &ialign);
12064 MonoClass *klass = mono_class_get_full (image, token, generic_context);
12065 CHECK_TYPELOAD (klass);
12066 mono_class_init (klass);
12067 val = mono_type_size (&klass->byval_arg, &ialign);
12069 EMIT_NEW_ICONST (cfg, ins, val);
12074 case CEE_REFANYTYPE: {
12075 MonoInst *src_var, *src;
12077 GSHAREDVT_FAILURE (*ip);
12083 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12085 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12086 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12087 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
12092 case CEE_READONLY_:
12105 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
12115 g_warning ("opcode 0x%02x not handled", *ip);
12119 if (start_new_bblock != 1)
12122 bblock->cil_length = ip - bblock->cil_code;
12123 if (bblock->next_bb) {
12124 /* This could already be set because of inlining, #693905 */
12125 MonoBasicBlock *bb = bblock;
12127 while (bb->next_bb)
12129 bb->next_bb = end_bblock;
12131 bblock->next_bb = end_bblock;
12134 if (cfg->method == method && cfg->domainvar) {
12136 MonoInst *get_domain;
12138 cfg->cbb = init_localsbb;
12140 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
12141 MONO_ADD_INS (cfg->cbb, get_domain);
12143 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
12145 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
12146 MONO_ADD_INS (cfg->cbb, store);
12149 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
12150 if (cfg->compile_aot)
12151 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
12152 mono_get_got_var (cfg);
12155 if (cfg->method == method && cfg->got_var)
12156 mono_emit_load_got_addr (cfg);
12158 if (init_localsbb) {
12159 cfg->cbb = init_localsbb;
12161 for (i = 0; i < header->num_locals; ++i) {
12162 emit_init_local (cfg, i, header->locals [i], init_locals);
12166 if (cfg->init_ref_vars && cfg->method == method) {
12167 /* Emit initialization for ref vars */
12168 // FIXME: Avoid duplication initialization for IL locals.
12169 for (i = 0; i < cfg->num_varinfo; ++i) {
12170 MonoInst *ins = cfg->varinfo [i];
12172 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
12173 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
12177 if (cfg->lmf_var && cfg->method == method) {
12178 cfg->cbb = init_localsbb;
12179 emit_push_lmf (cfg);
12183 MonoBasicBlock *bb;
12186 * Make seq points at backward branch targets interruptable.
12188 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
12189 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
12190 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
12193 /* Add a sequence point for method entry/exit events */
12195 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
12196 MONO_ADD_INS (init_localsbb, ins);
12197 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
12198 MONO_ADD_INS (cfg->bb_exit, ins);
12202 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
12203 * the code they refer to was dead (#11880).
12205 if (sym_seq_points) {
12206 for (i = 0; i < header->code_size; ++i) {
12207 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
12210 NEW_SEQ_POINT (cfg, ins, i, FALSE);
12211 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
12218 if (cfg->method == method) {
12219 MonoBasicBlock *bb;
12220 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12221 bb->region = mono_find_block_region (cfg, bb->real_offset);
12223 mono_create_spvar_for_region (cfg, bb->region);
12224 if (cfg->verbose_level > 2)
12225 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
12229 g_slist_free (class_inits);
12230 dont_inline = g_list_remove (dont_inline, method);
12232 if (inline_costs < 0) {
12235 /* Method is too large */
12236 mname = mono_method_full_name (method, TRUE);
12237 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
12238 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
12240 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
12241 mono_basic_block_free (original_bb);
12245 if ((cfg->verbose_level > 2) && (cfg->method == method))
12246 mono_print_code (cfg, "AFTER METHOD-TO-IR");
12248 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
12249 mono_basic_block_free (original_bb);
12250 return inline_costs;
12253 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
12260 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
12264 set_exception_type_from_invalid_il (cfg, method, ip);
12268 g_slist_free (class_inits);
12269 mono_basic_block_free (original_bb);
12270 dont_inline = g_list_remove (dont_inline, method);
12271 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
12276 store_membase_reg_to_store_membase_imm (int opcode)
12279 case OP_STORE_MEMBASE_REG:
12280 return OP_STORE_MEMBASE_IMM;
12281 case OP_STOREI1_MEMBASE_REG:
12282 return OP_STOREI1_MEMBASE_IMM;
12283 case OP_STOREI2_MEMBASE_REG:
12284 return OP_STOREI2_MEMBASE_IMM;
12285 case OP_STOREI4_MEMBASE_REG:
12286 return OP_STOREI4_MEMBASE_IMM;
12287 case OP_STOREI8_MEMBASE_REG:
12288 return OP_STOREI8_MEMBASE_IMM;
12290 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Map an opcode taking two register operands to the variant taking an
 * immediate as its second operand (e.g. an int add becomes OP_IADD_IMM),
 * used when one operand is a constant.
 *
 * NOTE(review): this extraction appears to have dropped the `case OP_...:`
 * labels (and possibly some whole cases, e.g. between the VOIDCALL and
 * LOCALLOC returns) of the switch; the source opcodes must be confirmed
 * against the upstream file before modifying this function.
 */
12297 mono_op_to_op_imm (int opcode)
/* 32 bit integer ALU, shift and compare opcodes */
12301 return OP_IADD_IMM;
12303 return OP_ISUB_IMM;
12305 return OP_IDIV_IMM;
12307 return OP_IDIV_UN_IMM;
12309 return OP_IREM_IMM;
12311 return OP_IREM_UN_IMM;
12313 return OP_IMUL_IMM;
12315 return OP_IAND_IMM;
12319 return OP_IXOR_IMM;
12321 return OP_ISHL_IMM;
12323 return OP_ISHR_IMM;
12325 return OP_ISHR_UN_IMM;
/* 64 bit (long) opcodes */
12328 return OP_LADD_IMM;
12330 return OP_LSUB_IMM;
12332 return OP_LAND_IMM;
12336 return OP_LXOR_IMM;
12338 return OP_LSHL_IMM;
12340 return OP_LSHR_IMM;
12342 return OP_LSHR_UN_IMM;
/* compares */
12345 return OP_COMPARE_IMM;
12347 return OP_ICOMPARE_IMM;
12349 return OP_LCOMPARE_IMM;
/* membase stores: register source -> immediate source */
12351 case OP_STORE_MEMBASE_REG:
12352 return OP_STORE_MEMBASE_IMM;
12353 case OP_STOREI1_MEMBASE_REG:
12354 return OP_STOREI1_MEMBASE_IMM;
12355 case OP_STOREI2_MEMBASE_REG:
12356 return OP_STOREI2_MEMBASE_IMM;
12357 case OP_STOREI4_MEMBASE_REG:
12358 return OP_STOREI4_MEMBASE_IMM;
/* x86/amd64-only forms */
12360 #if defined(TARGET_X86) || defined (TARGET_AMD64)
12362 return OP_X86_PUSH_IMM;
12363 case OP_X86_COMPARE_MEMBASE_REG:
12364 return OP_X86_COMPARE_MEMBASE_IMM;
12366 #if defined(TARGET_AMD64)
12367 case OP_AMD64_ICOMPARE_MEMBASE_REG:
12368 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* indirect call through register -> direct call when the target is constant */
12370 case OP_VOIDCALL_REG:
12371 return OP_VOIDCALL;
12379 return OP_LOCALLOC_IMM;
12386 ldind_to_load_membase (int opcode)
12390 return OP_LOADI1_MEMBASE;
12392 return OP_LOADU1_MEMBASE;
12394 return OP_LOADI2_MEMBASE;
12396 return OP_LOADU2_MEMBASE;
12398 return OP_LOADI4_MEMBASE;
12400 return OP_LOADU4_MEMBASE;
12402 return OP_LOAD_MEMBASE;
12403 case CEE_LDIND_REF:
12404 return OP_LOAD_MEMBASE;
12406 return OP_LOADI8_MEMBASE;
12408 return OP_LOADR4_MEMBASE;
12410 return OP_LOADR8_MEMBASE;
12412 g_assert_not_reached ();
12419 stind_to_store_membase (int opcode)
12423 return OP_STOREI1_MEMBASE_REG;
12425 return OP_STOREI2_MEMBASE_REG;
12427 return OP_STOREI4_MEMBASE_REG;
12429 case CEE_STIND_REF:
12430 return OP_STORE_MEMBASE_REG;
12432 return OP_STOREI8_MEMBASE_REG;
12434 return OP_STORER4_MEMBASE_REG;
12436 return OP_STORER8_MEMBASE_REG;
12438 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a load-from-membase opcode to the variant loading from an absolute
 * address, for architectures supporting it (currently x86/amd64 only).
 * Returns -1 when no such variant exists.
 */
static int
mono_load_membase_to_load_mem (int opcode)
{
	// FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_LOAD_MEMBASE:
		return OP_LOAD_MEM;
	case OP_LOADU1_MEMBASE:
		return OP_LOADU1_MEM;
	case OP_LOADU2_MEMBASE:
		return OP_LOADU2_MEM;
	case OP_LOADI4_MEMBASE:
		return OP_LOADI4_MEM;
	case OP_LOADU4_MEMBASE:
		return OP_LOADU4_MEM;
#if SIZEOF_REGISTER == 8
	case OP_LOADI8_MEMBASE:
		return OP_LOADI8_MEM;
#endif
	}
#endif

	return -1;
}
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU opcode followed by a store described by STORE_OPCODE, map
 * the pair to a single x86/amd64 read-modify-write opcode operating directly
 * on a [basereg+disp] memory destination (e.g. add-then-store becomes
 * OP_X86_ADD_MEMBASE_REG). The guard `if` at the top of each arch section
 * bails out unless the store is one of the register-word-sized membase
 * stores that the fused forms can encode.
 *
 * NOTE(review): the `case OP_...:` labels of the switch and the
 * `#else`/`#endif`/return-(-1) joints are not visible in this extraction;
 * confirm the exact source opcodes against the upstream file before editing.
 */
12471 op_to_op_dest_membase (int store_opcode, int opcode)
12473 #if defined(TARGET_X86)
12474 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
/* register-source fused forms */
12479 return OP_X86_ADD_MEMBASE_REG;
12481 return OP_X86_SUB_MEMBASE_REG;
12483 return OP_X86_AND_MEMBASE_REG;
12485 return OP_X86_OR_MEMBASE_REG;
12487 return OP_X86_XOR_MEMBASE_REG;
/* immediate-source fused forms */
12490 return OP_X86_ADD_MEMBASE_IMM;
12493 return OP_X86_SUB_MEMBASE_IMM;
12496 return OP_X86_AND_MEMBASE_IMM;
12499 return OP_X86_OR_MEMBASE_IMM;
12502 return OP_X86_XOR_MEMBASE_IMM;
12508 #if defined(TARGET_AMD64)
/* amd64 additionally accepts 8 byte stores */
12509 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32 bit ops reuse the X86_* fused opcodes */
12514 return OP_X86_ADD_MEMBASE_REG;
12516 return OP_X86_SUB_MEMBASE_REG;
12518 return OP_X86_AND_MEMBASE_REG;
12520 return OP_X86_OR_MEMBASE_REG;
12522 return OP_X86_XOR_MEMBASE_REG;
12524 return OP_X86_ADD_MEMBASE_IMM;
12526 return OP_X86_SUB_MEMBASE_IMM;
12528 return OP_X86_AND_MEMBASE_IMM;
12530 return OP_X86_OR_MEMBASE_IMM;
12532 return OP_X86_XOR_MEMBASE_IMM;
/* 64 bit ops get the AMD64_* fused opcodes */
12534 return OP_AMD64_ADD_MEMBASE_REG;
12536 return OP_AMD64_SUB_MEMBASE_REG;
12538 return OP_AMD64_AND_MEMBASE_REG;
12540 return OP_AMD64_OR_MEMBASE_REG;
12542 return OP_AMD64_XOR_MEMBASE_REG;
12545 return OP_AMD64_ADD_MEMBASE_IMM;
12548 return OP_AMD64_SUB_MEMBASE_IMM;
12551 return OP_AMD64_AND_MEMBASE_IMM;
12554 return OP_AMD64_OR_MEMBASE_IMM;
12557 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a comparison-result opcode with a following 1 byte store
 * (STORE_OPCODE) into a single x86/amd64 set-byte-to-memory opcode
 * (OP_X86_SETEQ_MEMBASE / OP_X86_SETNE_MEMBASE).
 *
 * NOTE(review): the `case OP_...:` labels selecting which comparison-result
 * opcodes qualify, and the fallback return, are not visible in this
 * extraction — confirm against the upstream file.
 */
12567 op_to_op_store_membase (int store_opcode, int opcode)
12569 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* only a byte-sized store target can hold the SETcc result */
12572 if (store_opcode == OP_STOREI1_MEMBASE_REG)
12573 return OP_X86_SETEQ_MEMBASE;
12575 if (store_opcode == OP_STOREI1_MEMBASE_REG)
12576 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fuse a load (LOAD_OPCODE) feeding the FIRST source operand of OPCODE
 * into a single x86/amd64 opcode reading that operand directly from
 * [basereg+disp] memory (push-from-memory, compare-with-memory, ...).
 * Only register-word/4 byte loads (and 8 byte loads on amd64) qualify.
 *
 * NOTE(review): several `case OP_...:` labels, `#else` branches and the
 * fallback return are not visible in this extraction — confirm the exact
 * structure against the upstream file before editing.
 */
12584 op_to_op_src1_membase (int load_opcode, int opcode)
12587 /* FIXME: This has sign extension issues */
/* special case: compare an unsigned byte load against an immediate */
12589 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
12590 return OP_X86_COMPARE_MEMBASE8_IMM;
/* otherwise only full-width loads can be folded */
12593 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
12598 return OP_X86_PUSH_MEMBASE;
12599 case OP_COMPARE_IMM:
12600 case OP_ICOMPARE_IMM:
12601 return OP_X86_COMPARE_MEMBASE_IMM;
12604 return OP_X86_COMPARE_MEMBASE_REG;
12608 #ifdef TARGET_AMD64
12609 /* FIXME: This has sign extension issues */
12611 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
12612 return OP_X86_COMPARE_MEMBASE8_IMM;
/* under ilp32 pointers are 4 bytes, so I8 loads are handled separately */
12617 #ifdef __mono_ilp32__
12618 if (load_opcode == OP_LOADI8_MEMBASE)
12620 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12622 return OP_X86_PUSH_MEMBASE;
/* intentionally disabled — see the FIXME below (immediate width limits) */
12624 /* FIXME: This only works for 32 bit immediates
12625 case OP_COMPARE_IMM:
12626 case OP_LCOMPARE_IMM:
12627 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12628 return OP_AMD64_COMPARE_MEMBASE_IMM;
12630 case OP_ICOMPARE_IMM:
12631 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
12632 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
12636 #ifdef __mono_ilp32__
12637 if (load_opcode == OP_LOAD_MEMBASE)
12638 return OP_AMD64_ICOMPARE_MEMBASE_REG;
12639 if (load_opcode == OP_LOADI8_MEMBASE)
12641 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12643 return OP_AMD64_COMPARE_MEMBASE_REG;
12646 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
12647 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fuse a load (LOAD_OPCODE) feeding the SECOND source operand of OPCODE
 * into a single x86/amd64 reg-op-memory opcode (compare/add/sub/and/or/xor
 * with a [basereg+disp] operand). Only register-word/4 byte loads (and
 * 8 byte loads on amd64) qualify.
 *
 * NOTE(review): the `case OP_...:` labels, the `#else` halves of the
 * __mono_ilp32__ conditionals and the fallback return are not visible in
 * this extraction — confirm against the upstream file before editing.
 */
12656 op_to_op_src2_membase (int load_opcode, int opcode)
12659 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
12665 return OP_X86_COMPARE_REG_MEMBASE;
12667 return OP_X86_ADD_REG_MEMBASE;
12669 return OP_X86_SUB_REG_MEMBASE;
12671 return OP_X86_AND_REG_MEMBASE;
12673 return OP_X86_OR_REG_MEMBASE;
12675 return OP_X86_XOR_REG_MEMBASE;
12679 #ifdef TARGET_AMD64
/* ilp32: pointer loads are 4 bytes wide, so they join the 32 bit group */
12680 #ifdef __mono_ilp32__
12681 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
12683 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
/* 32 bit fused forms (X86_* opcodes are shared with x86) */
12687 return OP_AMD64_ICOMPARE_REG_MEMBASE;
12689 return OP_X86_ADD_REG_MEMBASE;
12691 return OP_X86_SUB_REG_MEMBASE;
12693 return OP_X86_AND_REG_MEMBASE;
12695 return OP_X86_OR_REG_MEMBASE;
12697 return OP_X86_XOR_REG_MEMBASE;
12699 #ifdef __mono_ilp32__
12700 } else if (load_opcode == OP_LOADI8_MEMBASE) {
12702 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
/* 64 bit fused forms */
12707 return OP_AMD64_COMPARE_REG_MEMBASE;
12709 return OP_AMD64_ADD_REG_MEMBASE;
12711 return OP_AMD64_SUB_REG_MEMBASE;
12713 return OP_AMD64_AND_REG_MEMBASE;
12715 return OP_AMD64_OR_REG_MEMBASE;
12717 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuses the conversion (presumably by
 * returning -1 — the elided case bodies are not visible here) for opcodes
 * that the current architecture emulates in software, as selected by the
 * MONO_ARCH_* feature guards below; all other opcodes fall through to
 * mono_op_to_op_imm ().
 *
 * NOTE(review): the `case OP_...:` labels under each #if are missing from
 * this extraction — confirm against the upstream file before editing.
 */
12726 mono_op_to_op_imm_noemul (int opcode)
/* long shifts emulated on 32 bit registers without native support */
12729 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
/* integer division/remainder emulated on this arch */
12735 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
12742 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
12747 return mono_op_to_op_imm (opcode);
12752 * mono_handle_global_vregs:
12754 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
12758 mono_handle_global_vregs (MonoCompile *cfg)
12760 gint32 *vreg_to_bb;
12761 MonoBasicBlock *bb;
/*
 * vreg_to_bb maps vreg -> (block_num + 1) of the single bblock using it,
 * or -1 once the vreg has been seen in more than one bblock.
 * NOTE(review): element size is sizeof (gint32*) but the array holds gint32
 * (over-allocates on 64 bit), and '+ 1' adds one byte rather than one
 * element — probably meant sizeof (gint32) * (cfg->next_vreg + 1). Not a
 * correctness issue as written (indices stay < next_vreg), but worth fixing.
 */
12764 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
12766 #ifdef MONO_ARCH_SIMD_INTRINSICS
12767 if (cfg->uses_simd_intrinsics)
12768 mono_simd_simplify_indirection (cfg);
12771 /* Find local vregs used in more than one bb */
12772 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12773 MonoInst *ins = bb->code;
12774 int block_num = bb->block_num;
12776 if (cfg->verbose_level > 2)
12777 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
12780 for (; ins; ins = ins->next) {
12781 const char *spec = INS_INFO (ins->opcode);
12782 int regtype = 0, regindex;
12785 if (G_UNLIKELY (cfg->verbose_level > 2))
12786 mono_print_ins (ins);
/* Only decomposed (machine-level) opcodes should reach this pass */
12788 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Visit dreg, sreg1, sreg2, sreg3 of the instruction in turn */
12790 for (regindex = 0; regindex < 4; regindex ++) {
12793 if (regindex == 0) {
12794 regtype = spec [MONO_INST_DEST];
12795 if (regtype == ' ')
12798 } else if (regindex == 1) {
12799 regtype = spec [MONO_INST_SRC1];
12800 if (regtype == ' ')
12803 } else if (regindex == 2) {
12804 regtype = spec [MONO_INST_SRC2];
12805 if (regtype == ' ')
12808 } else if (regindex == 3) {
12809 regtype = spec [MONO_INST_SRC3];
12810 if (regtype == ' ')
12815 #if SIZEOF_REGISTER == 4
12816 /* In the LLVM case, the long opcodes are not decomposed */
12817 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
12819 * Since some instructions reference the original long vreg,
12820 * and some reference the two component vregs, it is quite hard
12821 * to determine when it needs to be global. So be conservative.
12823 if (!get_vreg_to_inst (cfg, vreg)) {
12824 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
12826 if (cfg->verbose_level > 2)
12827 printf ("LONG VREG R%d made global.\n", vreg);
12831 * Make the component vregs volatile since the optimizations can
12832 * get confused otherwise.
/* vreg+1/vreg+2 are the lo/hi component vregs of a 64 bit value on 32 bit */
12834 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
12835 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
12839 g_assert (vreg != -1);
12841 prev_bb = vreg_to_bb [vreg];
12842 if (prev_bb == 0) {
12843 /* 0 is a valid block num */
12844 vreg_to_bb [vreg] = block_num + 1;
12845 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hardware registers never need a variable */
12846 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
12849 if (!get_vreg_to_inst (cfg, vreg)) {
12850 if (G_UNLIKELY (cfg->verbose_level > 2))
12851 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Create the variable with the type implied by the regtype spec char */
12855 if (vreg_is_ref (cfg, vreg))
12856 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
12858 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
12861 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
12864 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
12867 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
12870 g_assert_not_reached ();
12874 /* Flag as having been used in more than one bb */
12875 vreg_to_bb [vreg] = -1;
12881 /* If a variable is used in only one bblock, convert it into a local vreg */
12882 for (i = 0; i < cfg->num_varinfo; i++) {
12883 MonoInst *var = cfg->varinfo [i];
12884 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
12886 switch (var->type) {
12892 #if SIZEOF_REGISTER == 8
12895 #if !defined(TARGET_X86)
12896 /* Enabling this screws up the fp stack on x86 */
12899 if (mono_arch_is_soft_float ())
12902 /* Arguments are implicitly global */
12903 /* Putting R4 vars into registers doesn't work currently */
12904 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
12905 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
12907 * Make sure that the variable's liveness interval doesn't contain a call, since
12908 * that would cause the lvreg to be spilled, making the whole optimization
12911 /* This is too slow for JIT compilation */
12913 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
12915 int def_index, call_index, ins_index;
12916 gboolean spilled = FALSE;
12921 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
12922 const char *spec = INS_INFO (ins->opcode);
12924 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
12925 def_index = ins_index;
/*
 * NOTE(review): both clauses of this || test SRC1/sreg1 — the second
 * clause is a duplicate and was almost certainly meant to test
 * SRC2/sreg2. As written, a use through sreg2 after a call is not
 * detected, so the variable may be converted even though its lvreg
 * would be spilled across the call. Verify against upstream and fix.
 */
12927 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
12928 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
12929 if (call_index > def_index) {
12935 if (MONO_IS_CALL (ins))
12936 call_index = ins_index;
12946 if (G_UNLIKELY (cfg->verbose_level > 2))
12947 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* Demote: kill the variable; the vreg becomes bblock-local again */
12948 var->flags |= MONO_INST_IS_DEAD;
12949 cfg->vreg_to_inst [var->dreg] = NULL;
12956 * Compress the varinfo and vars tables so the liveness computation is faster and
12957 * takes up less space.
12960 for (i = 0; i < cfg->num_varinfo; ++i) {
12961 MonoInst *var = cfg->varinfo [i];
12962 if (pos < i && cfg->locals_start == i)
12963 cfg->locals_start = pos;
12964 if (!(var->flags & MONO_INST_IS_DEAD)) {
12966 cfg->varinfo [pos] = cfg->varinfo [i];
12967 cfg->varinfo [pos]->inst_c0 = pos;
12968 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
12969 cfg->vars [pos].idx = pos;
12970 #if SIZEOF_REGISTER == 4
12971 if (cfg->varinfo [pos]->type == STACK_I8) {
12972 /* Modify the two component vars too */
12975 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
12976 var1->inst_c0 = pos;
12977 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
12978 var1->inst_c0 = pos;
12985 cfg->num_varinfo = pos;
12986 if (cfg->locals_start > cfg->num_varinfo)
12987 cfg->locals_start = cfg->num_varinfo;
12991 * mono_spill_global_vars:
12993 * Generate spill code for variables which are not allocated to registers,
12994 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
12995 * code is generated which could be optimized by the local optimization passes.
12998 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
13000 MonoBasicBlock *bb;
13002 int orig_next_vreg;
13003 guint32 *vreg_to_lvreg;
13005 guint32 i, lvregs_len;
13006 gboolean dest_has_lvreg = FALSE;
13007 guint32 stacktypes [128];
13008 MonoInst **live_range_start, **live_range_end;
13009 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
13010 int *gsharedvt_vreg_to_idx = NULL;
13012 *need_local_opts = FALSE;
13014 memset (spec2, 0, sizeof (spec2));
13016 /* FIXME: Move this function to mini.c */
/* Map INS_INFO regtype chars to stack types for alloc_dreg () */
13017 stacktypes ['i'] = STACK_PTR;
13018 stacktypes ['l'] = STACK_I8;
13019 stacktypes ['f'] = STACK_R8;
13020 #ifdef MONO_ARCH_SIMD_INTRINSICS
13021 stacktypes ['x'] = STACK_VTYPE;
13024 #if SIZEOF_REGISTER == 4
13025 /* Create MonoInsts for longs */
13026 for (i = 0; i < cfg->num_varinfo; i++) {
13027 MonoInst *ins = cfg->varinfo [i];
13029 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
13030 switch (ins->type) {
13035 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
13038 g_assert (ins->opcode == OP_REGOFFSET);
/* Give the lo (dreg+1) and hi (dreg+2) component vregs stack slots
 * within the parent's slot, using the platform word-order offsets. */
13040 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
13042 tree->opcode = OP_REGOFFSET;
13043 tree->inst_basereg = ins->inst_basereg;
13044 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
13046 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
13048 tree->opcode = OP_REGOFFSET;
13049 tree->inst_basereg = ins->inst_basereg;
13050 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
13060 if (cfg->compute_gc_maps) {
13061 /* registers need liveness info even for non-ref vars */
13062 for (i = 0; i < cfg->num_varinfo; i++) {
13063 MonoInst *ins = cfg->varinfo [i];
13065 if (ins->opcode == OP_REGVAR)
13066 ins->flags |= MONO_INST_GC_TRACK;
/*
 * For gsharedvt methods, variable-sized locals get an index into the
 * runtime-info offsets table; gsharedvt_vreg_to_idx holds idx+1 for
 * locals, -1 for by-ref args, 0 for everything else.
 */
13070 if (cfg->gsharedvt) {
13071 gsharedvt_vreg_to_idx = mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
13073 for (i = 0; i < cfg->num_varinfo; ++i) {
13074 MonoInst *ins = cfg->varinfo [i];
13077 if (mini_is_gsharedvt_variable_type (cfg, ins->inst_vtype)) {
13078 if (i >= cfg->locals_start) {
13080 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
13081 gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
13082 ins->opcode = OP_GSHAREDVT_LOCAL;
13083 ins->inst_imm = idx;
13086 gsharedvt_vreg_to_idx [ins->dreg] = -1;
13087 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
13093 /* FIXME: widening and truncation */
13096 * As an optimization, when a variable allocated to the stack is first loaded into
13097 * an lvreg, we will remember the lvreg and use it the next time instead of loading
13098 * the variable again.
13100 orig_next_vreg = cfg->next_vreg;
13101 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
/* Fixed capacity 1024 — guarded by g_asserts at each append below */
13102 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
13106 * These arrays contain the first and last instructions accessing a given
13108 * Since we emit bblocks in the same order we process them here, and we
13109 * don't split live ranges, these will precisely describe the live range of
13110 * the variable, i.e. the instruction range where a valid value can be found
13111 * in the variables location.
13112 * The live range is computed using the liveness info computed by the liveness pass.
13113 * We can't use vmv->range, since that is an abstract live range, and we need
13114 * one which is instruction precise.
13115 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
13117 /* FIXME: Only do this if debugging info is requested */
13118 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
13119 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
13120 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13121 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13123 /* Add spill loads/stores */
13124 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13127 if (cfg->verbose_level > 2)
13128 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
13130 /* Clear vreg_to_lvreg array */
/* lvreg caching is per-bblock: reset the entries touched so far */
13131 for (i = 0; i < lvregs_len; i++)
13132 vreg_to_lvreg [lvregs [i]] = 0;
13136 MONO_BB_FOR_EACH_INS (bb, ins) {
13137 const char *spec = INS_INFO (ins->opcode);
13138 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
13139 gboolean store, no_lvreg;
13140 int sregs [MONO_MAX_SRC_REGS];
13142 if (G_UNLIKELY (cfg->verbose_level > 2))
13143 mono_print_ins (ins);
13145 if (ins->opcode == OP_NOP)
13149 * We handle LDADDR here as well, since it can only be decomposed
13150 * when variable addresses are known.
13152 if (ins->opcode == OP_LDADDR) {
13153 MonoInst *var = ins->inst_p0;
13155 if (var->opcode == OP_VTARG_ADDR) {
13156 /* Happens on SPARC/S390 where vtypes are passed by reference */
13157 MonoInst *vtaddr = var->inst_left;
13158 if (vtaddr->opcode == OP_REGVAR) {
13159 ins->opcode = OP_MOVE;
13160 ins->sreg1 = vtaddr->dreg;
13162 else if (var->inst_left->opcode == OP_REGOFFSET) {
13163 ins->opcode = OP_LOAD_MEMBASE;
13164 ins->inst_basereg = vtaddr->inst_basereg;
13165 ins->inst_offset = vtaddr->inst_offset;
13168 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg] < 0) {
13169 /* gsharedvt arg passed by ref */
13170 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
13172 ins->opcode = OP_LOAD_MEMBASE;
13173 ins->inst_basereg = var->inst_basereg;
13174 ins->inst_offset = var->inst_offset;
13175 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg]) {
13176 MonoInst *load, *load2, *load3;
13177 int idx = gsharedvt_vreg_to_idx [var->dreg] - 1;
13178 int reg1, reg2, reg3;
13179 MonoInst *info_var = cfg->gsharedvt_info_var;
13180 MonoInst *locals_var = cfg->gsharedvt_locals_var;
13184 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
13187 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
13189 g_assert (info_var);
13190 g_assert (locals_var);
13192 /* Mark the instruction used to compute the locals var as used */
13193 cfg->gsharedvt_locals_var_ins = NULL;
13195 /* Load the offset */
13196 if (info_var->opcode == OP_REGOFFSET) {
13197 reg1 = alloc_ireg (cfg);
13198 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
13199 } else if (info_var->opcode == OP_REGVAR) {
13201 reg1 = info_var->dreg;
13203 g_assert_not_reached ();
13205 reg2 = alloc_ireg (cfg);
13206 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
13207 /* Load the locals area address */
13208 reg3 = alloc_ireg (cfg);
13209 if (locals_var->opcode == OP_REGOFFSET) {
13210 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
13211 } else if (locals_var->opcode == OP_REGVAR) {
13212 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
13214 g_assert_not_reached ();
13216 /* Compute the address */
13217 ins->opcode = OP_PADD;
/* Insert load3/load2/load in front of ins, in execution order */
13221 mono_bblock_insert_before_ins (bb, ins, load3);
13222 mono_bblock_insert_before_ins (bb, load3, load2);
13224 mono_bblock_insert_before_ins (bb, load2, load);
13226 g_assert (var->opcode == OP_REGOFFSET);
/* Plain stack variable: LDADDR becomes basereg + offset */
13228 ins->opcode = OP_ADD_IMM;
13229 ins->sreg1 = var->inst_basereg;
13230 ins->inst_imm = var->inst_offset;
13233 *need_local_opts = TRUE;
13234 spec = INS_INFO (ins->opcode);
13237 if (ins->opcode < MONO_CEE_LAST) {
13238 mono_print_ins (ins);
13239 g_assert_not_reached ();
13243 * Store opcodes have destbasereg in the dreg, but in reality, it is an
/* Temporarily swap dreg/sreg2 so the generic dreg/sreg handling below
 * treats the stored value as a source; swapped back after processing. */
13247 if (MONO_IS_STORE_MEMBASE (ins)) {
13248 tmp_reg = ins->dreg;
13249 ins->dreg = ins->sreg2;
13250 ins->sreg2 = tmp_reg;
13253 spec2 [MONO_INST_DEST] = ' ';
13254 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
13255 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
13256 spec2 [MONO_INST_SRC3] = ' ';
13258 } else if (MONO_IS_STORE_MEMINDEX (ins))
13259 g_assert_not_reached ();
13264 if (G_UNLIKELY (cfg->verbose_level > 2)) {
13265 printf ("\t %.3s %d", spec, ins->dreg);
13266 num_sregs = mono_inst_get_src_registers (ins, sregs);
13267 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
13268 printf (" %d", sregs [srcindex]);
/***************/
/*    DREG     */
/***************/
13275 regtype = spec [MONO_INST_DEST];
13276 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
13279 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
13280 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
13281 MonoInst *store_ins;
13283 MonoInst *def_ins = ins;
13284 int dreg = ins->dreg; /* The original vreg */
13286 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
13288 if (var->opcode == OP_REGVAR) {
13289 ins->dreg = var->dreg;
13290 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
13292 * Instead of emitting a load+store, use a _membase opcode.
13294 g_assert (var->opcode == OP_REGOFFSET);
13295 if (ins->opcode == OP_MOVE) {
13299 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
13300 ins->inst_basereg = var->inst_basereg;
13301 ins->inst_offset = var->inst_offset;
13304 spec = INS_INFO (ins->opcode);
13308 g_assert (var->opcode == OP_REGOFFSET);
13310 prev_dreg = ins->dreg;
13312 /* Invalidate any previous lvreg for this vreg */
13313 vreg_to_lvreg [ins->dreg] = 0;
13317 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
/* Soft float keeps r8 values in integer registers */
13319 store_opcode = OP_STOREI8_MEMBASE_REG;
/* Redirect the def into a fresh lvreg; the spill store goes after it */
13322 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
13324 #if SIZEOF_REGISTER != 8
13325 if (regtype == 'l') {
13326 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
13327 mono_bblock_insert_after_ins (bb, ins, store_ins);
13328 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
13329 mono_bblock_insert_after_ins (bb, ins, store_ins);
13330 def_ins = store_ins;
13335 g_assert (store_opcode != OP_STOREV_MEMBASE);
13337 /* Try to fuse the store into the instruction itself */
13338 /* FIXME: Add more instructions */
13339 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
13340 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
13341 ins->inst_imm = ins->inst_c0;
13342 ins->inst_destbasereg = var->inst_basereg;
13343 ins->inst_offset = var->inst_offset;
13344 spec = INS_INFO (ins->opcode);
13345 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
/* A move into a spilled var becomes the store itself */
13346 ins->opcode = store_opcode;
13347 ins->inst_destbasereg = var->inst_basereg;
13348 ins->inst_offset = var->inst_offset;
13352 tmp_reg = ins->dreg;
13353 ins->dreg = ins->sreg2;
13354 ins->sreg2 = tmp_reg;
13357 spec2 [MONO_INST_DEST] = ' ';
13358 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
13359 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
13360 spec2 [MONO_INST_SRC3] = ' ';
13362 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
13363 // FIXME: The backends expect the base reg to be in inst_basereg
13364 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
13366 ins->inst_basereg = var->inst_basereg;
13367 ins->inst_offset = var->inst_offset;
13368 spec = INS_INFO (ins->opcode);
13370 /* printf ("INS: "); mono_print_ins (ins); */
13371 /* Create a store instruction */
13372 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
13374 /* Insert it after the instruction */
13375 mono_bblock_insert_after_ins (bb, ins, store_ins);
13377 def_ins = store_ins;
13380 * We can't assign ins->dreg to var->dreg here, since the
13381 * sregs could use it. So set a flag, and do it after
13384 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
13385 dest_has_lvreg = TRUE;
13390 if (def_ins && !live_range_start [dreg]) {
13391 live_range_start [dreg] = def_ins;
13392 live_range_start_bb [dreg] = bb;
13395 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
13398 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
13399 tmp->inst_c1 = dreg;
13400 mono_bblock_insert_after_ins (bb, def_ins, tmp);
/************/
/*  SREGS   */
/************/
13407 num_sregs = mono_inst_get_src_registers (ins, sregs);
13408 for (srcindex = 0; srcindex < 3; ++srcindex) {
13409 regtype = spec [MONO_INST_SRC1 + srcindex];
13410 sreg = sregs [srcindex];
13412 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
13413 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
13414 MonoInst *var = get_vreg_to_inst (cfg, sreg);
13415 MonoInst *use_ins = ins;
13416 MonoInst *load_ins;
13417 guint32 load_opcode;
13419 if (var->opcode == OP_REGVAR) {
13420 sregs [srcindex] = var->dreg;
13421 //mono_inst_set_src_registers (ins, sregs);
13422 live_range_end [sreg] = use_ins;
13423 live_range_end_bb [sreg] = bb;
13425 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13428 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13429 /* var->dreg is a hreg */
13430 tmp->inst_c1 = sreg;
13431 mono_bblock_insert_after_ins (bb, ins, tmp);
13437 g_assert (var->opcode == OP_REGOFFSET);
13439 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
13441 g_assert (load_opcode != OP_LOADV_MEMBASE);
13443 if (vreg_to_lvreg [sreg]) {
13444 g_assert (vreg_to_lvreg [sreg] != -1);
13446 /* The variable is already loaded to an lvreg */
13447 if (G_UNLIKELY (cfg->verbose_level > 2))
13448 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
13449 sregs [srcindex] = vreg_to_lvreg [sreg];
13450 //mono_inst_set_src_registers (ins, sregs);
13454 /* Try to fuse the load into the instruction */
13455 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
13456 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
13457 sregs [0] = var->inst_basereg;
13458 //mono_inst_set_src_registers (ins, sregs);
13459 ins->inst_offset = var->inst_offset;
13460 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
13461 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
13462 sregs [1] = var->inst_basereg;
13463 //mono_inst_set_src_registers (ins, sregs);
13464 ins->inst_offset = var->inst_offset;
13466 if (MONO_IS_REAL_MOVE (ins)) {
/* The load below produces the value directly; the move is dead */
13467 ins->opcode = OP_NOP;
13470 //printf ("%d ", srcindex); mono_print_ins (ins);
13472 sreg = alloc_dreg (cfg, stacktypes [regtype]);
13474 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
13475 if (var->dreg == prev_dreg) {
13477 * sreg refers to the value loaded by the load
13478 * emitted below, but we need to use ins->dreg
13479 * since it refers to the store emitted earlier.
13483 g_assert (sreg != -1);
/* Cache the lvreg for later uses of this variable in the bblock */
13484 vreg_to_lvreg [var->dreg] = sreg;
13485 g_assert (lvregs_len < 1024);
13486 lvregs [lvregs_len ++] = var->dreg;
13490 sregs [srcindex] = sreg;
13491 //mono_inst_set_src_registers (ins, sregs);
13493 #if SIZEOF_REGISTER != 8
13494 if (regtype == 'l') {
13495 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
13496 mono_bblock_insert_before_ins (bb, ins, load_ins);
13497 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
13498 mono_bblock_insert_before_ins (bb, ins, load_ins);
13499 use_ins = load_ins;
13504 #if SIZEOF_REGISTER == 4
13505 g_assert (load_opcode != OP_LOADI8_MEMBASE);
13507 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
13508 mono_bblock_insert_before_ins (bb, ins, load_ins);
13509 use_ins = load_ins;
13513 if (var->dreg < orig_next_vreg) {
13514 live_range_end [var->dreg] = use_ins;
13515 live_range_end_bb [var->dreg] = bb;
13518 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13521 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13522 tmp->inst_c1 = var->dreg;
13523 mono_bblock_insert_after_ins (bb, ins, tmp);
13527 mono_inst_set_src_registers (ins, sregs);
/* Deferred from the dreg handling above: publish the dest lvreg now
 * that the sregs no longer need the original value. */
13529 if (dest_has_lvreg) {
13530 g_assert (ins->dreg != -1);
13531 vreg_to_lvreg [prev_dreg] = ins->dreg;
13532 g_assert (lvregs_len < 1024);
13533 lvregs [lvregs_len ++] = prev_dreg;
13534 dest_has_lvreg = FALSE;
/* Undo the earlier dreg/sreg2 swap for store opcodes */
13538 tmp_reg = ins->dreg;
13539 ins->dreg = ins->sreg2;
13540 ins->sreg2 = tmp_reg;
13543 if (MONO_IS_CALL (ins)) {
13544 /* Clear vreg_to_lvreg array */
/* A call clobbers the lvregs, so the cache must be invalidated */
13545 for (i = 0; i < lvregs_len; i++)
13546 vreg_to_lvreg [lvregs [i]] = 0;
13548 } else if (ins->opcode == OP_NOP) {
13550 MONO_INST_NULLIFY_SREGS (ins);
13553 if (cfg->verbose_level > 2)
13554 mono_print_ins_index (1, ins);
13557 /* Extend the live range based on the liveness info */
13558 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
13559 for (i = 0; i < cfg->num_varinfo; i ++) {
13560 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
13562 if (vreg_is_volatile (cfg, vi->vreg))
13563 /* The liveness info is incomplete */
13566 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
13567 /* Live from at least the first ins of this bb */
13568 live_range_start [vi->vreg] = bb->code;
13569 live_range_start_bb [vi->vreg] = bb;
13572 if (mono_bitset_test_fast (bb->live_out_set, i)) {
13573 /* Live at least until the last ins of this bb */
13574 live_range_end [vi->vreg] = bb->last_ins;
13575 live_range_end_bb [vi->vreg] = bb;
13581 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
13583 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
13584 * by storing the current native offset into MonoMethodVar->live_range_start/end.
13586 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
13587 for (i = 0; i < cfg->num_varinfo; ++i) {
13588 int vreg = MONO_VARINFO (cfg, i)->vreg;
13591 if (live_range_start [vreg]) {
13592 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
13594 ins->inst_c1 = vreg;
13595 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
13597 if (live_range_end [vreg]) {
13598 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
13600 ins->inst_c1 = vreg;
13601 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
13602 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
13604 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
13610 if (cfg->gsharedvt_locals_var_ins) {
13611 /* Nullify if unused */
13612 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
13613 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
13616 g_free (live_range_start);
13617 g_free (live_range_end);
13618 g_free (live_range_start_bb);
13619 g_free (live_range_end_bb);
13624 * - use 'iadd' instead of 'int_add'
13625 * - handling ovf opcodes: decompose in method_to_ir.
13626 * - unify iregs/fregs
13627 * -> partly done, the missing parts are:
13628 * - a more complete unification would involve unifying the hregs as well, so
13629 * code wouldn't need if (fp) all over the place. but that would mean the hregs
13630 * would no longer map to the machine hregs, so the code generators would need to
13631 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
13632 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
13633 * fp/non-fp branches speeds it up by about 15%.
13634 * - use sext/zext opcodes instead of shifts
13636 * - get rid of TEMPLOADs if possible and use vregs instead
13637 * - clean up usage of OP_P/OP_ opcodes
13638 * - cleanup usage of DUMMY_USE
13639 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
13641 * - set the stack type and allocate a dreg in the EMIT_NEW macros
13642 * - get rid of all the <foo>2 stuff when the new JIT is ready.
13643 * - make sure handle_stack_args () is called before the branch is emitted
13644 * - when the new IR is done, get rid of all unused stuff
13645 * - COMPARE/BEQ as separate instructions or unify them ?
13646 * - keeping them separate allows specialized compare instructions like
13647 * compare_imm, compare_membase
13648 * - most back ends unify fp compare+branch, fp compare+ceq
13649 * - integrate mono_save_args into inline_method
13650 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
13651 * - handle long shift opts on 32 bit platforms somehow: they require
13652 * 3 sregs (2 for arg1 and 1 for arg2)
13653 * - make byref a 'normal' type.
13654 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
13655 * variable if needed.
13656 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
13657 * like inline_method.
13658 * - remove inlining restrictions
13659 * - fix LNEG and enable cfold of INEG
13660 * - generalize x86 optimizations like ldelema as a peephole optimization
13661 * - add store_mem_imm for amd64
13662 * - optimize the loading of the interruption flag in the managed->native wrappers
13663 * - avoid special handling of OP_NOP in passes
13664 * - move code inserting instructions into one function/macro.
13665 * - try a coalescing phase after liveness analysis
13666 * - add float -> vreg conversion + local optimizations on !x86
13667 * - figure out how to handle decomposed branches during optimizations, ie.
13668 * compare+branch, op_jump_table+op_br etc.
13669 * - promote RuntimeXHandles to vregs
13670 * - vtype cleanups:
13671 * - add a NEW_VARLOADA_VREG macro
13672 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
13673 * accessing vtype fields.
13674 * - get rid of I8CONST on 64 bit platforms
13675 * - dealing with the increase in code size due to branches created during opcode
13677 * - use extended basic blocks
13678 * - all parts of the JIT
13679 * - handle_global_vregs () && local regalloc
13680 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
13681 * - sources of increase in code size:
13684 * - isinst and castclass
13685 * - lvregs not allocated to global registers even if used multiple times
13686 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
13688 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
13689 * - add all micro optimizations from the old JIT
13690 * - put tree optimizations into the deadce pass
13691 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
13692 * specific function.
13693 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
13694 * fcompare + branchCC.
13695 * - create a helper function for allocating a stack slot, taking into account
13696 * MONO_CFG_HAS_SPILLUP.
13698 * - merge the ia64 switch changes.
13699 * - optimize mono_regstate2_alloc_int/float.
13700 * - fix the pessimistic handling of variables accessed in exception handler blocks.
13701 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
13702 * parts of the tree could be separated by other instructions, killing the tree
13703 * arguments, or stores killing loads etc. Also, should we fold loads into other
13704 * instructions if the result of the load is used multiple times ?
13705 * - make the REM_IMM optimization in mini-x86.c arch-independent.
13706 * - LAST MERGE: 108395.
13707 * - when returning vtypes in registers, generate IR and append it to the end of the
13708 * last bb instead of doing it in the epilog.
13709 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
13717 - When to decompose opcodes:
13718 - earlier: this makes some optimizations hard to implement, since the low level IR
 13719 	   no longer contains the necessary information. But it is easier to do.
13720 - later: harder to implement, enables more optimizations.
13721 - Branches inside bblocks:
13722 - created when decomposing complex opcodes.
13723 - branches to another bblock: harmless, but not tracked by the branch
13724 optimizations, so need to branch to a label at the start of the bblock.
13725 - branches to inside the same bblock: very problematic, trips up the local
 13726 	   reg allocator. Can be fixed by splitting the current bblock, but that is a
13727 complex operation, since some local vregs can become global vregs etc.
13728 - Local/global vregs:
13729 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
13730 local register allocator.
13731 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
13732 structure, created by mono_create_var (). Assigned to hregs or the stack by
13733 the global register allocator.
13734 - When to do optimizations like alu->alu_imm:
13735 - earlier -> saves work later on since the IR will be smaller/simpler
13736 - later -> can work on more instructions
13737 - Handling of valuetypes:
13738 - When a vtype is pushed on the stack, a new temporary is created, an
13739 instruction computing its address (LDADDR) is emitted and pushed on
13740 the stack. Need to optimize cases when the vtype is used immediately as in
13741 argument passing, stloc etc.
13742 - Instead of the to_end stuff in the old JIT, simply call the function handling
13743 the values on the stack before emitting the last instruction of the bb.
13746 #endif /* DISABLE_JIT */