2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/assembly.h>
38 #include <mono/metadata/attrdefs.h>
39 #include <mono/metadata/loader.h>
40 #include <mono/metadata/tabledefs.h>
41 #include <mono/metadata/class.h>
42 #include <mono/metadata/object.h>
43 #include <mono/metadata/exception.h>
44 #include <mono/metadata/opcodes.h>
45 #include <mono/metadata/mono-endian.h>
46 #include <mono/metadata/tokentype.h>
47 #include <mono/metadata/tabledefs.h>
48 #include <mono/metadata/marshal.h>
49 #include <mono/metadata/debug-helpers.h>
50 #include <mono/metadata/mono-debug.h>
51 #include <mono/metadata/gc-internal.h>
52 #include <mono/metadata/security-manager.h>
53 #include <mono/metadata/threads-types.h>
54 #include <mono/metadata/security-core-clr.h>
55 #include <mono/metadata/monitor.h>
56 #include <mono/metadata/profiler-private.h>
57 #include <mono/metadata/profiler.h>
58 #include <mono/metadata/debug-mono-symfile.h>
59 #include <mono/utils/mono-compiler.h>
60 #include <mono/utils/mono-memory-model.h>
61 #include <mono/metadata/mono-basic-block.h>
68 #include "jit-icalls.h"
70 #include "debugger-agent.h"
72 #define BRANCH_COST 10
73 #define INLINE_LENGTH_LIMIT 20
74 #define INLINE_FAILURE(msg) do { \
75 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE)) { \
76 if (cfg->verbose_level >= 2) \
77 printf ("inline failed: %s\n", msg); \
78 goto inline_failure; \
81 #define CHECK_CFG_EXCEPTION do {\
82 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
85 #define METHOD_ACCESS_FAILURE do { \
86 char *method_fname = mono_method_full_name (method, TRUE); \
87 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
88 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS); \
89 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
90 g_free (method_fname); \
91 g_free (cil_method_fname); \
92 goto exception_exit; \
94 #define FIELD_ACCESS_FAILURE do { \
95 char *method_fname = mono_method_full_name (method, TRUE); \
96 char *field_fname = mono_field_full_name (field); \
97 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS); \
98 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
99 g_free (method_fname); \
100 g_free (field_fname); \
101 goto exception_exit; \
103 #define GENERIC_SHARING_FAILURE(opcode) do { \
104 if (cfg->generic_sharing_context) { \
105 if (cfg->verbose_level > 2) \
106 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
107 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
108 goto exception_exit; \
111 #define GSHAREDVT_FAILURE(opcode) do { \
112 if (cfg->gsharedvt) { \
113 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __FILE__, __LINE__); \
114 if (cfg->verbose_level >= 2) \
115 printf ("%s\n", cfg->exception_message); \
116 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
117 goto exception_exit; \
120 #define OUT_OF_MEMORY_FAILURE do { \
121 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
122 goto exception_exit; \
124 #define DISABLE_AOT(cfg) do { \
125 if ((cfg)->verbose_level >= 2) \
126 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
127 (cfg)->disable_aot = TRUE; \
130 /* Determine whether 'ins' represents a load of the 'this' argument */
131 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
133 static int ldind_to_load_membase (int opcode);
134 static int stind_to_store_membase (int opcode);
136 int mono_op_to_op_imm (int opcode);
137 int mono_op_to_op_imm_noemul (int opcode);
139 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
141 /* helper methods signatures */
142 static MonoMethodSignature *helper_sig_class_init_trampoline = NULL;
143 static MonoMethodSignature *helper_sig_domain_get = NULL;
144 static MonoMethodSignature *helper_sig_generic_class_init_trampoline = NULL;
145 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm = NULL;
146 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline = NULL;
147 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline = NULL;
148 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm = NULL;
151 * Instruction metadata
159 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
160 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
166 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
171 /* keep in sync with the enum in mini.h */
174 #include "mini-ops.h"
179 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
180 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
182 * This should contain the index of the last sreg + 1. This is not the same
183 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
185 const gint8 ins_sreg_counts[] = {
186 #include "mini-ops.h"
191 #define MONO_INIT_VARINFO(vi,id) do { \
192 (vi)->range.first_use.pos.bid = 0xffff; \
/* Set the three source vregs of INS from REGS (regs [0..2]). */
198 mono_inst_set_src_registers (MonoInst *ins, int *regs)
200 ins->sreg1 = regs [0];
201 ins->sreg2 = regs [1];
202 ins->sreg3 = regs [2];
206 mono_alloc_ireg (MonoCompile *cfg)
208 return alloc_ireg (cfg);
212 mono_alloc_lreg (MonoCompile *cfg)
214 return alloc_lreg (cfg);
218 mono_alloc_freg (MonoCompile *cfg)
220 return alloc_freg (cfg);
224 mono_alloc_preg (MonoCompile *cfg)
226 return alloc_preg (cfg);
230 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
232 return alloc_dreg (cfg, stack_type);
236 * mono_alloc_ireg_ref:
238 * Allocate an IREG, and mark it as holding a GC ref.
241 mono_alloc_ireg_ref (MonoCompile *cfg)
243 return alloc_ireg_ref (cfg);
247 * mono_alloc_ireg_mp:
249 * Allocate an IREG, and mark it as holding a managed pointer.
252 mono_alloc_ireg_mp (MonoCompile *cfg)
254 return alloc_ireg_mp (cfg);
258 * mono_alloc_ireg_copy:
260 * Allocate an IREG with the same GC type as VREG.
263 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
265 if (vreg_is_ref (cfg, vreg))
266 return alloc_ireg_ref (cfg);
267 else if (vreg_is_mp (cfg, vreg))
268 return alloc_ireg_mp (cfg);
270 return alloc_ireg (cfg);
274 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
279 type = mini_replace_type (type);
281 switch (type->type) {
284 case MONO_TYPE_BOOLEAN:
296 case MONO_TYPE_FNPTR:
298 case MONO_TYPE_CLASS:
299 case MONO_TYPE_STRING:
300 case MONO_TYPE_OBJECT:
301 case MONO_TYPE_SZARRAY:
302 case MONO_TYPE_ARRAY:
306 #if SIZEOF_REGISTER == 8
315 case MONO_TYPE_VALUETYPE:
316 if (type->data.klass->enumtype) {
317 type = mono_class_enum_basetype (type->data.klass);
320 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
323 case MONO_TYPE_TYPEDBYREF:
325 case MONO_TYPE_GENERICINST:
326 type = &type->data.generic_class->container_class->byval_arg;
330 g_assert (cfg->generic_sharing_context);
331 if (mini_type_var_is_vt (cfg, type))
336 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
342 mono_print_bb (MonoBasicBlock *bb, const char *msg)
347 printf ("\n%s %d: [IN: ", msg, bb->block_num);
348 for (i = 0; i < bb->in_count; ++i)
349 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
351 for (i = 0; i < bb->out_count; ++i)
352 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
354 for (tree = bb->code; tree; tree = tree->next)
355 mono_print_ins_index (-1, tree);
/*
 * mono_create_helper_signatures:
 *
 *   Initialize the static helper_sig_* signature caches used when emitting
 * calls to runtime trampolines. Each string passed to
 * mono_create_icall_signature () is of the form "<ret> <arg1> ...".
 */
359 mono_create_helper_signatures (void)
361 helper_sig_domain_get = mono_create_icall_signature ("ptr");
362 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
363 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
364 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
365 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
366 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
367 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
371 * When using gsharedvt, some instantiations might be verifiable while others are not, e.g.
372 * foo<T> (int i) { ldarg.0; box T; }
374 #define UNVERIFIED do { \
375 if (cfg->gsharedvt) { \
376 if (cfg->verbose_level > 2) \
377 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
378 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
379 goto exception_exit; \
381 if (mini_get_debug_options ()->break_on_unverified) \
387 #define LOAD_ERROR do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto load_error; } while (0)
389 #define TYPE_LOAD_ERROR(klass) do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else { cfg->exception_ptr = klass; goto load_error; } } while (0)
391 #define GET_BBLOCK(cfg,tblock,ip) do { \
392 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
394 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
395 NEW_BBLOCK (cfg, (tblock)); \
396 (tblock)->cil_code = (ip); \
397 ADD_BBLOCK (cfg, (tblock)); \
401 #if defined(TARGET_X86) || defined(TARGET_AMD64)
402 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
403 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
404 (dest)->dreg = alloc_ireg_mp ((cfg)); \
405 (dest)->sreg1 = (sr1); \
406 (dest)->sreg2 = (sr2); \
407 (dest)->inst_imm = (imm); \
408 (dest)->backend.shift_amount = (shift); \
409 MONO_ADD_INS ((cfg)->cbb, (dest)); \
413 #if SIZEOF_REGISTER == 8
414 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
415 /* FIXME: Need to add many more cases */ \
416 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
418 int dr = alloc_preg (cfg); \
419 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
420 (ins)->sreg2 = widen->dreg; \
424 #define ADD_WIDEN_OP(ins, arg1, arg2)
427 #define ADD_BINOP(op) do { \
428 MONO_INST_NEW (cfg, ins, (op)); \
430 ins->sreg1 = sp [0]->dreg; \
431 ins->sreg2 = sp [1]->dreg; \
432 type_from_op (ins, sp [0], sp [1]); \
434 /* Have to insert a widening op */ \
435 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
436 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
437 MONO_ADD_INS ((cfg)->cbb, (ins)); \
438 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
441 #define ADD_UNOP(op) do { \
442 MONO_INST_NEW (cfg, ins, (op)); \
444 ins->sreg1 = sp [0]->dreg; \
445 type_from_op (ins, sp [0], NULL); \
447 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
448 MONO_ADD_INS ((cfg)->cbb, (ins)); \
449 *sp++ = mono_decompose_opcode (cfg, ins); \
452 #define ADD_BINCOND(next_block) do { \
455 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
456 cmp->sreg1 = sp [0]->dreg; \
457 cmp->sreg2 = sp [1]->dreg; \
458 type_from_op (cmp, sp [0], sp [1]); \
460 type_from_op (ins, sp [0], sp [1]); \
461 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
462 GET_BBLOCK (cfg, tblock, target); \
463 link_bblock (cfg, bblock, tblock); \
464 ins->inst_true_bb = tblock; \
465 if ((next_block)) { \
466 link_bblock (cfg, bblock, (next_block)); \
467 ins->inst_false_bb = (next_block); \
468 start_new_bblock = 1; \
470 GET_BBLOCK (cfg, tblock, ip); \
471 link_bblock (cfg, bblock, tblock); \
472 ins->inst_false_bb = tblock; \
473 start_new_bblock = 2; \
475 if (sp != stack_start) { \
476 handle_stack_args (cfg, stack_start, sp - stack_start); \
477 CHECK_UNVERIFIABLE (cfg); \
479 MONO_ADD_INS (bblock, cmp); \
480 MONO_ADD_INS (bblock, ins); \
484 * link_bblock: Links two basic blocks
486 * Links two basic blocks in the control flow graph: the 'from'
487 * argument is the starting block and the 'to' argument is the block
488 * that control flow reaches after 'from'.
491 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
493 MonoBasicBlock **newa;
497 if (from->cil_code) {
499 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
501 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
504 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
506 printf ("edge from entry to exit\n");
511 for (i = 0; i < from->out_count; ++i) {
512 if (to == from->out_bb [i]) {
518 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
519 for (i = 0; i < from->out_count; ++i) {
520 newa [i] = from->out_bb [i];
528 for (i = 0; i < to->in_count; ++i) {
529 if (from == to->in_bb [i]) {
535 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
536 for (i = 0; i < to->in_count; ++i) {
537 newa [i] = to->in_bb [i];
546 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
548 link_bblock (cfg, from, to);
552 * mono_find_block_region:
554 * We mark each basic block with a region ID. We use that to avoid BB
555 * optimizations when blocks are in different regions.
558 * A region token that encodes where this region is, and information
559 * about the clause owner for this block.
561 * The region encodes the try/catch/filter clause that owns this block
562 * as well as the type. -1 is a special value that represents a block
563 * that is in none of try/catch/filter.
566 mono_find_block_region (MonoCompile *cfg, int offset)
568 MonoMethodHeader *header = cfg->header;
569 MonoExceptionClause *clause;
572 for (i = 0; i < header->num_clauses; ++i) {
573 clause = &header->clauses [i];
574 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
575 (offset < (clause->handler_offset)))
576 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
578 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
579 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
580 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
581 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
582 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
584 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
587 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
588 return ((i + 1) << 8) | clause->flags;
/*
 * Collect (into a GList) the exception clauses whose flags equal TYPE and
 * which contain IP but not TARGET — i.e. the handlers that are exited when
 * branching from IP to TARGET.
 */
595 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
597 MonoMethodHeader *header = cfg->header;
598 MonoExceptionClause *clause;
602 for (i = 0; i < header->num_clauses; ++i) {
603 clause = &header->clauses [i];
604 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
605 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
606 if (clause->flags == type)
607 res = g_list_append (res, clause);
/*
 * Return the stack-pointer spill variable for the given EH REGION, creating
 * it and caching it in cfg->spvars on first use.
 */
614 mono_create_spvar_for_region (MonoCompile *cfg, int region)
618 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
622 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
623 /* prevent it from being register allocated */
624 var->flags |= MONO_INST_VOLATILE;
626 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Return the exception variable previously created for the handler at OFFSET, or NULL. */
630 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
632 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/*
 * Return the variable holding the caught exception object for the handler at
 * OFFSET, creating it and caching it in cfg->exvars on first use.
 */
636 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
640 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
644 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
645 /* prevent it from being register allocated */
646 var->flags |= MONO_INST_VOLATILE;
648 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
654 * Returns the type used in the eval stack when @type is loaded.
655 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
658 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
662 type = mini_replace_type (type);
663 inst->klass = klass = mono_class_from_mono_type (type);
665 inst->type = STACK_MP;
670 switch (type->type) {
672 inst->type = STACK_INV;
676 case MONO_TYPE_BOOLEAN:
682 inst->type = STACK_I4;
687 case MONO_TYPE_FNPTR:
688 inst->type = STACK_PTR;
690 case MONO_TYPE_CLASS:
691 case MONO_TYPE_STRING:
692 case MONO_TYPE_OBJECT:
693 case MONO_TYPE_SZARRAY:
694 case MONO_TYPE_ARRAY:
695 inst->type = STACK_OBJ;
699 inst->type = STACK_I8;
703 inst->type = STACK_R8;
705 case MONO_TYPE_VALUETYPE:
706 if (type->data.klass->enumtype) {
707 type = mono_class_enum_basetype (type->data.klass);
711 inst->type = STACK_VTYPE;
714 case MONO_TYPE_TYPEDBYREF:
715 inst->klass = mono_defaults.typed_reference_class;
716 inst->type = STACK_VTYPE;
718 case MONO_TYPE_GENERICINST:
719 type = &type->data.generic_class->container_class->byval_arg;
723 g_assert (cfg->generic_sharing_context);
724 if (mini_is_gsharedvt_type (cfg, type)) {
725 g_assert (cfg->gsharedvt);
726 inst->type = STACK_VTYPE;
728 inst->type = STACK_OBJ;
732 g_error ("unknown type 0x%02x in eval stack type", type->type);
737 * The following tables are used to quickly validate the IL code in type_from_op ().
740 bin_num_table [STACK_MAX] [STACK_MAX] = {
741 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
742 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
743 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
744 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
745 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
746 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
747 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
748 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
753 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
756 /* reduce the size of this table */
758 bin_int_table [STACK_MAX] [STACK_MAX] = {
759 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
760 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
761 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
762 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
763 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
764 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
765 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
766 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
770 bin_comp_table [STACK_MAX] [STACK_MAX] = {
771 /* Inv i L p F & O vt */
773 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
774 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
775 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
776 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
777 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
778 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
779 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
782 /* reduce the size of this table */
784 shift_table [STACK_MAX] [STACK_MAX] = {
785 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
786 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
787 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
788 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
789 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
790 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
791 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
792 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
796 * Tables to map from the non-specific opcode to the matching
797 * type-specific opcode.
799 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
801 binops_op_map [STACK_MAX] = {
802 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
805 /* handles from CEE_NEG to CEE_CONV_U8 */
807 unops_op_map [STACK_MAX] = {
808 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
811 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
813 ovfops_op_map [STACK_MAX] = {
814 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
817 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
819 ovf2ops_op_map [STACK_MAX] = {
820 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
823 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
825 ovf3ops_op_map [STACK_MAX] = {
826 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
829 /* handles from CEE_BEQ to CEE_BLT_UN */
831 beqops_op_map [STACK_MAX] = {
832 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
835 /* handles from CEE_CEQ to CEE_CLT_UN */
837 ceqops_op_map [STACK_MAX] = {
838 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
842 * Sets ins->type (the type on the eval stack) according to the
843 * type of the opcode and the arguments to it.
844 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
846 * FIXME: this function sets ins->type unconditionally in some cases, but
847 * it should set it to invalid for some types (a conv.x on an object)
850 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
852 switch (ins->opcode) {
859 /* FIXME: check unverifiable args for STACK_MP */
860 ins->type = bin_num_table [src1->type] [src2->type];
861 ins->opcode += binops_op_map [ins->type];
868 ins->type = bin_int_table [src1->type] [src2->type];
869 ins->opcode += binops_op_map [ins->type];
874 ins->type = shift_table [src1->type] [src2->type];
875 ins->opcode += binops_op_map [ins->type];
880 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
881 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
882 ins->opcode = OP_LCOMPARE;
883 else if (src1->type == STACK_R8)
884 ins->opcode = OP_FCOMPARE;
886 ins->opcode = OP_ICOMPARE;
888 case OP_ICOMPARE_IMM:
889 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
890 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
891 ins->opcode = OP_LCOMPARE_IMM;
903 ins->opcode += beqops_op_map [src1->type];
906 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
907 ins->opcode += ceqops_op_map [src1->type];
913 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
914 ins->opcode += ceqops_op_map [src1->type];
918 ins->type = neg_table [src1->type];
919 ins->opcode += unops_op_map [ins->type];
922 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
923 ins->type = src1->type;
925 ins->type = STACK_INV;
926 ins->opcode += unops_op_map [ins->type];
932 ins->type = STACK_I4;
933 ins->opcode += unops_op_map [src1->type];
936 ins->type = STACK_R8;
937 switch (src1->type) {
940 ins->opcode = OP_ICONV_TO_R_UN;
943 ins->opcode = OP_LCONV_TO_R_UN;
947 case CEE_CONV_OVF_I1:
948 case CEE_CONV_OVF_U1:
949 case CEE_CONV_OVF_I2:
950 case CEE_CONV_OVF_U2:
951 case CEE_CONV_OVF_I4:
952 case CEE_CONV_OVF_U4:
953 ins->type = STACK_I4;
954 ins->opcode += ovf3ops_op_map [src1->type];
956 case CEE_CONV_OVF_I_UN:
957 case CEE_CONV_OVF_U_UN:
958 ins->type = STACK_PTR;
959 ins->opcode += ovf2ops_op_map [src1->type];
961 case CEE_CONV_OVF_I1_UN:
962 case CEE_CONV_OVF_I2_UN:
963 case CEE_CONV_OVF_I4_UN:
964 case CEE_CONV_OVF_U1_UN:
965 case CEE_CONV_OVF_U2_UN:
966 case CEE_CONV_OVF_U4_UN:
967 ins->type = STACK_I4;
968 ins->opcode += ovf2ops_op_map [src1->type];
971 ins->type = STACK_PTR;
972 switch (src1->type) {
974 ins->opcode = OP_ICONV_TO_U;
978 #if SIZEOF_VOID_P == 8
979 ins->opcode = OP_LCONV_TO_U;
981 ins->opcode = OP_MOVE;
985 ins->opcode = OP_LCONV_TO_U;
988 ins->opcode = OP_FCONV_TO_U;
994 ins->type = STACK_I8;
995 ins->opcode += unops_op_map [src1->type];
997 case CEE_CONV_OVF_I8:
998 case CEE_CONV_OVF_U8:
999 ins->type = STACK_I8;
1000 ins->opcode += ovf3ops_op_map [src1->type];
1002 case CEE_CONV_OVF_U8_UN:
1003 case CEE_CONV_OVF_I8_UN:
1004 ins->type = STACK_I8;
1005 ins->opcode += ovf2ops_op_map [src1->type];
1009 ins->type = STACK_R8;
1010 ins->opcode += unops_op_map [src1->type];
1013 ins->type = STACK_R8;
1017 ins->type = STACK_I4;
1018 ins->opcode += ovfops_op_map [src1->type];
1021 case CEE_CONV_OVF_I:
1022 case CEE_CONV_OVF_U:
1023 ins->type = STACK_PTR;
1024 ins->opcode += ovfops_op_map [src1->type];
1027 case CEE_ADD_OVF_UN:
1029 case CEE_MUL_OVF_UN:
1031 case CEE_SUB_OVF_UN:
1032 ins->type = bin_num_table [src1->type] [src2->type];
1033 ins->opcode += ovfops_op_map [src1->type];
1034 if (ins->type == STACK_R8)
1035 ins->type = STACK_INV;
1037 case OP_LOAD_MEMBASE:
1038 ins->type = STACK_PTR;
1040 case OP_LOADI1_MEMBASE:
1041 case OP_LOADU1_MEMBASE:
1042 case OP_LOADI2_MEMBASE:
1043 case OP_LOADU2_MEMBASE:
1044 case OP_LOADI4_MEMBASE:
1045 case OP_LOADU4_MEMBASE:
1046 ins->type = STACK_PTR;
1048 case OP_LOADI8_MEMBASE:
1049 ins->type = STACK_I8;
1051 case OP_LOADR4_MEMBASE:
1052 case OP_LOADR8_MEMBASE:
1053 ins->type = STACK_R8;
1056 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
1060 if (ins->type == STACK_MP)
1061 ins->klass = mono_defaults.object_class;
1066 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1072 param_table [STACK_MAX] [STACK_MAX] = {
1077 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1081 switch (args->type) {
1091 for (i = 0; i < sig->param_count; ++i) {
1092 switch (args [i].type) {
1096 if (!sig->params [i]->byref)
1100 if (sig->params [i]->byref)
1102 switch (sig->params [i]->type) {
1103 case MONO_TYPE_CLASS:
1104 case MONO_TYPE_STRING:
1105 case MONO_TYPE_OBJECT:
1106 case MONO_TYPE_SZARRAY:
1107 case MONO_TYPE_ARRAY:
1114 if (sig->params [i]->byref)
1116 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1125 /*if (!param_table [args [i].type] [sig->params [i]->type])
1133 * When we need a pointer to the current domain many times in a method, we
1134 * call mono_domain_get() once and we store the result in a local variable.
1135 * This function returns the variable that represents the MonoDomain*.
1137 inline static MonoInst *
1138 mono_get_domainvar (MonoCompile *cfg)
1140 if (!cfg->domainvar)
1141 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1142 return cfg->domainvar;
1146 * The got_var contains the address of the Global Offset Table when AOT
1150 mono_get_got_var (MonoCompile *cfg)
1152 #ifdef MONO_ARCH_NEED_GOT_VAR
1153 if (!cfg->compile_aot)
1155 if (!cfg->got_var) {
1156 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1158 return cfg->got_var;
1165 mono_get_vtable_var (MonoCompile *cfg)
1167 g_assert (cfg->generic_sharing_context);
1169 if (!cfg->rgctx_var) {
1170 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1171 /* force the var to be stack allocated */
1172 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1175 return cfg->rgctx_var;
/* Map an eval-stack type (ins->type) back to the corresponding MonoType. */
1179 type_from_stack_type (MonoInst *ins) {
1180 switch (ins->type) {
1181 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1182 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1183 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1184 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1186 return &ins->klass->this_arg;
1187 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1188 case STACK_VTYPE: return &ins->klass->byval_arg;
1190 g_error ("stack type %d to monotype not handled\n", ins->type);
1195 static G_GNUC_UNUSED int
1196 type_to_stack_type (MonoType *t)
1198 t = mono_type_get_underlying_type (t);
1202 case MONO_TYPE_BOOLEAN:
1205 case MONO_TYPE_CHAR:
1212 case MONO_TYPE_FNPTR:
1214 case MONO_TYPE_CLASS:
1215 case MONO_TYPE_STRING:
1216 case MONO_TYPE_OBJECT:
1217 case MONO_TYPE_SZARRAY:
1218 case MONO_TYPE_ARRAY:
1226 case MONO_TYPE_VALUETYPE:
1227 case MONO_TYPE_TYPEDBYREF:
1229 case MONO_TYPE_GENERICINST:
1230 if (mono_type_generic_inst_is_valuetype (t))
1236 g_assert_not_reached ();
/* Return the element class accessed by the given ldelem.*/stelem.* CIL opcode. */
1243 array_access_to_klass (int opcode)
1247 return mono_defaults.byte_class;
1249 return mono_defaults.uint16_class;
1252 return mono_defaults.int_class;
1255 return mono_defaults.sbyte_class;
1258 return mono_defaults.int16_class;
1261 return mono_defaults.int32_class;
1263 return mono_defaults.uint32_class;
1266 return mono_defaults.int64_class;
1269 return mono_defaults.single_class;
1272 return mono_defaults.double_class;
1273 case CEE_LDELEM_REF:
1274 case CEE_STELEM_REF:
1275 return mono_defaults.object_class;
1277 g_assert_not_reached ();
1283 * We try to share variables when possible
1286 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1291 /* inlining can result in deeper stacks */
1292 if (slot >= cfg->header->max_stack)
1293 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1295 pos = ins->type - 1 + slot * STACK_MAX;
1297 switch (ins->type) {
1304 if ((vnum = cfg->intvars [pos]))
1305 return cfg->varinfo [vnum];
1306 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1307 cfg->intvars [pos] = res->inst_c0;
1310 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1316 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1319 * Don't use this if a generic_context is set, since that means AOT can't
1320 * look up the method using just the image+token.
1321 * table == 0 means this is a reference made from a wrapper.
1323 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1324 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1325 jump_info_token->image = image;
1326 jump_info_token->token = token;
1327 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1332 * This function is called to handle items that are left on the evaluation stack
1333 * at basic block boundaries. What happens is that we save the values to local variables
1334 * and we reload them later when first entering the target basic block (with the
1335 * handle_loaded_temps () function).
1336 * A single joint point will use the same variables (stored in the array bb->out_stack or
1337 * bb->in_stack, if the basic block is before or after the joint point).
1339 * This function needs to be called _before_ emitting the last instruction of
1340 * the bb (i.e. before emitting a branch).
1341 * If the stack merge fails at a join point, cfg->unverifiable is set.
1344 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1347 MonoBasicBlock *bb = cfg->cbb;
1348 MonoBasicBlock *outb;
1349 MonoInst *inst, **locals;
1354 if (cfg->verbose_level > 3)
1355 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First call for this bb: decide which variables hold the outgoing stack. */
1356 if (!bb->out_scount) {
1357 bb->out_scount = count;
1358 //printf ("bblock %d has out:", bb->block_num);
/* Prefer reusing the in_stack of a successor block, if one already has it. */
1360 for (i = 0; i < bb->out_count; ++i) {
1361 outb = bb->out_bb [i];
1362 /* exception handlers are linked, but they should not be considered for stack args */
1363 if (outb->flags & BB_EXCEPTION_HANDLER)
1365 //printf (" %d", outb->block_num);
1366 if (outb->in_stack) {
1368 bb->out_stack = outb->in_stack;
1374 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1375 for (i = 0; i < count; ++i) {
1377 * try to reuse temps already allocated for this purpouse, if they occupy the same
1378 * stack slot and if they are of the same type.
1379 * This won't cause conflicts since if 'local' is used to
1380 * store one of the values in the in_stack of a bblock, then
1381 * the same variable will be used for the same outgoing stack
1383 * This doesn't work when inlining methods, since the bblocks
1384 * in the inlined methods do not inherit their in_stack from
1385 * the bblock they are inlined to. See bug #58863 for an
1388 if (cfg->inlined_method)
1389 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1391 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate this bb's out_stack to successors without an in_stack yet;
 * a mismatched in_scount at a join point marks the method unverifiable. */
1396 for (i = 0; i < bb->out_count; ++i) {
1397 outb = bb->out_bb [i];
1398 /* exception handlers are linked, but they should not be considered for stack args */
1399 if (outb->flags & BB_EXCEPTION_HANDLER)
1401 if (outb->in_scount) {
1402 if (outb->in_scount != bb->out_scount) {
1403 cfg->unverifiable = TRUE;
1406 continue; /* check they are the same locals */
1408 outb->in_scount = count;
1409 outb->in_stack = bb->out_stack;
1412 locals = bb->out_stack;
/* Spill the current stack items into the chosen variables. */
1414 for (i = 0; i < count; ++i) {
1415 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1416 inst->cil_code = sp [i]->cil_code;
1417 sp [i] = locals [i];
1418 if (cfg->verbose_level > 3)
1419 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1423 * It is possible that the out bblocks already have in_stack assigned, and
1424 * the in_stacks differ. In this case, we will store to all the different
1431 /* Find a bblock which has a different in_stack */
1433 while (bindex < bb->out_count) {
1434 outb = bb->out_bb [bindex];
1435 /* exception handlers are linked, but they should not be considered for stack args */
1436 if (outb->flags & BB_EXCEPTION_HANDLER) {
1440 if (outb->in_stack != locals) {
/* Store to this distinct in_stack as well, then continue scanning from it. */
1441 for (i = 0; i < count; ++i) {
1442 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1443 inst->cil_code = sp [i]->cil_code;
1444 sp [i] = locals [i];
1445 if (cfg->verbose_level > 3)
1446 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1448 locals = outb->in_stack;
1457 /* Emit code which loads interface_offsets [klass->interface_id]
1458 * The array is stored in memory before vtable.
1461 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1463 if (cfg->compile_aot) {
1464 int ioffset_reg = alloc_preg (cfg);
1465 int iid_reg = alloc_preg (cfg);
/* AOT: the interface id is not known at compile time, so it is loaded
 * through an ADJUSTED_IID patch-info constant and added to the vtable. */
1467 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1468 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1469 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT: the id is a constant; index the array at a negative offset from the vtable. */
1472 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_interface_bitmap_check:
 * Emits IR that sets 'intf_bit_reg' to a nonzero value iff the interface
 * bitmap found at base_reg+offset has the bit for klass->interface_id set.
 * With COMPRESSED_INTERFACE_BITMAP the test is done via the
 * mono_class_interface_match icall; otherwise by loading the bitmap byte
 * (id >> 3) and masking the bit (id & 7).
 */
1477 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1479 int ibitmap_reg = alloc_preg (cfg);
1480 #ifdef COMPRESSED_INTERFACE_BITMAP
1482 MonoInst *res, *ins;
1483 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1484 MONO_ADD_INS (cfg->cbb, ins);
1486 if (cfg->compile_aot)
1487 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1489 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1490 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1491 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1493 int ibitmap_byte_reg = alloc_preg (cfg);
1495 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1497 if (cfg->compile_aot) {
/* AOT: interface_id is a patch-time constant, so the byte index and the
 * bit mask must both be computed at run time. */
1498 int iid_reg = alloc_preg (cfg);
1499 int shifted_iid_reg = alloc_preg (cfg);
1500 int ibitmap_byte_address_reg = alloc_preg (cfg);
1501 int masked_iid_reg = alloc_preg (cfg);
1502 int iid_one_bit_reg = alloc_preg (cfg);
1503 int iid_bit_reg = alloc_preg (cfg);
1504 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1505 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1506 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1507 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1508 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1509 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1510 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1511 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: both the byte offset and the mask are compile-time constants. */
1513 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1514 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1520 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1521 * stored in "klass_reg" implements the interface "klass".
/* Thin wrapper: checks the bitmap at MonoClass.interface_bitmap. */
1524 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1526 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1530 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1531 * stored in "vtable_reg" implements the interface "klass".
/* Thin wrapper: checks the bitmap at MonoVTable.interface_bitmap. */
1534 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1536 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1540 * Emit code which checks whenever the interface id of @klass is smaller than
1541 * than the value given by max_iid_reg.
/* On failure: branches to false_target when given, otherwise throws
 * InvalidCastException (based on the two visible emit paths below). */
1544 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1545 MonoBasicBlock *false_target)
1547 if (cfg->compile_aot) {
1548 int iid_reg = alloc_preg (cfg);
1549 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1550 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1553 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1555 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1557 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1560 /* Same as above, but obtains max_iid from a vtable */
1562 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1563 MonoBasicBlock *false_target)
1565 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is loaded as an unsigned 16-bit field of MonoVTable. */
1567 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1568 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1571 /* Same as above, but obtains max_iid from a klass */
1573 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1574 MonoBasicBlock *false_target)
1576 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is loaded as an unsigned 16-bit field of MonoClass. */
1578 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1579 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 * Emits an isinst-style check: walks the supertypes table of the class in
 * 'klass_reg' and compares the entry at klass->idepth - 1 against 'klass'
 * (or klass_ins / an AOT class constant), branching to true_target on a
 * match and to false_target when the idepth is too small.
 * NOTE(review): lines are elided in this excerpt; the klass_ins branch
 * condition itself is not visible.
 */
1583 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1585 int idepth_reg = alloc_preg (cfg);
1586 int stypes_reg = alloc_preg (cfg);
1587 int stype = alloc_preg (cfg);
1589 mono_class_setup_supertypes (klass);
/* Only check idepth when it can exceed the statically sized supertable. */
1591 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1592 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1593 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1594 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1596 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1597 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1599 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1600 } else if (cfg->compile_aot) {
1601 int const_reg = alloc_preg (cfg);
1602 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1603 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1605 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1607 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst check without an explicit klass instruction. */
1611 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1613 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 * Emits an interface cast check against the vtable in 'vtable_reg':
 * first the max-iid range check, then the interface bitmap bit test.
 * Branches to true_target when given, else throws InvalidCastException.
 */
1617 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1619 int intf_reg = alloc_preg (cfg);
1621 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1622 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1623 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1625 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1627 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1631 * Variant of the above that takes a register to the class, not the vtable.
1634 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1636 int intf_bit_reg = alloc_preg (cfg);
1638 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1639 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1640 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
/* Nonzero bit => interface implemented: branch; otherwise throw. */
1642 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1644 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 * Emits an exact class-equality check of 'klass_reg' against 'klass'
 * (or klass_inst->dreg / an AOT class constant) and throws
 * InvalidCastException on mismatch.
 * NOTE(review): the condition guarding the klass_inst branch is elided here.
 */
1648 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1651 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1652 } else if (cfg->compile_aot) {
1653 int const_reg = alloc_preg (cfg);
1654 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1655 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1657 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1659 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact class check without an explicit klass instruction. */
1663 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1665 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 * Compares 'klass_reg' against 'klass' (AOT constant or immediate) and
 * emits a branch with 'branch_op' to 'target' instead of throwing.
 */
1669 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1671 if (cfg->compile_aot) {
1672 int const_reg = alloc_preg (cfg);
1673 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1674 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1676 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1678 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/* Forward declaration: mini_emit_castclass_inst below recurses through it
 * for the element class of arrays of arrays. */
1682 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 * Emits a castclass check of the class in 'klass_reg' against 'klass',
 * throwing InvalidCastException on failure. The visible array branch checks
 * rank and then the element (cast_class), with special handling for
 * enum/object element classes and SZARRAY bounds; the non-array branch
 * walks the supertypes table like mini_emit_isninst_cast_inst.
 * NOTE(review): the surrounding if/else structure is partially elided here.
 */
1685 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1688 int rank_reg = alloc_preg (cfg);
1689 int eclass_reg = alloc_preg (cfg);
1691 g_assert (!klass_inst);
/* Array case: the rank must match exactly. */
1692 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1693 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1694 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1695 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1696 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
1697 if (klass->cast_class == mono_defaults.object_class) {
1698 int parent_reg = alloc_preg (cfg);
1699 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1700 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1701 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1702 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1703 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1704 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1705 } else if (klass->cast_class == mono_defaults.enum_class) {
1706 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1707 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1708 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1710 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1711 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1714 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1715 /* Check that the object is a vector too */
1716 int bounds_reg = alloc_preg (cfg);
1717 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1718 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1719 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array case: supertype-table walk with exact check at klass->idepth - 1. */
1722 int idepth_reg = alloc_preg (cfg);
1723 int stypes_reg = alloc_preg (cfg);
1724 int stype = alloc_preg (cfg);
1726 mono_class_setup_supertypes (klass);
1728 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1729 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1730 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1731 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1733 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1734 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1735 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass check without an explicit klass instruction. */
1740 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1742 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 * Emits inline IR to fill [destreg+offset, destreg+offset+size) with 'val'
 * (asserted to be 0). Small aligned sizes become single immediate stores;
 * larger regions are filled from a register, widest stores first when
 * unaligned access is allowed.
 * NOTE(review): the loop/size-decrement lines are elided in this excerpt.
 */
1746 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1750 g_assert (val == 0);
/* Fast path: a single store covers the whole region. */
1755 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1758 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1761 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1764 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1766 #if SIZEOF_REGISTER == 8
1768 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General path: materialize val in a register once. */
1774 val_reg = alloc_preg (cfg);
1776 if (SIZEOF_REGISTER == 8)
1777 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1779 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned destination: fall back to byte stores. */
1782 /* This could be optimized further if neccesary */
1784 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1791 #if !NO_UNALIGNED_ACCESS
1792 if (SIZEOF_REGISTER == 8) {
/* Head: 4-byte store(s) to reach 8-byte alignment, then 8-byte stores. */
1794 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1799 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* Tail: progressively narrower stores for the remainder. */
1807 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1812 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1817 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 * Emits inline IR to copy 'size' bytes from srcreg+soffset to
 * destreg+doffset, using the widest load/store pairs the alignment (and
 * NO_UNALIGNED_ACCESS) allows, narrowing for the tail.
 * NOTE(review): the loop/size-decrement lines are elided in this excerpt.
 */
1824 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1831 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1832 g_assert (size < 10000);
/* Unaligned source/destination: copy byte by byte. */
1835 /* This could be optimized further if neccesary */
1837 cur_reg = alloc_preg (cfg);
1838 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1839 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1846 #if !NO_UNALIGNED_ACCESS
1847 if (SIZEOF_REGISTER == 8) {
/* 8-byte copies on 64-bit targets that tolerate unaligned access. */
1849 cur_reg = alloc_preg (cfg);
1850 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1851 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Remainder: 4-, 2-, then 1-byte copies. */
1860 cur_reg = alloc_preg (cfg);
1861 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1862 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1868 cur_reg = alloc_preg (cfg);
1869 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1870 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1876 cur_reg = alloc_preg (cfg);
1877 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1878 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * emit_tls_set:
 * Emits IR to store 'sreg1' into the TLS slot identified by 'tls_key'.
 * AOT uses OP_TLS_SET_REG with the offset loaded from a patch-info
 * constant; JIT uses OP_TLS_SET with the offset resolved immediately.
 */
1886 emit_tls_set (MonoCompile *cfg, int sreg1, int tls_key)
1890 if (cfg->compile_aot) {
1891 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1892 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1894 ins->sreg2 = c->dreg;
1895 MONO_ADD_INS (cfg->cbb, ins);
1897 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1899 ins->inst_offset = mini_get_tls_offset (tls_key);
1900 MONO_ADD_INS (cfg->cbb, ins);
1907 * Emit IR to push the current LMF onto the LMF stack.
/* NOTE(review): elided excerpt — the branch structure selecting between the
 * TLS_KEY_LMF fast path and the lmf_addr-based paths is only partly visible. */
1910 emit_push_lmf (MonoCompile *cfg)
1913 * Emit IR to push the LMF:
1914 * lmf_addr = <lmf_addr from tls>
1915 * lmf->lmf_addr = lmf_addr
1916 * lmf->prev_lmf = *lmf_addr
1919 int lmf_reg, prev_lmf_reg;
1920 MonoInst *ins, *lmf_ins;
/* Fast path: the current LMF lives directly in TLS (TLS_KEY_LMF). */
1925 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
1926 /* Load current lmf */
1927 lmf_ins = mono_get_lmf_intrinsic (cfg);
1929 MONO_ADD_INS (cfg->cbb, lmf_ins);
1930 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1931 lmf_reg = ins->dreg;
1932 /* Save previous_lmf */
1933 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
1935 emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
1938 * Store lmf_addr in a variable, so it can be allocated to a global register.
1940 if (!cfg->lmf_addr_var)
1941 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* lmf_addr obtained from the JIT TLS structure (&jit_tls->lmf)... */
1944 ins = mono_get_jit_tls_intrinsic (cfg);
1946 int jit_tls_dreg = ins->dreg;
1948 MONO_ADD_INS (cfg->cbb, ins);
1949 lmf_reg = alloc_preg (cfg);
1950 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, G_STRUCT_OFFSET (MonoJitTlsData, lmf));
/* ...or via the mono_get_lmf_addr icall / intrinsic fallbacks. */
1952 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
1955 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
1957 MONO_ADD_INS (cfg->cbb, lmf_ins);
1960 MonoInst *args [16], *jit_tls_ins, *ins;
1962 /* Inline mono_get_lmf_addr () */
1963 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
1965 /* Load mono_jit_tls_id */
1966 EMIT_NEW_AOTCONST (cfg, args [0], MONO_PATCH_INFO_JIT_TLS_ID, NULL);
1967 /* call pthread_getspecific () */
1968 jit_tls_ins = mono_emit_jit_icall (cfg, pthread_getspecific, args);
1969 /* lmf_addr = &jit_tls->lmf */
1970 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, cfg->lmf_addr_var->dreg, jit_tls_ins->dreg, G_STRUCT_OFFSET (MonoJitTlsData, lmf));
1973 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
1977 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
1979 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1980 lmf_reg = ins->dreg;
1982 prev_lmf_reg = alloc_preg (cfg);
1983 /* Save previous_lmf */
1984 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
1985 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* Make our LMF the head of the LMF stack: *lmf_addr = lmf. */
1987 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
1994 * Emit IR to pop the current LMF from the LMF stack.
1997 emit_pop_lmf (MonoCompile *cfg)
1999 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
2005 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2006 lmf_reg = ins->dreg;
/* Fast path: LMF lives in TLS — restore previous_lmf directly into the slot. */
2008 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2009 /* Load previous_lmf */
2010 prev_lmf_reg = alloc_preg (cfg);
2011 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
2013 emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
2016 * Emit IR to pop the LMF:
2017 * *(lmf->lmf_addr) = lmf->prev_lmf
2019 /* This could be called before emit_push_lmf () */
2020 if (!cfg->lmf_addr_var)
2021 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2022 lmf_addr_reg = cfg->lmf_addr_var->dreg;
2024 prev_lmf_reg = alloc_preg (cfg);
2025 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
2026 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * emit_instrumentation_call:
 * Emits a profiler enter/leave icall ('func') passing cfg->method, but only
 * when enter/leave profiling is enabled and the method is not being inlined.
 */
2031 emit_instrumentation_call (MonoCompile *cfg, void *func)
2033 MonoInst *iargs [1];
2036 * Avoid instrumenting inlined methods since it can
2037 * distort profiling results.
2039 if (cfg->method != cfg->current_method)
2042 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
2043 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
2044 mono_emit_jit_icall (cfg, func, iargs);
/*
 * ret_type_to_call_opcode:
 * Maps a return type to the call opcode family (CALL / VOIDCALL / LCALL /
 * FCALL / VCALL), picking the _REG variant for calli and the _MEMBASE
 * variant for virtual calls.
 * NOTE(review): several case labels are elided in this excerpt.
 */
2049 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
/* Byref returns are pointer-sized: plain CALL. (guard condition elided) */
2052 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2055 type = mini_get_basic_type_from_generic (gsctx, type);
2056 type = mini_replace_type (type);
2057 switch (type->type) {
2058 case MONO_TYPE_VOID:
2059 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2062 case MONO_TYPE_BOOLEAN:
2065 case MONO_TYPE_CHAR:
2068 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2072 case MONO_TYPE_FNPTR:
2073 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2074 case MONO_TYPE_CLASS:
2075 case MONO_TYPE_STRING:
2076 case MONO_TYPE_OBJECT:
2077 case MONO_TYPE_SZARRAY:
2078 case MONO_TYPE_ARRAY:
2079 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2082 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
2085 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2086 case MONO_TYPE_VALUETYPE:
/* Enums are handled as their underlying basetype. */
2087 if (type->data.klass->enumtype) {
2088 type = mono_class_enum_basetype (type->data.klass);
2091 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2092 case MONO_TYPE_TYPEDBYREF:
2093 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2094 case MONO_TYPE_GENERICINST:
/* Generic instances: retry with the container class's byval type. */
2095 type = &type->data.generic_class->container_class->byval_arg;
2098 case MONO_TYPE_MVAR:
2100 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2102 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
2108 * target_type_is_incompatible:
2109 * @cfg: MonoCompile context
2111 * Check that the item @arg on the evaluation stack can be stored
2112 * in the target type (can be a local, or field, etc).
2113 * The cfg arg can be used to check if we need verification or just
2116 * Returns: non-0 value if arg can't be stored on a target.
/* NOTE(review): elided excerpt — many case labels and the 'return 1'
 * failure lines are missing between the numbered lines below. */
2119 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2121 MonoType *simple_type;
2124 target = mini_replace_type (target);
2125 if (target->byref) {
2126 /* FIXME: check that the pointed to types match */
2127 if (arg->type == STACK_MP)
2128 return arg->klass != mono_class_from_mono_type (target);
2129 if (arg->type == STACK_PTR)
2134 simple_type = mono_type_get_underlying_type (target);
2135 switch (simple_type->type) {
2136 case MONO_TYPE_VOID:
2140 case MONO_TYPE_BOOLEAN:
2143 case MONO_TYPE_CHAR:
2146 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2150 /* STACK_MP is needed when setting pinned locals */
2151 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2156 case MONO_TYPE_FNPTR:
2158 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2159 * in native int. (#688008).
2161 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2164 case MONO_TYPE_CLASS:
2165 case MONO_TYPE_STRING:
2166 case MONO_TYPE_OBJECT:
2167 case MONO_TYPE_SZARRAY:
2168 case MONO_TYPE_ARRAY:
2169 if (arg->type != STACK_OBJ)
2171 /* FIXME: check type compatibility */
2175 if (arg->type != STACK_I8)
2180 if (arg->type != STACK_R8)
/* Value types must match both stack type and exact klass. */
2183 case MONO_TYPE_VALUETYPE:
2184 if (arg->type != STACK_VTYPE)
2186 klass = mono_class_from_mono_type (simple_type);
2187 if (klass != arg->klass)
2190 case MONO_TYPE_TYPEDBYREF:
2191 if (arg->type != STACK_VTYPE)
2193 klass = mono_class_from_mono_type (simple_type);
2194 if (klass != arg->klass)
2197 case MONO_TYPE_GENERICINST:
2198 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2199 if (arg->type != STACK_VTYPE)
2201 klass = mono_class_from_mono_type (simple_type);
2202 if (klass != arg->klass)
2206 if (arg->type != STACK_OBJ)
2208 /* FIXME: check type compatibility */
/* Type variables: only valid under generic sharing; may be a vtype. */
2212 case MONO_TYPE_MVAR:
2213 g_assert (cfg->generic_sharing_context);
2214 if (mini_type_var_is_vt (cfg, simple_type)) {
2215 if (arg->type != STACK_VTYPE)
2218 if (arg->type != STACK_OBJ)
2223 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2229 * Prepare arguments for passing to a function call.
2230 * Return a non-zero value if the arguments can't be passed to the given
2232 * The type checks are not yet complete and some conversions may need
2233 * casts on 32 or 64 bit architectures.
2235 * FIXME: implement this using target_type_is_incompatible ()
/* NOTE(review): elided excerpt — case labels and the nonzero-return lines
 * are missing between the numbered lines below. */
2238 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2240 MonoType *simple_type;
/* 'this' argument (when present) must be an object, managed or native pointer. */
2244 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2248 for (i = 0; i < sig->param_count; ++i) {
2249 if (sig->params [i]->byref) {
2250 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2254 simple_type = sig->params [i];
2255 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
2257 switch (simple_type->type) {
2258 case MONO_TYPE_VOID:
2263 case MONO_TYPE_BOOLEAN:
2266 case MONO_TYPE_CHAR:
2269 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2275 case MONO_TYPE_FNPTR:
2276 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2279 case MONO_TYPE_CLASS:
2280 case MONO_TYPE_STRING:
2281 case MONO_TYPE_OBJECT:
2282 case MONO_TYPE_SZARRAY:
2283 case MONO_TYPE_ARRAY:
2284 if (args [i]->type != STACK_OBJ)
2289 if (args [i]->type != STACK_I8)
2294 if (args [i]->type != STACK_R8)
2297 case MONO_TYPE_VALUETYPE:
/* Enums are checked as their underlying basetype. */
2298 if (simple_type->data.klass->enumtype) {
2299 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2302 if (args [i]->type != STACK_VTYPE)
2305 case MONO_TYPE_TYPEDBYREF:
2306 if (args [i]->type != STACK_VTYPE)
2309 case MONO_TYPE_GENERICINST:
2310 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2313 case MONO_TYPE_MVAR:
2315 if (args [i]->type != STACK_VTYPE)
2319 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 * Maps a *_MEMBASE call opcode to its direct-call counterpart.
 * NOTE(review): the return expressions between the case labels are elided
 * in this excerpt.
 */
2327 callvirt_to_call (int opcode)
2330 case OP_CALL_MEMBASE:
2332 case OP_VOIDCALL_MEMBASE:
2334 case OP_FCALL_MEMBASE:
2336 case OP_VCALL_MEMBASE:
2338 case OP_LCALL_MEMBASE:
2341 g_assert_not_reached ();
2347 #ifdef MONO_ARCH_HAVE_IMT
2348 /* Either METHOD or IMT_ARG needs to be set */
/*
 * emit_imt_argument:
 * Emits the hidden IMT argument for an interface call: either the imt_arg
 * register, an AOT METHODCONST, or a direct method pointer constant, and
 * registers it as an out-argument (MONO_ARCH_IMT_REG when available).
 * LLVM and non-LLVM paths differ only in how the register is attached.
 */
2350 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2354 if (COMPILE_LLVM (cfg)) {
2355 method_reg = alloc_preg (cfg);
2358 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2359 } else if (cfg->compile_aot) {
2360 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2363 MONO_INST_NEW (cfg, ins, OP_PCONST);
2364 ins->inst_p0 = method;
2365 ins->dreg = method_reg;
2366 MONO_ADD_INS (cfg->cbb, ins);
2370 call->imt_arg_reg = method_reg;
2372 #ifdef MONO_ARCH_IMT_REG
2373 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2375 /* Need this to keep the IMT arg alive */
2376 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
2381 #ifdef MONO_ARCH_IMT_REG
2382 method_reg = alloc_preg (cfg);
2385 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2386 } else if (cfg->compile_aot) {
2387 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2390 MONO_INST_NEW (cfg, ins, OP_PCONST);
2391 ins->inst_p0 = method;
2392 ins->dreg = method_reg;
2393 MONO_ADD_INS (cfg->cbb, ins);
2396 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* No IMT register on this architecture: let the backend place the argument. */
2398 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/*
 * mono_patch_info_new:
 * Allocates a MonoJumpInfo from 'mp' and fills in its target; the visible
 * lines store data.target (ip/type assignments are elided in this excerpt).
 */
2403 static MonoJumpInfo *
2404 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2406 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2410 ji->data.target = target;
/* Returns the context-used flags for 'klass' when compiling with generic
 * sharing; under non-sharing compilation the check is skipped. */
2416 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2418 if (cfg->generic_sharing_context)
2419 return mono_class_check_context_used (klass);
/* Returns the context-used flags for 'method' when compiling with generic
 * sharing; under non-sharing compilation the check is skipped. */
2425 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2427 if (cfg->generic_sharing_context)
2428 return mono_method_check_context_used (method);
2434 * check_method_sharing:
2436 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
/* Results are returned through the optional out_pass_vtable/out_pass_mrgctx
 * pointers; NOTE(review): some lines of the mrgctx branch are elided here. */
2439 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2441 gboolean pass_vtable = FALSE;
2442 gboolean pass_mrgctx = FALSE;
/* vtable case: static methods or valuetype methods of generic classes. */
2444 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2445 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2446 gboolean sharable = FALSE;
2448 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2451 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2452 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
2453 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2455 sharable = sharing_enabled && context_sharable;
2459 * Pass vtable iff target method might
2460 * be shared, which means that sharing
2461 * is enabled for its class and its
2462 * context is sharable (and it's not a
2465 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* mrgctx case: methods with their own method_inst context. */
2469 if (mini_method_get_context (cmethod) &&
2470 mini_method_get_context (cmethod)->method_inst) {
2471 g_assert (!pass_vtable);
2473 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2476 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2477 MonoGenericContext *context = mini_method_get_context (cmethod);
2478 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2480 if (sharing_enabled && context_sharable)
2482 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, mono_method_signature (cmethod)))
2487 if (out_pass_vtable)
2488 *out_pass_vtable = pass_vtable;
2489 if (out_pass_mrgctx)
2490 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 *
 * Build (but do not emit) a MonoCallInst for a call with signature SIG and
 * arguments ARGS. Handles tail calls, valuetype returns (via vret_var or an
 * OP_OUTARG_VTRETADDR temp), soft-float r4 argument conversion, and the
 * arch/LLVM-specific outgoing-argument setup. CALLI/VIRTUAL/TAIL/RGCTX/
 * UNBOX_TRAMPOLINE are boolean-style flags describing the call shape.
 */
2493 inline static MonoCallInst *
2494 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2495 		     MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
2499 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
	/* Profiler leave event must fire before control transfers away on a tail call. */
2504 			emit_instrumentation_call (cfg, mono_profiler_method_leave);
2506 		MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2508 		MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2511 	call->signature = sig;
2512 	call->rgctx_reg = rgctx;
2513 	sig_ret = mini_replace_type (sig->ret);
2515 	type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
	/* Valuetype return: reuse the caller-provided vret_addr when available... */
2518 		if (mini_type_is_vtype (cfg, sig_ret)) {
2519 			call->vret_var = cfg->vret_addr;
2520 			//g_assert_not_reached ();
	/* ...otherwise allocate a temp and pass its address.
	 * NOTE(review): both branches test mini_type_is_vtype as shown — the
	 * distinguishing outer condition is on lines missing from this view;
	 * confirm against the full file before editing. */
2522 	} else if (mini_type_is_vtype (cfg, sig_ret)) {
2523 		MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2526 		temp->backend.is_pinvoke = sig->pinvoke;
2529 		 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2530 		 * address of return value to increase optimization opportunities.
2531 		 * Before vtype decomposition, the dreg of the call ins itself represents the
2532 		 * fact the call modifies the return value. After decomposition, the call will
2533 		 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2534 		 * will be transformed into an LDADDR.
2536 		MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2537 		loada->dreg = alloc_preg (cfg);
2538 		loada->inst_p0 = temp;
2539 		/* We reference the call too since call->dreg could change during optimization */
2540 		loada->inst_p1 = call;
2541 		MONO_ADD_INS (cfg->cbb, loada);
2543 		call->inst.dreg = temp->dreg;
2545 		call->vret_var = loada;
2546 	} else if (!MONO_TYPE_IS_VOID (sig_ret))
2547 		call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2549 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2550 	if (COMPILE_SOFT_FLOAT (cfg)) {
2552 		 * If the call has a float argument, we would need to do an r8->r4 conversion using
2553 		 * an icall, but that cannot be done during the call sequence since it would clobber
2554 		 * the call registers + the stack. So we do it before emitting the call.
2556 		for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2558 			MonoInst *in = call->args [i];
			/* Index 0 is the implicit `this` when sig->hasthis. */
2560 			if (i >= sig->hasthis)
2561 				t = sig->params [i - sig->hasthis];
2563 				t = &mono_defaults.int_class->byval_arg;
2564 			t = mono_type_get_underlying_type (t);
2566 			if (!t->byref && t->type == MONO_TYPE_R4) {
2567 				MonoInst *iargs [1];
2571 				conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2573 				/* The result will be in an int vreg */
2574 				call->args [i] = conv;
2580 	call->need_unbox_trampoline = unbox_trampoline;
	/* NOTE(review): the duplicated mono_arch_emit_call below is almost
	 * certainly separated by #ifdef ENABLE_LLVM lines missing here. */
2583 	if (COMPILE_LLVM (cfg))
2584 		mono_llvm_emit_call (cfg, call);
2586 		mono_arch_emit_call (cfg, call);
2588 	mono_arch_emit_call (cfg, call);
	/* Reserve outgoing-arg stack space for the largest call in the method. */
2591 	cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2592 	cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *
 * Attach the rgctx argument (in vreg RGCTX_REG) to CALL. On architectures
 * with a dedicated rgctx register it is passed there; otherwise it is
 * recorded in rgctx_arg_reg for the backend to place.
 */
2598 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2600 #ifdef MONO_ARCH_RGCTX_REG
2601 	mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2602 	cfg->uses_rgctx_reg = TRUE;
2603 	call->rgctx_reg = TRUE;
2605 	call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 *
 * Emit an indirect call through ADDR with signature SIG. Optionally passes
 * an IMT argument and/or an rgctx argument. For pinvoke wrappers with
 * calling-convention checking enabled, brackets the call with OP_GET_SP /
 * OP_SET_SP so a callee that unbalances the stack (wrong callconv) is
 * detected and reported instead of crashing.
 */
2612 inline static MonoInst*
2613 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2618 	gboolean check_sp = FALSE;
2620 	if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2621 		WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2623 		if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
	/* Copy the rgctx arg so optimization passes can't invalidate its vreg. */
2628 		rgctx_reg = mono_alloc_preg (cfg);
2629 		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
	/* Record the stack pointer before the call for the balance check below. */
2633 		if (!cfg->stack_inbalance_var)
2634 			cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2636 		MONO_INST_NEW (cfg, ins, OP_GET_SP);
2637 		ins->dreg = cfg->stack_inbalance_var->dreg;
2638 		MONO_ADD_INS (cfg->cbb, ins);
2641 	call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2643 	call->inst.sreg1 = addr->dreg;
2646 		emit_imt_argument (cfg, call, NULL, imt_arg);
2648 	MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
	/* Compare SP after the call against the saved value. */
2653 		sp_reg = mono_alloc_preg (cfg);
2655 		MONO_INST_NEW (cfg, ins, OP_GET_SP);
2657 		MONO_ADD_INS (cfg->cbb, ins);
2659 		/* Restore the stack so we don't crash when throwing the exception */
2660 		MONO_INST_NEW (cfg, ins, OP_SET_SP);
2661 		ins->sreg1 = cfg->stack_inbalance_var->dreg;
2662 		MONO_ADD_INS (cfg->cbb, ins);
2664 		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2665 		MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2669 		set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2671 	return (MonoInst*)call;
2675 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2678 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2680 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *
 * Emit IR for a call to METHOD with arguments ARGS. THIS being non-NULL
 * selects a virtual-style call. Handles: string ctors (ret type rewrite),
 * transparent-proxy/remoting dispatch, delegate Invoke fast path through
 * delegate->invoke_impl, devirtualization of non-virtual/final methods,
 * interface dispatch via IMT, and plain vtable dispatch. Returns the call
 * instruction as a MonoInst*.
 */
2683 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2684 							MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2686 #ifndef DISABLE_REMOTING
2687 	gboolean might_be_remote = FALSE;
2689 	gboolean virtual = this != NULL;
2690 	gboolean enable_for_aot = TRUE;
2694 	gboolean need_unbox_trampoline;
2697 		sig = mono_method_signature (method);
	/* Copy the rgctx arg to a fresh vreg (see mono_emit_calli). */
2700 		rgctx_reg = mono_alloc_preg (cfg);
2701 		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2704 	if (method->string_ctor) {
2705 		/* Create the real signature */
2706 		/* FIXME: Cache these */
2707 		MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2708 		ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2713 	context_used = mini_method_check_context_used (cfg, method);
2715 #ifndef DISABLE_REMOTING
	/* A call might hit a transparent proxy when the receiver is
	 * MarshalByRef (or object) and we cannot prove otherwise. */
2716 	might_be_remote = this && sig->hasthis &&
2717 		(mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2718 		!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
2720 	if (might_be_remote && context_used) {
2723 		g_assert (cfg->generic_sharing_context);
		/* Shared code: fetch the remoting-check wrapper through the rgctx
		 * and call it indirectly, since wrappers can't be gshared. */
2725 		addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2727 		return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
	/* Calls on object/interface methods with a valuetype receiver may need
	 * the unbox trampoline to unwrap `this`. */
2731 	need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2733 	call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2735 #ifndef DISABLE_REMOTING
2736 	if (might_be_remote)
2737 		call->method = mono_marshal_get_remoting_invoke_with_check (method);
2740 		call->method = method;
2741 	call->inst.flags |= MONO_INST_HAS_METHOD;
2742 	call->inst.inst_left = this;
2743 	call->tail_call = tail;
2746 		int vtable_reg, slot_reg, this_reg;
2749 		this_reg = this->dreg;
	/* Fast path: delegate Invoke dispatches through delegate->invoke_impl. */
2751 		if (ARCH_HAVE_DELEGATE_TRAMPOLINES && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2752 			MonoInst *dummy_use;
2754 			MONO_EMIT_NULL_CHECK (cfg, this_reg);
2756 			/* Make a call to delegate->invoke_impl */
2757 			call->inst.inst_basereg = this_reg;
2758 			call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2759 			MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2761 			/* We must emit a dummy use here because the delegate trampoline will
2762 			replace the 'this' argument with the delegate target making this activation
2763 			no longer a root for the delegate.
2764 			This is an issue for delegates that target collectible code such as dynamic
2765 			methods of GC'able assemblies.
2767 			For a test case look into #667921.
2769 			FIXME: a dummy use is not the best way to do it as the local register allocator
2770 			will put it on a caller save register and spil it around the call.
2771 			Ideally, we would either put it on a callee save register or only do the store part.
2773 			EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2775 			return (MonoInst*)call;
	/* Devirtualize: non-virtual, or final and not a remoting wrapper. */
2778 		if ((!cfg->compile_aot || enable_for_aot) &&
2779 			(!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2780 			 (MONO_METHOD_IS_FINAL (method) &&
2781 			  method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2782 			!(mono_class_is_marshalbyref (method->klass) && context_used)) {
2784 			 * the method is not virtual, we just need to ensure this is not null
2785 			 * and then we can call the method directly.
2787 #ifndef DISABLE_REMOTING
2788 			if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2790 				 * The check above ensures method is not gshared, this is needed since
2791 				 * gshared methods can't have wrappers.
2793 				method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2797 			if (!method->string_ctor)
2798 				MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2800 			call->inst.opcode = callvirt_to_call (call->inst.opcode);
2801 		} else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2803 			 * the method is virtual, but we can statically dispatch since either
2804 			 * it's class or the method itself are sealed.
2805 			 * But first we need to ensure it's not a null reference.
2807 			MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2809 			call->inst.opcode = callvirt_to_call (call->inst.opcode);
	/* True virtual dispatch: load the vtable (with implicit null check)... */
2811 			vtable_reg = alloc_preg (cfg);
2812 			MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2813 			if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2815 #ifdef MONO_ARCH_HAVE_IMT
				/* Interface call via IMT: slots sit at negative offsets
				 * before the vtable. */
2817 					guint32 imt_slot = mono_method_get_imt_slot (method);
2818 					emit_imt_argument (cfg, call, call->method, imt_arg);
2819 					slot_reg = vtable_reg;
2820 					offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
				/* Non-IMT fallback: locate the interface slot table. */
2823 				if (slot_reg == -1) {
2824 					slot_reg = alloc_preg (cfg);
2825 					mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2826 					offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
			/* Class method: index directly into the vtable slot array. */
2829 				slot_reg = vtable_reg;
2830 				offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2831 					((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2832 #ifdef MONO_ARCH_HAVE_IMT
2834 					g_assert (mono_method_signature (method)->generic_param_count);
2835 					emit_imt_argument (cfg, call, call->method, imt_arg);
2840 			call->inst.sreg1 = slot_reg;
2841 			call->inst.inst_offset = offset;
2842 			call->virtual = TRUE;
2846 	MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2849 		set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2851 	return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *
 * Convenience wrapper: non-tail call to METHOD with its own signature,
 * no IMT and no rgctx argument.
 */
2855 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2857 	return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this, NULL, NULL);
/*
 * mono_emit_native_call:
 *
 * Emit a direct call to the native function FUNC with signature SIG.
 * (FUNC is consumed on lines not visible in this view — presumably stored
 * as call->fptr; TODO confirm.)
 */
2861 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2868 	call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2871 	MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2873 	return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *
 * Emit a call to the registered JIT icall whose address is FUNC, going
 * through the icall's wrapper so the signature and GC transitions match.
 */
2877 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2879 	MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2883 	return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2887  * mono_emit_abs_call:
2889  * Emit a call to the runtime function described by PATCH_TYPE and DATA.
/*
 * The jump-info record itself is passed as the "address"; the abs_patches
 * table lets the PATCH_INFO_ABS resolver map it back to the real target.
 */
2891 inline static MonoInst*
2892 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2893 					MonoMethodSignature *sig, MonoInst **args)
2895 	MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2899 	 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2902 	if (cfg->abs_patches == NULL)
2903 		cfg->abs_patches = g_hash_table_new (NULL, NULL);
2904 	g_hash_table_insert (cfg->abs_patches, ji, ji);
2905 	ins = mono_emit_native_call (cfg, ji, sig, args);
2906 	((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *
 * After a pinvoke (or LLVM) call, the upper bits of a sub-register-sized
 * integer return value may be garbage; emit an explicit sign/zero extension
 * matching the declared return type. Returns the (possibly new) result ins.
 */
2911 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2913 	if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2914 		if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2918 			 * Native code might return non register sized integers
2919 			 * without initializing the upper bits.
			/* Map the load opcode for the return type to the matching widen op. */
2921 			switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2922 			case OP_LOADI1_MEMBASE:
2923 				widen_op = OP_ICONV_TO_I1;
2925 			case OP_LOADU1_MEMBASE:
2926 				widen_op = OP_ICONV_TO_U1;
2928 			case OP_LOADI2_MEMBASE:
2929 				widen_op = OP_ICONV_TO_I2;
2931 			case OP_LOADU2_MEMBASE:
2932 				widen_op = OP_ICONV_TO_U2;
2938 			if (widen_op != -1) {
2939 				int dreg = alloc_preg (cfg);
2942 				EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2943 				widen->type = ins->type;
/*
 * get_memcpy_method:
 *
 * Return (and cache) the managed String.memcpy helper from corlib.
 * Aborts with g_error if the corlib is too old to provide it.
 */
2953 get_memcpy_method (void)
2955 	static MonoMethod *memcpy_method = NULL;
2956 	if (!memcpy_method) {
2957 		memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2959 			g_error ("Old corlib found. Install a new one");
2961 	return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 * Recursively build a bitmap of which pointer-sized slots of KLASS (placed
 * at byte OFFSET inside the destination) hold managed references, so a
 * value copy can raise write barriers only for those slots.
 */
2965 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
2967 	MonoClassField *field;
2968 	gpointer iter = NULL;
2970 	while ((field = mono_class_get_fields (klass, &iter))) {
2973 		if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
		/* Valuetype field offsets include the (absent) object header. */
2975 		foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2976 		if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
			/* Reference fields must be pointer-aligned. */
2977 			g_assert ((foffset % SIZEOF_VOID_P) == 0);
2978 			*wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2980 			MonoClass *field_class = mono_class_from_mono_type (field->type);
2981 			if (field_class->has_references)
2982 				create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *
 * Emit a GC write barrier for storing VALUE through PTR. Prefers, in order:
 * the arch-specific inline card-table barrier, an inline card-table mark
 * sequence, and finally a call to the generic managed write-barrier method.
 * No-op when write barriers are disabled for this compile.
 */
2988 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
2990 	int card_table_shift_bits;
2991 	gpointer card_table_mask;
2993 	MonoInst *dummy_use;
2994 	int nursery_shift_bits;
2995 	size_t nursery_size;
2996 	gboolean has_card_table_wb = FALSE;
2998 	if (!cfg->gen_write_barriers)
3001 	card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
3003 	mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
3005 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
3006 	has_card_table_wb = TRUE;
3009 	if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
3012 		MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
3013 		wbarrier->sreg1 = ptr->dreg;
3014 		wbarrier->sreg2 = value->dreg;
3015 		MONO_ADD_INS (cfg->cbb, wbarrier);
3016 	} else if (card_table) {
		/* Inline card marking: card = card_table + (ptr >> shift); *card = 1. */
3017 		int offset_reg = alloc_preg (cfg);
3018 		int card_reg = alloc_preg (cfg);
3021 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3022 		if (card_table_mask)
3023 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3025 		/*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3026 		 * IMM's larger than 32bits.
3028 		if (cfg->compile_aot) {
3029 			MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
3031 			MONO_INST_NEW (cfg, ins, OP_PCONST);
3032 			ins->inst_p0 = card_table;
3033 			ins->dreg = card_reg;
3034 			MONO_ADD_INS (cfg->cbb, ins);
3037 		MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
3038 		MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
		/* Fallback: call the generic managed write barrier. */
3040 		MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3041 		mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
	/* Keep VALUE alive across the barrier for the GC's benefit. */
3044 	EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *
 * Try to emit an inline, write-barrier-aware copy of SIZE bytes of KLASS
 * from iargs[1] to iargs[0]. Small copies are unrolled word-by-word with
 * per-slot barriers; larger ones (or unalignable ones) fall back to the
 * mono_gc_wbarrier_value_copy_bitmap icall. Return value (success/failure)
 * is produced on lines not visible in this view — TODO confirm.
 */
3048 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3050 	int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3051 	unsigned need_wb = 0;
3056 	/*types with references can't have alignment smaller than sizeof(void*) */
3057 	if (align < SIZEOF_VOID_P)
3060 	/*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3061 	if (size > 32 * SIZEOF_VOID_P)
3064 	create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3066 	/* We don't unroll more than 5 stores to avoid code bloat. */
3067 	if (size > 5 * SIZEOF_VOID_P) {
3068 		/*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
3069 		size += (SIZEOF_VOID_P - 1);
3070 		size &= ~(SIZEOF_VOID_P - 1);
3072 		EMIT_NEW_ICONST (cfg, iargs [2], size);
3073 		EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3074 		mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3078 	destreg = iargs [0]->dreg;
3079 	srcreg = iargs [1]->dreg;
3082 	dest_ptr_reg = alloc_preg (cfg);
3083 	tmp_reg = alloc_preg (cfg);
	/* Running destination pointer, advanced one word per iteration. */
3086 	EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
3088 	while (size >= SIZEOF_VOID_P) {
3089 		MonoInst *load_inst;
3090 		MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3091 		load_inst->dreg = tmp_reg;
3092 		load_inst->inst_basereg = srcreg;
3093 		load_inst->inst_offset = offset;
3094 		MONO_ADD_INS (cfg->cbb, load_inst);
3096 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
		/* Barrier only for slots the bitmap marked as references —
		 * selection condition is on lines missing from this view. */
3099 			emit_write_barrier (cfg, iargs [0], load_inst);
3101 		offset += SIZEOF_VOID_P;
3102 		size -= SIZEOF_VOID_P;
3105 		/*tmp += sizeof (void*)*/
3106 		if (size >= SIZEOF_VOID_P) {
3107 			NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3108 			MONO_ADD_INS (cfg->cbb, iargs [0]);
3112 	/* Those cannot be references since size < sizeof (void*) */
3114 		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3115 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3121 		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3122 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3128 		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3129 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3138  * Emit code to copy a valuetype of type @klass whose address is stored in
3139  * @src->dreg to memory whose address is stored at @dest->dreg.
/*
 * Dispatches between: write-barrier-aware copy (when the type has
 * references), gsharedvt value copy (size known only at runtime), an
 * inline memcpy for small types, and a call to the managed memcpy helper.
 */
3142 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3144 	MonoInst *iargs [4];
3145 	int context_used, n;
3147 	MonoMethod *memcpy_method;
3148 	MonoInst *size_ins = NULL;
3149 	MonoInst *memcpy_ins = NULL;
3153 	 * This check breaks with spilled vars... need to handle it during verification anyway.
3154 	 * g_assert (klass && klass == src->klass && klass == dest->klass);
	/* gsharedvt: size and memcpy routine come from the runtime info. */
3157 	if (mini_is_gsharedvt_klass (cfg, klass)) {
3159 		context_used = mini_class_check_context_used (cfg, klass);
3160 		size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3161 		memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3165 		n = mono_class_native_size (klass, &align);
3167 		n = mono_class_value_size (klass, &align);
3169 	/* if native is true there should be no references in the struct */
3170 	if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3171 		/* Avoid barriers when storing to the stack */
3172 		if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3173 			  (dest->opcode == OP_LDADDR))) {
3179 			context_used = mini_class_check_context_used (cfg, klass);
3181 			/* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3182 			if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3184 			} else if (context_used) {
3185 				iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3187 				if (cfg->compile_aot) {
3188 					EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
3190 					EMIT_NEW_PCONST (cfg, iargs [2], klass);
3191 					mono_class_compute_gc_descriptor (klass);
3196 					mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3198 					mono_emit_jit_icall (cfg, mono_value_copy, iargs);
	/* No barriers needed: small fixed-size copy can be fully inlined. */
3203 	if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
3204 		/* FIXME: Optimize the case when src/dest is OP_LDADDR */
3205 		mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3210 			iargs [2] = size_ins;
3212 			EMIT_NEW_ICONST (cfg, iargs [2], n);
3214 		memcpy_method = get_memcpy_method ();
		/* gsharedvt path calls the runtime-provided memcpy indirectly. */
3216 			mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3218 			mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 * Return (and cache) the managed String.memset helper from corlib.
 * Aborts with g_error if the corlib is too old to provide it.
 */
3223 get_memset_method (void)
3225 	static MonoMethod *memset_method = NULL;
3226 	if (!memset_method) {
3227 		memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3229 			g_error ("Old corlib found. Install a new one");
3231 	return memset_method;
/*
 * mini_emit_initobj:
 *
 * Emit code to zero-initialize a valuetype of type KLASS at DEST->dreg.
 * gsharedvt types use a runtime-provided bzero with a runtime size; small
 * fixed-size types are zeroed inline; everything else calls the managed
 * memset helper.
 */
3235 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3237 	MonoInst *iargs [3];
3238 	int n, context_used;
3240 	MonoMethod *memset_method;
3241 	MonoInst *size_ins = NULL;
3242 	MonoInst *bzero_ins = NULL;
3243 	static MonoMethod *bzero_method;
3245 	/* FIXME: Optimize this for the case when dest is an LDADDR */
3247 	mono_class_init (klass);
3248 	if (mini_is_gsharedvt_klass (cfg, klass)) {
3249 		context_used = mini_class_check_context_used (cfg, klass);
3250 		size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3251 		bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
		/* bzero_method is cached in a function-level static; the signature
		 * is only used here to emit the indirect call. */
3253 			bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3254 		g_assert (bzero_method);
3256 		iargs [1] = size_ins;
3257 		mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3261 	n = mono_class_value_size (klass, &align);
	/* Small types: inline memset is cheaper than a call. */
3263 	if (n <= sizeof (gpointer) * 5) {
3264 		mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3267 		memset_method = get_memset_method ();
3269 		EMIT_NEW_ICONST (cfg, iargs [1], 0);
3270 		EMIT_NEW_ICONST (cfg, iargs [2], n);
3271 		mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 * Emit IR to load the runtime generic context for the currently compiled
 * shared METHOD. Depending on the sharing kind, it comes from the mrgctx
 * variable, the vtable variable, or from `this`'s vtable.
 */
3276 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3278 	MonoInst *this = NULL;
3280 	g_assert (cfg->generic_sharing_context);
	/* Instance methods of reference types can reach the context via `this`. */
3282 	if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3283 			!(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3284 			!method->klass->valuetype)
3285 		EMIT_NEW_ARGLOAD (cfg, this, 0);
3287 	if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3288 		MonoInst *mrgctx_loc, *mrgctx_var;
3291 		g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
		/* Method context: the mrgctx was stashed in the vtable var slot. */
3293 		mrgctx_loc = mono_get_vtable_var (cfg);
3294 		EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3297 	} else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3298 		MonoInst *vtable_loc, *vtable_var;
3302 		vtable_loc = mono_get_vtable_var (cfg);
3303 		EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
		/* If we actually got an mrgctx, dereference it to reach the vtable. */
3305 		if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3306 			MonoInst *mrgctx_var = vtable_var;
3309 			vtable_reg = alloc_preg (cfg);
3310 			EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3311 			vtable_var->type = STACK_PTR;
	/* Otherwise: load the vtable out of the `this` object. */
3319 		vtable_reg = alloc_preg (cfg);
3320 		EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 * Allocate (from MP) an rgctx-fetch descriptor: which method's context to
 * look in (and whether it is an mrgctx), what patch data identifies the
 * slot, and the kind of info to fetch.
 */
3325 static MonoJumpInfoRgctxEntry *
3326 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3328 	MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3329 	res->method = method;
3330 	res->in_mrgctx = in_mrgctx;
3331 	res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3332 	res->data->type = patch_type;
3333 	res->data->data.target = patch_data;
3334 	res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *
 * Emit a call to the lazy rgctx-fetch trampoline that resolves ENTRY
 * against the runtime generic context RGCTX at execution time.
 */
3339 static inline MonoInst*
3340 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3342 	return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 * Emit IR to fetch the RGCTX_TYPE info of KLASS from the current method's
 * runtime generic context.
 */
3346 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3347 					  MonoClass *klass, MonoRgctxInfoType rgctx_type)
3349 	MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3350 	MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3352 	return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 *
 * Emit IR to fetch the RGCTX_TYPE info of signature SIG from the current
 * method's runtime generic context.
 */
3356 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3357 					MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3359 	MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3360 	MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3362 	return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *
 * Emit IR to fetch the RGCTX_TYPE info describing a gsharedvt call to
 * CMETHOD with signature SIG from the current method's rgctx.
 */
3366 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3367 							   MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3369 	MonoJumpInfoGSharedVtCall *call_info;
3370 	MonoJumpInfoRgctxEntry *entry;
3373 	call_info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3374 	call_info->sig = sig;
3375 	call_info->method = cmethod;
3377 	entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3378 	rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3380 	return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 *
 * Emit IR to fetch the gsharedvt per-method info structure for CMETHOD
 * from the current method's runtime generic context.
 */
3385 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3386 								 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3388 	MonoJumpInfoRgctxEntry *entry;
3391 	entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3392 	rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3394 	return emit_rgctx_fetch (cfg, rgctx, entry);
3398  * emit_get_rgctx_method:
3400  * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3401  * normal constants, else emit a load from the rgctx.
3404 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3405 					   MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
	/* Not context-dependent: the value is a compile-time constant. */
3407 	if (!context_used) {
3410 		switch (rgctx_type) {
3411 		case MONO_RGCTX_INFO_METHOD:
3412 			EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3414 		case MONO_RGCTX_INFO_METHOD_RGCTX:
3415 			EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3418 			g_assert_not_reached ();
	/* Context-dependent: fetch from the runtime generic context. */
3421 		MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3422 		MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3424 		return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 * Emit IR to fetch the RGCTX_TYPE info of FIELD from the current method's
 * runtime generic context.
 */
3429 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3430 					  MonoClassField *field, MonoRgctxInfoType rgctx_type)
3432 	MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3433 	MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3435 	return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 * Return the index of the slot in the per-method gsharedvt info table that
 * holds (DATA, RGCTX_TYPE), reusing an existing slot when possible (except
 * for LOCAL_OFFSET entries, which are never shared). Grows the table —
 * stored in cfg->mempool — by doubling when full.
 */
3439 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3441 	MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3442 	MonoRuntimeGenericContextInfoTemplate *template;
	/* Look for an existing matching slot first. */
3447 	for (i = 0; i < info->num_entries; ++i) {
3448 		MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3450 		if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
	/* Full: double the capacity (mempool memory is never freed early). */
3454 	if (info->num_entries == info->count_entries) {
3455 		MonoRuntimeGenericContextInfoTemplate *new_entries;
3456 		int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3458 		new_entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3460 		memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3461 		info->entries = new_entries;
3462 		info->count_entries = new_count_entries;
3465 	idx = info->num_entries;
3466 	template = &info->entries [idx];
3467 	template->info_type = rgctx_type;
3468 	template->data = data;
3470 	info->num_entries ++;
3476  * emit_get_gsharedvt_info:
3478  * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3481 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
	/* Reserve (or reuse) a slot, then emit a direct load from the
	 * per-method info structure: info->entries [idx]. */
3486 	idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3487 	/* Load info->entries [idx] */
3488 	dreg = alloc_preg (cfg);
3489 	EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/*
 * emit_get_gsharedvt_info_klass:
 *
 * Convenience wrapper: fetch gsharedvt info keyed on KLASS's byval type.
 */
3495 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3497 	return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3501  * On return the caller must check @klass for load errors.
/*
 * emit_generic_class_init:
 *
 * Emit a call to the generic class-init trampoline for KLASS, passing the
 * vtable either from the rgctx (shared code) or as a constant. On archs
 * with a vtable register the argument is pinned to it.
 */
3504 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
3506 	MonoInst *vtable_arg;
3510 	context_used = mini_class_check_context_used (cfg, klass);
3513 		vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3514 										   klass, MONO_RGCTX_INFO_VTABLE);
3516 		MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3520 		EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
	/* LLVM uses a distinct trampoline signature. */
3523 	if (COMPILE_LLVM (cfg))
3524 		call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3526 		call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3527 #ifdef MONO_ARCH_VTABLE_REG
3528 	mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3529 	cfg->uses_vtable_reg = TRUE;
/*
 * emit_seq_point:
 *
 * Emit a debugger sequence point at IL offset (ip - header->code), but only
 * when sequence points are enabled and we are compiling METHOD itself (not
 * an inlined callee). INTR_LOC marks an interruptible location;
 * NONEMPTY_STACK records that the IL stack is not empty here.
 */
3536 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3540 	if (cfg->gen_seq_points && cfg->method == method) {
3541 		NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3543 			ins->flags |= MONO_INST_NONEMPTY_STACK;
3544 		MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *
 * When --debug=casts is enabled, record the source class (from OBJ_REG's
 * vtable) and target KLASS into the JIT TLS structure so a failing cast
 * can produce a detailed error message. NULL_CHECK skips the recording for
 * null objects. Writes the (possibly new) current bblock to *OUT_BBLOCK.
 */
3549 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check, MonoBasicBlock **out_bblock)
3551 	if (mini_get_debug_options ()->better_cast_details) {
3552 		int to_klass_reg = alloc_preg (cfg);
3553 		int vtable_reg = alloc_preg (cfg);
3554 		int klass_reg = alloc_preg (cfg);
3555 		MonoBasicBlock *is_null_bb = NULL;
		/* Optional null bypass: jump past the TLS stores for null objects. */
3559 			NEW_BBLOCK (cfg, is_null_bb);
3561 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3562 			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3565 		tls_get = mono_get_jit_tls_intrinsic (cfg);
3567 			fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3571 		MONO_ADD_INS (cfg->cbb, tls_get);
3572 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3573 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3575 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3576 		MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3577 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3580 			MONO_START_BB (cfg, is_null_bb);
3582 			*out_bblock = cfg->cbb;
/*
 * reset_cast_details:
 *
 *   Emit IR that clears the cast-details state previously written by
 * save_cast_details (), so stale data is not reported for a later,
 * unrelated cast failure. No-op unless --debug=casts is active.
 */
3588 reset_cast_details (MonoCompile *cfg)
3590 /* Reset the variables holding the cast details */
3591 if (mini_get_debug_options ()->better_cast_details) {
3592 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3594 MONO_ADD_INS (cfg->cbb, tls_get);
3595 /* It is enough to reset the from field */
3596 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3601 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit IR that verifies OBJ is an array of exactly ARRAY_CLASS, throwing
 * ArrayTypeMismatchException otherwise (used for covariant array stores).
 * The check compares either the object's klass (shared code) or its vtable
 * (normal/AOT/generic-sharing code) against the expected constant, choosing
 * the comparison strategy based on MONO_OPT_SHARED, context_used and
 * compile_aot. The vtable load uses the _FAULT variant, so a null OBJ
 * faults here rather than needing an explicit null check.
 * NOTE(review): several lines (declarations of vtable/vt_reg, else arms,
 * error returns after the mono_class_vtable failures) are elided in this view.
 */
3604 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3606 int vtable_reg = alloc_preg (cfg);
3609 context_used = mini_class_check_context_used (cfg, array_class);
3611 save_cast_details (cfg, array_class, obj->dreg, FALSE, NULL);
3613 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Shared code cannot burn a vtable pointer into the code, compare klass instead */
3615 if (cfg->opt & MONO_OPT_SHARED) {
3616 int class_reg = alloc_preg (cfg);
3617 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3618 if (cfg->compile_aot) {
3619 int klass_reg = alloc_preg (cfg);
3620 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3621 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3623 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
/* Generic sharing: fetch the expected vtable through the RGCTX */
3625 } else if (context_used) {
3626 MonoInst *vtable_ins;
3628 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3629 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3631 if (cfg->compile_aot) {
3635 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3637 vt_reg = alloc_preg (cfg);
3638 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3639 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3642 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3644 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
/* Any mismatch in the comparison above raises the exception */
3648 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3650 reset_cast_details (cfg);
3654 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3655 * generic code is generated.
/*
 * handle_unbox_nullable:
 *
 *   Emit IR to unbox a Nullable<T> by calling the class's own Unbox () method.
 * With CONTEXT_USED != 0 (shared generic code) the method address is loaded from
 * the RGCTX and invoked via an indirect call; otherwise a direct call is emitted,
 * passing the vtable as an extra argument when method sharing requires it.
 * Returns the MonoInst holding the call result.
 */
3658 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3660 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3663 MonoInst *rgctx, *addr;
3665 /* FIXME: What if the class is shared? We might not
3666 have to get the address of the method from the
3668 addr = emit_get_rgctx_method (cfg, context_used, method,
3669 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3671 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
/* Indirect call through the RGCTX-provided code address */
3673 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3675 gboolean pass_vtable, pass_mrgctx;
3676 MonoInst *rgctx_arg = NULL;
3678 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3679 g_assert (!pass_mrgctx);
3682 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3685 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
/* Direct call; rgctx_arg carries the vtable when pass_vtable is set */
3688 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit IR implementing the CIL 'unbox' opcode for a non-nullable value type:
 * verify that sp [0] is a boxed instance of KLASS (throwing InvalidCastException
 * otherwise) and produce a managed pointer to the value payload, i.e.
 * obj + sizeof (MonoObject). With CONTEXT_USED the element class comes from the
 * RGCTX; otherwise an inline class check is emitted.
 * NOTE(review): the elided lines presumably contain the context_used/else split
 * and the final return of 'add' — confirm against the full source.
 */
3693 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3697 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3698 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3699 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3700 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3702 obj_reg = sp [0]->dreg;
/* Faulting load: null obj triggers NullReferenceException here */
3703 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3704 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3706 /* FIXME: generics */
3707 g_assert (klass->rank == 0);
/* An array can never unbox to a value type */
3710 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3711 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3713 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3714 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
3717 MonoInst *element_class;
3719 /* This assertion is from the unboxcast insn */
3720 g_assert (klass->rank == 0);
/* Shared generic code: load the expected element class from the RGCTX */
3722 element_class = emit_get_rgctx_klass (cfg, context_used,
3723 klass->element_class, MONO_RGCTX_INFO_KLASS);
3725 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3726 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-shared path: inline check against the known element class */
3728 save_cast_details (cfg, klass->element_class, obj_reg, FALSE, NULL);
3729 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3730 reset_cast_details (cfg);
/* Result: managed pointer past the object header */
3733 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3734 MONO_ADD_INS (cfg->cbb, add);
3735 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 *   Emit IR to unbox OBJ when KLASS is a gsharedvt (variable-size generic) type,
 * whose concrete representation is only known at runtime. The runtime box-type
 * info (MONO_RGCTX_INFO_CLASS_BOX_TYPE) selects one of three paths:
 *   - value type (default): result address is obj + sizeof (MonoObject);
 *   - reference type (1):   the reference is spilled to a temporary and the
 *                           address of that temporary is the result;
 *   - nullable (2):         the class's Unbox method is called indirectly
 *                           through an RGCTX-provided address.
 * All paths converge in end_bb, loading the value from addr_reg; *OUT_CBB is set
 * to the final basic block.
 */
3742 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj, MonoBasicBlock **out_cbb)
3744 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3745 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3749 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
/* Cast check: obj, klass */
3755 args [1] = klass_inst;
3758 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
3760 NEW_BBLOCK (cfg, is_ref_bb);
3761 NEW_BBLOCK (cfg, is_nullable_bb);
3762 NEW_BBLOCK (cfg, end_bb);
/* Dispatch on the runtime box type: 1 == reference, 2 == nullable */
3763 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3764 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3765 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3767 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3768 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3770 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3771 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Value-type path: address of the payload inside the box */
3775 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3776 MONO_ADD_INS (cfg->cbb, addr);
3778 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3781 MONO_START_BB (cfg, is_ref_bb);
3783 /* Save the ref to a temporary */
3784 dreg = alloc_ireg (cfg);
3785 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
3786 addr->dreg = addr_reg;
3787 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3788 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3791 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable path: call Nullable<T>.Unbox through an RGCTX-resolved address */
3794 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3795 MonoInst *unbox_call;
3796 MonoMethodSignature *unbox_sig;
3799 var = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
/* Hand-built signature: T Unbox (object) — cannot construct the method at JIT time */
3801 unbox_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3802 unbox_sig->ret = &klass->byval_arg;
3803 unbox_sig->param_count = 1;
3804 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
3805 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3807 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3808 addr->dreg = addr_reg;
3811 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3814 MONO_START_BB (cfg, end_bb);
/* All paths left the value address in addr_reg */
3817 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
3819 *out_cbb = cfg->cbb;
3825 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit IR that allocates an instance of KLASS, returning the MonoInst holding
 * the new object. FOR_BOX indicates the allocation is for a box operation, which
 * lets the GC pick a specialized managed allocator. The strategy depends on the
 * compilation mode:
 *   - context_used (shared generics): class/vtable comes from the RGCTX;
 *   - MONO_OPT_SHARED: call mono_object_new with an explicit domain argument;
 *   - AOT out-of-line corlib classes: compact helper keyed by type token;
 *   - otherwise: managed allocator if available, else the allocation function
 *     chosen by mono_class_get_allocation_ftn () (pass_lw variants take the
 *     instance size in pointer-words as the first argument).
 * Returns NULL and sets the cfg exception on error.
 * NOTE(review): several lines (the context_used if header, else arms, returns)
 * are elided in this view.
 */
3828 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3830 MonoInst *iargs [2];
3836 MonoInst *iargs [2];
3838 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
3840 if (cfg->opt & MONO_OPT_SHARED)
3841 rgctx_info = MONO_RGCTX_INFO_KLASS;
3843 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3844 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3846 if (cfg->opt & MONO_OPT_SHARED) {
3847 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3849 alloc_ftn = mono_object_new;
3852 alloc_ftn = mono_object_new_specific;
3855 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED))
3856 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3858 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3861 if (cfg->opt & MONO_OPT_SHARED) {
3862 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3863 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3865 alloc_ftn = mono_object_new;
3866 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3867 /* This happens often in argument checking code, eg. throw new FooException... */
3868 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3869 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3870 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3872 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3873 MonoMethod *managed_alloc = NULL;
/* vtable creation failed: report a TypeLoadException via the cfg */
3877 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3878 cfg->exception_ptr = klass;
3882 #ifndef MONO_CROSS_COMPILE
3883 managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
3886 if (managed_alloc) {
3887 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3888 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3890 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* pass_lw: the allocator wants the instance size in pointer-sized words */
3892 guint32 lw = vtable->klass->instance_size;
3893 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3894 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3895 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3898 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3902 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3906 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit IR implementing the CIL 'box' opcode for VAL of type KLASS; returns the
 * MonoInst holding the boxed object, or NULL with the cfg exception set on error.
 * Three main cases:
 *   - Nullable<T>: call the class's Box () method (indirectly via the RGCTX in
 *     shared generic code, directly otherwise);
 *   - gsharedvt KLASS: runtime dispatch on the box type — allocate-and-copy for
 *     value types, pass-through for references (loaded via a temporary since VAL
 *     is a vtype here), and an RGCTX-resolved Nullable Box call;
 *   - plain value type: allocate and store VAL past the object header.
 * *OUT_CBB receives the basic block current after the emitted code.
 */
3909 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoBasicBlock **out_cbb)
3911 MonoInst *alloc, *ins;
3913 *out_cbb = cfg->cbb;
3915 if (mono_class_is_nullable (klass)) {
3916 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3919 /* FIXME: What if the class is shared? We might not
3920 have to get the method address from the RGCTX. */
3921 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3922 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3923 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3925 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3927 gboolean pass_vtable, pass_mrgctx;
3928 MonoInst *rgctx_arg = NULL;
3930 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3931 g_assert (!pass_mrgctx);
3934 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3937 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3940 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
3944 if (mini_is_gsharedvt_klass (cfg, klass)) {
3945 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3946 MonoInst *res, *is_ref, *src_var, *addr;
3949 dreg = alloc_ireg (cfg);
3951 NEW_BBLOCK (cfg, is_ref_bb);
3952 NEW_BBLOCK (cfg, is_nullable_bb);
3953 NEW_BBLOCK (cfg, end_bb);
/* Dispatch on the runtime box type: 1 == reference, 2 == nullable */
3954 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3955 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3956 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3958 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3959 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Value-type path: allocate a box and copy VAL into its payload */
3962 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3965 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3966 ins->opcode = OP_STOREV_MEMBASE;
3968 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
3969 res->type = STACK_OBJ;
3971 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3974 MONO_START_BB (cfg, is_ref_bb);
3975 addr_reg = alloc_ireg (cfg);
3977 /* val is a vtype, so has to load the value manually */
3978 src_var = get_vreg_to_inst (cfg, val->dreg);
3980 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
3981 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
3982 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
3983 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3986 MONO_START_BB (cfg, is_nullable_bb);
/* Nullable path: call Nullable<T>.Box through an RGCTX-resolved address */
3989 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
3990 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
3992 MonoMethodSignature *box_sig;
3995 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
3996 * construct that method at JIT time, so have to do things by hand.
3998 box_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3999 box_sig->ret = &mono_defaults.object_class->byval_arg;
4000 box_sig->param_count = 1;
4001 box_sig->params [0] = &klass->byval_arg;
4002 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4003 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4004 res->type = STACK_OBJ;
4008 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4010 MONO_START_BB (cfg, end_bb);
4012 *out_cbb = cfg->cbb;
/* Plain value type: allocate and store the value past the object header */
4016 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4020 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return whether KLASS is a generic instance (or open generic in shared code)
 * that has at least one covariant/contravariant type parameter instantiated
 * with a reference type. Such classes need the slower cache-based cast checks,
 * since variance makes the simple vtable/klass comparisons insufficient.
 */
4027 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
4030 MonoGenericContainer *container;
4031 MonoGenericInst *ginst;
4033 if (klass->generic_class) {
4034 container = klass->generic_class->container_class->generic_container;
4035 ginst = klass->generic_class->context.class_inst;
4036 } else if (klass->generic_container && context_used) {
4037 container = klass->generic_container;
4038 ginst = container->context.class_inst;
/* Scan type arguments; only variant parameters matter */
4043 for (i = 0; i < container->type_argc; ++i) {
4045 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
4047 type = ginst->type_argv [i];
4048 if (mini_type_is_reference (cfg, type))
/*
 * is_complex_isinst:
 *   True when KLASS needs the general (cache-based) isinst/castclass path.
 *   Note the leading TRUE || — the macro currently evaluates to TRUE for every
 *   class, forcing the general path unconditionally; the FIXME below explains
 *   that the intended fast-path predicate is disabled pending test failures.
 */
4054 // FIXME: This doesn't work yet (class libs tests fail?)
4055 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/*
 * emit_castclass_with_cache:
 *
 *   Emit a call to the castclass-with-cache marshalling wrapper, which performs
 * the cast check using a per-call-site cache (ARGS holds obj / klass / cache).
 * Cast details are saved before and reset after the call so failures can be
 * reported with source/target class information. Returns the call instruction.
 */
4058 emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args, MonoBasicBlock **out_bblock)
4060 MonoMethod *mono_castclass;
4063 mono_castclass = mono_marshal_get_castclass_with_cache ();
4065 save_cast_details (cfg, klass, args [0]->dreg, TRUE, out_bblock);
4066 res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
4067 reset_cast_details (cfg);
4073 * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 *
 *   Emit IR implementing the CIL 'castclass' opcode: verify SRC is null or an
 * instance of KLASS, throwing InvalidCastException otherwise. With variant
 * generic arguments or a "complex" class (currently always, see
 * is_complex_isinst) the cache-based wrapper is used; otherwise inline checks
 * are emitted: an interface check, a direct vtable/klass comparison for sealed
 * non-array classes, or the general castclass_inst sequence. Returns NULL and
 * sets the cfg exception on error (elided error paths).
 */
4076 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4078 MonoBasicBlock *is_null_bb;
4079 int obj_reg = src->dreg;
4080 int vtable_reg = alloc_preg (cfg);
4081 MonoInst *klass_inst = NULL;
/* Cache-based path: handles variance and everything is_complex_isinst matches */
4086 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4087 MonoInst *cache_ins;
4089 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4094 /* klass - it's the second element of the cache entry*/
4095 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4098 args [2] = cache_ins;
4100 return emit_castclass_with_cache (cfg, klass, args, NULL);
4103 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
/* Null references always pass castclass */
4106 NEW_BBLOCK (cfg, is_null_bb);
4108 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4109 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
4111 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4113 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4114 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4115 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
4117 int klass_reg = alloc_preg (cfg);
4119 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array class: one direct comparison suffices */
4121 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4122 /* the remoting code is broken, access the class for now */
4123 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4124 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4126 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4127 cfg->exception_ptr = klass;
4130 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4132 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4133 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4135 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* General case: walk the class hierarchy */
4137 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4138 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4142 MONO_START_BB (cfg, is_null_bb);
4144 reset_cast_details (cfg);
4150 * Returns NULL and set the cfg exception on error.
/*
 * handle_isinst:
 *
 *   Emit IR implementing the CIL 'isinst' opcode: produce SRC if it is null or
 * an instance of KLASS, NULL otherwise (no exception). The cache-based wrapper
 * is used for variant/complex classes (currently always, see is_complex_isinst);
 * otherwise inline checks are emitted, specialized by class shape: interface,
 * array (with rank/element-class/vector checks and special-casing of
 * object/Enum element classes), nullable, sealed, or the general hierarchy walk.
 * res_reg is pre-loaded with SRC; the false path overwrites it with 0.
 */
4153 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4156 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
4157 int obj_reg = src->dreg;
4158 int vtable_reg = alloc_preg (cfg);
4159 int res_reg = alloc_ireg_ref (cfg);
4160 MonoInst *klass_inst = NULL;
/* Cache-based path: handles variance and everything is_complex_isinst matches */
4165 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4166 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4167 MonoInst *cache_ins;
4169 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4174 /* klass - it's the second element of the cache entry*/
4175 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4178 args [2] = cache_ins;
4180 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
4183 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4186 NEW_BBLOCK (cfg, is_null_bb);
4187 NEW_BBLOCK (cfg, false_bb);
4188 NEW_BBLOCK (cfg, end_bb);
4190 /* Do the assignment at the beginning, so the other assignment can be if converted */
4191 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4192 ins->type = STACK_OBJ;
/* Null input: result is the (null) input itself */
4195 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4196 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4198 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4200 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4201 g_assert (!context_used);
4202 /* the is_null_bb target simply copies the input register to the output */
4203 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4205 int klass_reg = alloc_preg (cfg);
/* Array class: compare rank first, then the element (cast) class */
4208 int rank_reg = alloc_preg (cfg);
4209 int eclass_reg = alloc_preg (cfg);
4211 g_assert (!context_used);
4212 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4213 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4214 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4215 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4216 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* object[] also accepts enum element types whose parent chain reaches Enum */
4217 if (klass->cast_class == mono_defaults.object_class) {
4218 int parent_reg = alloc_preg (cfg);
4219 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
4220 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4221 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4222 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4223 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
4224 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4225 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4226 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4227 } else if (klass->cast_class == mono_defaults.enum_class) {
4228 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4229 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4230 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4231 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4233 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4234 /* Check that the object is a vector too */
4235 int bounds_reg = alloc_preg (cfg);
4236 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
4237 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4238 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4241 /* the is_null_bb target simply copies the input register to the output */
4242 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4244 } else if (mono_class_is_nullable (klass)) {
4245 g_assert (!context_used);
4246 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4247 /* the is_null_bb target simply copies the input register to the output */
4248 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed class: one direct comparison suffices */
4250 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4251 g_assert (!context_used);
4252 /* the remoting code is broken, access the class for now */
4253 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4254 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4256 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4257 cfg->exception_ptr = klass;
4260 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4262 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4263 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4265 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4266 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
/* General case: walk the class hierarchy */
4268 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4269 /* the is_null_bb target simply copies the input register to the output */
4270 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* false path: overwrite the pre-loaded result with NULL */
4275 MONO_START_BB (cfg, false_bb);
4277 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4278 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4280 MONO_START_BB (cfg, is_null_bb);
4282 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the internal CEE_MONO_CISINST opcode (remoting-aware isinst).
 * Produces an I4 result per the comment below: 0 = instance of KLASS,
 * 1 = not an instance, 2 = transparent proxy whose type cannot be determined
 * at JIT-compiled-code level (must be resolved by the remoting runtime).
 * With DISABLE_REMOTING only the 0/1 outcomes exist and proxy code paths
 * are compiled out.
 */
4288 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4290 /* This opcode takes as input an object reference and a class, and returns:
4291 0) if the object is an instance of the class,
4292 1) if the object is not instance of the class,
4293 2) if the object is a proxy whose type cannot be determined */
4296 #ifndef DISABLE_REMOTING
4297 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4299 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4301 int obj_reg = src->dreg;
4302 int dreg = alloc_ireg (cfg);
4304 #ifndef DISABLE_REMOTING
4305 int klass_reg = alloc_preg (cfg);
4308 NEW_BBLOCK (cfg, true_bb);
4309 NEW_BBLOCK (cfg, false_bb);
4310 NEW_BBLOCK (cfg, end_bb);
4311 #ifndef DISABLE_REMOTING
4312 NEW_BBLOCK (cfg, false2_bb);
4313 NEW_BBLOCK (cfg, no_proxy_bb);
/* Null input is "not an instance" (result 1) */
4316 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4317 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
4319 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4320 #ifndef DISABLE_REMOTING
4321 NEW_BBLOCK (cfg, interface_fail_bb);
4324 tmp_reg = alloc_preg (cfg);
4325 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4326 #ifndef DISABLE_REMOTING
/* Interface check failed: might still be a proxy with unknown custom type info */
4327 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4328 MONO_START_BB (cfg, interface_fail_bb);
4329 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4331 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
4333 tmp_reg = alloc_preg (cfg);
4334 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4335 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4336 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4338 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
4341 #ifndef DISABLE_REMOTING
4342 tmp_reg = alloc_preg (cfg);
4343 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4344 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Transparent proxy: check against the remote class's proxy_class instead */
4346 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4347 tmp_reg = alloc_preg (cfg);
4348 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4349 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4351 tmp_reg = alloc_preg (cfg);
4352 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4353 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4354 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4356 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4357 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4359 MONO_START_BB (cfg, no_proxy_bb);
4361 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4363 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
/* Outcome blocks: false = 1, false2 (unknown proxy) = 2, true = 0 */
4367 MONO_START_BB (cfg, false_bb);
4369 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4370 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4372 #ifndef DISABLE_REMOTING
4373 MONO_START_BB (cfg, false2_bb);
4375 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4376 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4379 MONO_START_BB (cfg, true_bb);
4381 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4383 MONO_START_BB (cfg, end_bb);
/* Materialize the result as an I4 on the stack */
4386 MONO_INST_NEW (cfg, ins, OP_ICONST);
4388 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the internal CEE_MONO_CCASTCLASS opcode (remoting-aware
 * castclass). Produces an I4 result per the comment below: 0 = instance of
 * KLASS, 1 = transparent proxy whose type cannot be determined at
 * JIT-compiled-code level; any other case throws InvalidCastException
 * inline. With DISABLE_REMOTING the proxy paths are compiled out.
 */
4394 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4396 /* This opcode takes as input an object reference and a class, and returns:
4397 0) if the object is an instance of the class,
4398 1) if the object is a proxy whose type cannot be determined
4399 an InvalidCastException exception is thrown otherwhise*/
4402 #ifndef DISABLE_REMOTING
4403 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4405 MonoBasicBlock *ok_result_bb;
4407 int obj_reg = src->dreg;
4408 int dreg = alloc_ireg (cfg);
4409 int tmp_reg = alloc_preg (cfg);
4411 #ifndef DISABLE_REMOTING
4412 int klass_reg = alloc_preg (cfg);
4413 NEW_BBLOCK (cfg, end_bb);
4416 NEW_BBLOCK (cfg, ok_result_bb);
/* Null references always pass castclass (result 0) */
4418 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4419 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
4421 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4423 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4424 #ifndef DISABLE_REMOTING
4425 NEW_BBLOCK (cfg, interface_fail_bb);
4427 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Interface check failed: only a proxy without custom type info survives */
4428 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4429 MONO_START_BB (cfg, interface_fail_bb);
4430 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4432 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
4434 tmp_reg = alloc_preg (cfg);
4435 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4436 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4437 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
4439 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4440 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Remoting disabled: plain interface cast, throws on mismatch */
4442 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4443 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4444 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
4447 #ifndef DISABLE_REMOTING
4448 NEW_BBLOCK (cfg, no_proxy_bb);
4450 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4451 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Transparent proxy: check against the remote class's proxy_class instead */
4452 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4454 tmp_reg = alloc_preg (cfg);
4455 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4456 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4458 tmp_reg = alloc_preg (cfg);
4459 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4460 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4461 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4463 NEW_BBLOCK (cfg, fail_1_bb);
4465 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
/* Proxy whose type cannot be determined: result 1, no exception */
4467 MONO_START_BB (cfg, fail_1_bb);
4469 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4470 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4472 MONO_START_BB (cfg, no_proxy_bb);
4474 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4476 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
4480 MONO_START_BB (cfg, ok_result_bb);
4482 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4484 #ifndef DISABLE_REMOTING
4485 MONO_START_BB (cfg, end_bb);
/* Materialize the result as an I4 on the stack */
4489 MONO_INST_NEW (cfg, ins, OP_ICONST);
4491 ins->type = STACK_I4;
4497 * Returns NULL and set the cfg exception on error.
/*
 * handle_delegate_ctor:
 *
 *   Emit IR that allocates a delegate of type KLASS bound to METHOD with TARGET
 * as the receiver, inlining the work of mono_delegate_ctor (): set the target,
 * method, method_code (a per-domain slot filled in when the method is compiled,
 * avoiding a lookup in the delegate trampoline) and invoke_impl fields. Write
 * barriers are emitted for the reference stores when required. CONTEXT_USED
 * selects RGCTX-based method/code lookups for shared generic code.
 * Returns NULL and sets the cfg exception on error (elided paths).
 */
4500 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
4504 gpointer *trampoline;
4505 MonoInst *obj, *method_ins, *tramp_ins;
4509 obj = handle_alloc (cfg, klass, FALSE, 0);
4513 /* Inline the contents of mono_delegate_ctor */
4515 /* Set target field */
4516 /* Optimize away setting of NULL target */
4517 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
4518 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
4519 if (cfg->gen_write_barriers) {
4520 dreg = alloc_preg (cfg);
4521 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target));
4522 emit_write_barrier (cfg, ptr, target);
4526 /* Set method field */
4527 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4528 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4529 if (cfg->gen_write_barriers) {
4530 dreg = alloc_preg (cfg);
4531 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method));
4532 emit_write_barrier (cfg, ptr, method_ins);
4535 * To avoid looking up the compiled code belonging to the target method
4536 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4537 * store it, and we fill it after the method has been compiled.
4539 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4540 MonoInst *code_slot_ins;
4543 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Lazily create/lookup the per-domain method -> code-slot hash (under the domain lock) */
4545 domain = mono_domain_get ();
4546 mono_domain_lock (domain);
4547 if (!domain_jit_info (domain)->method_code_hash)
4548 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4549 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4551 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
4552 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4554 mono_domain_unlock (domain);
4556 if (cfg->compile_aot)
4557 EMIT_NEW_AOTCONST (cfg, code_slot_ins, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
4559 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
4561 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
4564 /* Set invoke_impl field */
4565 if (cfg->compile_aot) {
4566 MonoClassMethodPair *del_tramp;
4568 del_tramp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoClassMethodPair));
4569 del_tramp->klass = klass;
4570 del_tramp->method = context_used ? NULL : method;
4571 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
4573 trampoline = mono_create_delegate_trampoline_with_method (cfg->domain, klass, context_used ? NULL : method);
4574 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4576 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
4578 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call to the mono_array_new_va () icall to allocate a RANK-
 * dimensional array, with the dimension arguments in SP.  The icall uses
 * a vararg calling convention, so the method is flagged as having varargs
 * and LLVM compilation is disabled.
 */
4584 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4586 MonoJitICallInfo *info;
4588 /* Need to register the icall so it gets an icall wrapper */
4589 info = mono_get_array_new_va_icall (rank);
4591 cfg->flags |= MONO_CFG_HAS_VARARGS;
4593 /* mono_array_new_va () needs a vararg calling convention */
4594 cfg->disable_llvm = TRUE;
4596 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4597 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Materialize the GOT address into cfg->got_var at the very start of the
 * entry basic block; no-op when there is no got var or it was already
 * allocated.  A dummy use appended to bb_exit keeps got_var alive for the
 * whole method, since the real uses may only appear during back-end
 * lowering.
 */
4601 mono_emit_load_got_addr (MonoCompile *cfg)
4603 MonoInst *getaddr, *dummy_use;
4605 if (!cfg->got_var || cfg->got_var_allocated)
4608 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
4609 getaddr->cil_code = cfg->header->code;
4610 getaddr->dreg = cfg->got_var->dreg;
4612 /* Add it to the start of the first bblock */
4613 if (cfg->bb_entry->code) {
/* Prepend by linking in front of the existing first instruction */
4614 getaddr->next = cfg->bb_entry->code;
4615 cfg->bb_entry->code = getaddr;
4618 MONO_ADD_INS (cfg->bb_entry, getaddr);
4620 cfg->got_var_allocated = TRUE;
4623 * Add a dummy use to keep the got_var alive, since real uses might
4624 * only be generated by the back ends.
4625 * Add it to end_bblock, so the variable's lifetime covers the whole
4627 * It would be better to make the usage of the got var explicit in all
4628 * cases when the backend needs it (i.e. calls, throw etc.), so this
4629 * wouldn't be needed.
4631 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
4632 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Cached inline body-size cap: taken from the MONO_INLINELIMIT environment
 * variable, falling back to INLINE_LENGTH_LIMIT.  Initialized lazily by
 * mono_method_check_inlining () below. */
4635 static int inline_limit;
4636 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled in
 * CFG.  Among the visible rejections: generic sharing contexts, inline
 * depth > 10, noinline/synchronized/MarshalByRef methods, bodies at or
 * above the inline_limit (unless marked AggressiveInlining), classes whose
 * cctor cannot be proven to have run, methods with declarative security,
 * and - under the soft-float fallback - R4 parameters or return values.
 * NOTE(review): 'signature' (used in the LMF-ops check), 'vtable' and the
 * loop index 'i' are declared in lines elided from this view - verify
 * against the full source.
 */
4639 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
4641 MonoMethodHeaderSummary header;
4643 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4644 MonoMethodSignature *sig = mono_method_signature (method);
4648 if (cfg->generic_sharing_context)
4651 if (cfg->inline_depth > 10)
4654 #ifdef MONO_ARCH_HAVE_LMF_OPS
4655 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
4656 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
4657 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
4662 if (!mono_method_get_header_summary (method, &header))
4665 /*runtime, icall and pinvoke are checked by summary call*/
4666 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
4667 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
4668 (mono_class_is_marshalbyref (method->klass)) ||
4672 /* also consider num_locals? */
4673 /* Do the size check early to avoid creating vtables */
4674 if (!inline_limit_inited) {
4675 if (g_getenv ("MONO_INLINELIMIT"))
4676 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"))
4678 inline_limit = INLINE_LENGTH_LIMIT;
4679 inline_limit_inited = TRUE;
4681 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
4685 * if we can initialize the class of the method right away, we do,
4686 * otherwise we don't allow inlining if the class needs initialization,
4687 * since it would mean inserting a call to mono_runtime_class_init()
4688 * inside the inlined code
4690 if (!(cfg->opt & MONO_OPT_SHARED)) {
4691 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
4692 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
4693 vtable = mono_class_vtable (cfg->domain, method->klass);
4696 if (!cfg->compile_aot)
4697 mono_runtime_class_init (vtable);
4698 } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
4699 if (cfg->run_cctors && method->klass->has_cctor) {
4700 /*FIXME it would be easier and lazier to just use mono_class_try_get_vtable */
4701 if (!method->klass->runtime_info)
4702 /* No vtable created yet */
4704 vtable = mono_class_vtable (cfg->domain, method->klass);
4707 /* This makes so that inline cannot trigger */
4708 /* .cctors: too many apps depend on them */
4709 /* running with a specific order... */
4710 if (! vtable->initialized)
4712 mono_runtime_class_init (vtable);
4714 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
4715 if (!method->klass->runtime_info)
4716 /* No vtable created yet */
4718 vtable = mono_class_vtable (cfg->domain, method->klass);
4721 if (!vtable->initialized)
4726 * If we're compiling for shared code
4727 * the cctor will need to be run at aot method load time, for example,
4728 * or at the end of the compilation of the inlining method.
4730 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
4735 * CAS - do not inline methods with declarative security
4736 * Note: this has to be before any possible return TRUE;
4738 if (mono_security_method_has_declsec (method))
4741 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4742 if (mono_arch_is_soft_float ()) {
/* Soft float cannot inline code handling R4 values */
4744 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4746 for (i = 0; i < sig->param_count; ++i)
4747 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static-field access in METHOD requires running
 * KLASS's class constructor first.  When not compiling AOT, an already
 * initialized vtable means no cctor call is needed; AOT cannot bake that
 * runtime state into the image, hence the distinction.
 */
4756 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
4758 if (!cfg->compile_aot) {
4760 if (vtable->initialized)
4764 if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
4765 if (cfg->method == method)
4769 if (!mono_class_needs_cctor_run (klass, method))
4772 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
4773 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR that computes the address of element INDEX of the rank-1 array
 * ARR whose element class is KLASS, with an optional bounds check (BCHECK).
 * On x86/amd64 a single LEA handles power-of-two element sizes; otherwise
 * an explicit multiply + add is emitted.  For gsharedvt variable classes
 * the element size is fetched through the rgctx at runtime.  Returns the
 * STACK_MP instruction holding the element address.
 */
4780 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4784 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
4787 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
4790 mono_class_init (klass);
4791 size = mono_class_array_element_size (klass);
4794 mult_reg = alloc_preg (cfg);
4795 array_reg = arr->dreg;
4796 index_reg = index->dreg;
4798 #if SIZEOF_REGISTER == 8
4799 /* The array reg is 64 bits but the index reg is only 32 */
4800 if (COMPILE_LLVM (cfg)) {
/* LLVM handles the widening itself */
4802 index2_reg = index_reg;
4804 index2_reg = alloc_preg (cfg);
4805 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
4808 if (index->type == STACK_I8) {
4809 index2_reg = alloc_preg (cfg);
4810 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4812 index2_reg = index_reg;
4817 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
4819 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4820 if (size == 1 || size == 2 || size == 4 || size == 8) {
/* Map element size to the LEA scale shift; -1 entries are unused */
4821 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4823 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
4824 ins->klass = mono_class_get_element_class (klass);
4825 ins->type = STACK_MP;
4831 add_reg = alloc_ireg_mp (cfg);
4834 MonoInst *rgctx_ins;
/* gsharedvt: element size is only known at runtime, fetch it from the rgctx */
4837 g_assert (cfg->generic_sharing_context);
4838 context_used = mini_class_check_context_used (cfg, klass);
4839 g_assert (context_used);
4840 rgctx_ins = emit_get_gsharedvt_info (cfg, &klass->byval_arg, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
4841 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
4843 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
4845 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4846 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4847 ins->klass = mono_class_get_element_class (klass);
4848 ins->type = STACK_MP;
4849 MONO_ADD_INS (cfg->cbb, ins);
4854 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the element address for a rank-2 array: load both
 * MonoArrayBounds entries, rebase each index by its lower bound, range
 * check each rebased index against the dimension length (unsigned LE_UN
 * compare, so negative rebased values also trap), then compute
 * (idx1 * len2 + idx2) * element_size + offsetof(MonoArray, vector).
 * Only compiled when the architecture has native mul/div (needs OP_LMUL).
 */
4856 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4858 int bounds_reg = alloc_preg (cfg);
4859 int add_reg = alloc_ireg_mp (cfg);
4860 int mult_reg = alloc_preg (cfg);
4861 int mult2_reg = alloc_preg (cfg);
4862 int low1_reg = alloc_preg (cfg);
4863 int low2_reg = alloc_preg (cfg);
4864 int high1_reg = alloc_preg (cfg);
4865 int high2_reg = alloc_preg (cfg);
4866 int realidx1_reg = alloc_preg (cfg);
4867 int realidx2_reg = alloc_preg (cfg);
4868 int sum_reg = alloc_preg (cfg);
4869 int index1, index2, tmpreg;
4873 mono_class_init (klass);
4874 size = mono_class_array_element_size (klass);
4876 index1 = index_ins1->dreg;
4877 index2 = index_ins2->dreg;
4879 #if SIZEOF_REGISTER == 8
4880 /* The array reg is 64 bits but the index reg is only 32 */
4881 if (COMPILE_LLVM (cfg)) {
4884 tmpreg = alloc_preg (cfg);
4885 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
4887 tmpreg = alloc_preg (cfg);
4888 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
4892 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
4896 /* range checking */
4897 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4898 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
4900 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4901 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4902 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4903 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4904 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4905 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4906 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Second dimension bounds live one MonoArrayBounds past the first */
4908 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4909 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4910 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4911 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4912 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
4913 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4914 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
4916 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4917 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4918 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4919 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4920 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4922 ins->type = STACK_MP;
4924 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Dispatch helper for computing an array element address for CMETHOD's
 * array class: rank 1 (and rank 2 when native mul is available and
 * MONO_OPT_INTRINS is on) get inline address computation; higher ranks
 * fall back to a call on the marshalled Address () helper.  For setters
 * the trailing value argument is excluded from the rank count.
 */
4931 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4935 MonoMethod *addr_method;
4938 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4941 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
4943 #ifndef MONO_ARCH_EMULATE_MUL_DIV
4944 /* emit_ldelema_2 depends on OP_LMUL */
4945 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
4946 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
4950 element_size = mono_class_array_element_size (cmethod->klass->element_class);
4951 addr_method = mono_marshal_get_array_address (rank, element_size);
4952 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default MonoBreakPolicyFunc: honor every breakpoint request. */
4957 static MonoBreakPolicy
4958 always_insert_breakpoint (MonoMethod *method)
4960 return MONO_BREAK_POLICY_ALWAYS;
/* Embedder-overridable policy hook, set via mono_set_break_policy (). */
4963 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4966 * mono_set_break_policy:
4967 * policy_callback: the new callback function
4969 * Allow embedders to decide whether to actually obey breakpoint instructions
4970 * (both break IL instructions and Debugger.Break () method calls), for example
4971 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4972 * untrusted or semi-trusted code.
4974 * @policy_callback will be called every time a break point instruction needs to
4975 * be inserted with the method argument being the method that calls Debugger.Break()
4976 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4977 * if it wants the breakpoint to not be effective in the given method.
4978 * #MONO_BREAK_POLICY_ALWAYS is the default.
/* Install POLICY_CALLBACK as the break-policy hook consulted for every
 * breakpoint; passing NULL restores the default always-insert policy. */
4981 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
4983 if (policy_callback)
4984 break_policy_func = policy_callback;
4986 break_policy_func = always_insert_breakpoint;
/* Ask the registered break policy whether a breakpoint in METHOD should
 * actually be emitted.  (The misspelled identifier is the public name used
 * by callers, so it is kept.)  The per-case return statements are in lines
 * elided from this view. */
4990 should_insert_brekpoint (MonoMethod *method) {
4991 switch (break_policy_func (method)) {
4992 case MONO_BREAK_POLICY_ALWAYS:
4994 case MONO_BREAK_POLICY_NEVER:
4996 case MONO_BREAK_POLICY_ON_DBG:
4997 g_warning ("mdb no longer supported");
5000 g_warning ("Incorrect value returned from break policy callback");
5005 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/* IS_SET selects SetGenericValueImpl (read the value through args[2] and
 * store it into the element slot, with a write barrier for reference
 * elements) vs GetGenericValueImpl (load the element and store it through
 * args[2]).  Bounds checks are the callers' responsibility. */
5007 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5009 MonoInst *addr, *store, *load;
5010 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
5012 /* the bounds check is already done by the callers */
5013 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5015 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
5016 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
5017 if (mini_type_is_reference (cfg, fsig->params [2]))
5018 emit_write_barrier (cfg, addr, load);
5020 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
5021 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* Whether KLASS is a reference type in this compilation context (delegates
 * to mini_type_is_reference, which handles generic sharing). */
5028 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5030 return mini_type_is_reference (cfg, &klass->byval_arg);
/*
 * emit_array_store:
 *
 *   Emit IR for a stelem: SP[0] = array, SP[1] = index, SP[2] = value.
 * Reference-type stores with SAFETY_CHECKS go through the virtual
 * stelemref helper (which performs the covariance check); storing a NULL
 * constant skips the helper.  Otherwise a direct membase store is emitted
 * (gsharedvt variable types use OP_STOREV_MEMBASE, constant indexes fold
 * the offset), with a write barrier when the element is a reference type.
 */
5034 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
5036 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
5037 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
5038 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
5039 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
5040 MonoInst *iargs [3];
5043 mono_class_setup_vtable (obj_array);
5044 g_assert (helper->slot);
5046 if (sp [0]->type != STACK_OBJ)
5048 if (sp [2]->type != STACK_OBJ)
5055 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
5059 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
5062 // FIXME-VT: OP_ICONST optimization
5063 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
5064 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5065 ins->opcode = OP_STOREV_MEMBASE;
5066 } else if (sp [1]->opcode == OP_ICONST) {
5067 int array_reg = sp [0]->dreg;
5068 int index_reg = sp [1]->dreg;
/* Fold the element offset at compile time for constant indexes */
5069 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
5072 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5073 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
5075 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5076 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5077 if (generic_class_is_reference_type (cfg, klass))
5078 emit_write_barrier (cfg, addr, sp [2]);
/* Implementation of the Array.UnsafeStore/UnsafeLoad intrinsics: an
 * element store (emit_array_store with safety checks disabled) or an
 * element load from the address computed by mini_emit_ldelema_1_ins, with
 * no bounds check.  The element class comes from the value parameter for
 * stores and from the return type for loads. */
5085 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5090 eklass = mono_class_from_mono_type (fsig->params [2]);
5092 eklass = mono_class_from_mono_type (fsig->ret);
5095 return emit_array_store (cfg, eklass, args, FALSE);
5097 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5098 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
/* Whether an Array.UnsafeMov between PARAM_KLASS and RETURN_KLASS is
 * legal: both must be valuetypes without managed references, not mix
 * struct with primitive/enum, not involve R4/R8, and have identical value
 * sizes.
 * NOTE(review): '¶m_klass' on the ISSTRUCT lines is a mis-encoded
 * '&param_klass' (HTML '&para;' corruption) - restore before compiling. */
5104 is_unsafe_mov_compatible (MonoClass *param_klass, MonoClass *return_klass)
5108 //Only allow for valuetypes
5109 if (!param_klass->valuetype || !return_klass->valuetype)
5113 if (param_klass->has_references || return_klass->has_references)
5116 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5117 if ((MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5118 (!MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)))
5121 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5122 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8)
5125 //And have the same size
5126 if (mono_class_value_size (param_klass, &align) != mono_class_value_size (return_klass, &align))
/* Array.UnsafeMov intrinsic: permitted only between semantically
 * equivalent valuetypes, or rank-1 arrays of such valuetypes, as decided
 * by is_unsafe_mov_compatible ().  The emitting code for the compatible
 * cases is in lines elided from this view. */
5132 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5134 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5135 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
5137 //Valuetypes that are semantically equivalent
5138 if (is_unsafe_mov_compatible (param_klass, return_klass))
5141 //Arrays of valuetypes that are semantically equivalent
5142 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (param_klass->element_class, return_klass->element_class))
/* Intrinsic-expansion hook for constructor calls: try SIMD intrinsics
 * first (when MONO_OPT_SIMD is enabled and the arch supports them), then
 * fall through to native-types intrinsics. */
5149 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5151 #ifdef MONO_ARCH_SIMD_INTRINSICS
5152 MonoInst *ins = NULL;
5154 if (cfg->opt & MONO_OPT_SIMD) {
5155 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5161 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/* Append an OP_MEMORY_BARRIER of the given KIND (e.g. FullBarrier) to the
 * current basic block; presumably returns the new instruction (the return
 * statement is in a line elided from this view). */
5165 emit_memory_barrier (MonoCompile *cfg, int kind)
5167 MonoInst *ins = NULL;
5168 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5169 MONO_ADD_INS (cfg->cbb, ins);
5170 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *
 *   Intrinsic expansion used only when compiling with the LLVM back end:
 * System.Math Sin/Cos/Sqrt and Abs(double) become single-opcode R8
 * instructions, and - under MONO_OPT_CMOV - Min/Max on I4/U4/I8/U8 become
 * the corresponding (unsigned) min/max opcodes.  Returns the emitted
 * instruction, or NULL when no intrinsic applies.
 * NOTE(review): the opcode assignments for several branches (e.g. Sin/Cos
 * and the signed Min/Max cases) are in lines elided from this view.
 */
5176 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5178 MonoInst *ins = NULL;
5181 /* The LLVM backend supports these intrinsics */
5182 if (cmethod->klass == mono_defaults.math_class) {
5183 if (strcmp (cmethod->name, "Sin") == 0) {
5185 } else if (strcmp (cmethod->name, "Cos") == 0) {
5187 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
5189 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
5194 MONO_INST_NEW (cfg, ins, opcode);
5195 ins->type = STACK_R8;
5196 ins->dreg = mono_alloc_freg (cfg);
5197 ins->sreg1 = args [0]->dreg;
5198 MONO_ADD_INS (cfg->cbb, ins);
5202 if (cfg->opt & MONO_OPT_CMOV) {
5203 if (strcmp (cmethod->name, "Min") == 0) {
5204 if (fsig->params [0]->type == MONO_TYPE_I4)
5206 if (fsig->params [0]->type == MONO_TYPE_U4)
5207 opcode = OP_IMIN_UN;
5208 else if (fsig->params [0]->type == MONO_TYPE_I8)
5210 else if (fsig->params [0]->type == MONO_TYPE_U8)
5211 opcode = OP_LMIN_UN;
5212 } else if (strcmp (cmethod->name, "Max") == 0) {
5213 if (fsig->params [0]->type == MONO_TYPE_I4)
5215 if (fsig->params [0]->type == MONO_TYPE_U4)
5216 opcode = OP_IMAX_UN;
5217 else if (fsig->params [0]->type == MONO_TYPE_I8)
5219 else if (fsig->params [0]->type == MONO_TYPE_U8)
5220 opcode = OP_LMAX_UN;
5225 MONO_INST_NEW (cfg, ins, opcode);
5226 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5227 ins->dreg = mono_alloc_ireg (cfg);
5228 ins->sreg1 = args [0]->dreg;
5229 ins->sreg2 = args [1]->dreg;
5230 MONO_ADD_INS (cfg->cbb, ins);
/* Intrinsics that are safe to expand even in shared (generic-sharing)
 * code: the System.Array UnsafeStore/UnsafeLoad/UnsafeMov icalls. */
5238 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5240 if (cmethod->klass == mono_defaults.array_class) {
5241 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5242 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5243 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5244 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5245 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5246 return emit_array_unsafe_mov (cfg, fsig, args);
5253 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5255 MonoInst *ins = NULL;
5257 static MonoClass *runtime_helpers_class = NULL;
5258 if (! runtime_helpers_class)
5259 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
5260 "System.Runtime.CompilerServices", "RuntimeHelpers");
5262 if (cmethod->klass == mono_defaults.string_class) {
5263 if (strcmp (cmethod->name, "get_Chars") == 0) {
5264 int dreg = alloc_ireg (cfg);
5265 int index_reg = alloc_preg (cfg);
5266 int mult_reg = alloc_preg (cfg);
5267 int add_reg = alloc_preg (cfg);
5269 #if SIZEOF_REGISTER == 8
5270 /* The array reg is 64 bits but the index reg is only 32 */
5271 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5273 index_reg = args [1]->dreg;
5275 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5277 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5278 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
5279 add_reg = ins->dreg;
5280 /* Avoid a warning */
5282 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5285 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5286 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5287 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5288 add_reg, G_STRUCT_OFFSET (MonoString, chars));
5290 type_from_op (ins, NULL, NULL);
5292 } else if (strcmp (cmethod->name, "get_Length") == 0) {
5293 int dreg = alloc_ireg (cfg);
5294 /* Decompose later to allow more optimizations */
5295 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5296 ins->type = STACK_I4;
5297 ins->flags |= MONO_INST_FAULT;
5298 cfg->cbb->has_array_access = TRUE;
5299 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5302 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
5303 int mult_reg = alloc_preg (cfg);
5304 int add_reg = alloc_preg (cfg);
5306 /* The corlib functions check for oob already. */
5307 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
5308 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5309 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
5310 return cfg->cbb->last_ins;
5313 } else if (cmethod->klass == mono_defaults.object_class) {
5315 if (strcmp (cmethod->name, "GetType") == 0) {
5316 int dreg = alloc_ireg_ref (cfg);
5317 int vt_reg = alloc_preg (cfg);
5318 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
5319 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
5320 type_from_op (ins, NULL, NULL);
5323 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
5324 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
5325 int dreg = alloc_ireg (cfg);
5326 int t1 = alloc_ireg (cfg);
5328 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5329 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5330 ins->type = STACK_I4;
5334 } else if (strcmp (cmethod->name, ".ctor") == 0) {
5335 MONO_INST_NEW (cfg, ins, OP_NOP);
5336 MONO_ADD_INS (cfg->cbb, ins);
5340 } else if (cmethod->klass == mono_defaults.array_class) {
5341 if (!cfg->gsharedvt && strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
5342 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
5344 #ifndef MONO_BIG_ARRAYS
5346 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5349 if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5350 int dreg = alloc_ireg (cfg);
5351 int bounds_reg = alloc_ireg_mp (cfg);
5352 MonoBasicBlock *end_bb, *szarray_bb;
5353 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5355 NEW_BBLOCK (cfg, end_bb);
5356 NEW_BBLOCK (cfg, szarray_bb);
5358 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5359 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
5360 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5361 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5362 /* Non-szarray case */
5364 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5365 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
5367 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5368 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5369 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5370 MONO_START_BB (cfg, szarray_bb);
5373 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5374 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
5376 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5377 MONO_START_BB (cfg, end_bb);
5379 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5380 ins->type = STACK_I4;
5386 if (cmethod->name [0] != 'g')
5389 if (strcmp (cmethod->name, "get_Rank") == 0) {
5390 int dreg = alloc_ireg (cfg);
5391 int vtable_reg = alloc_preg (cfg);
5392 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5393 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
5394 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5395 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
5396 type_from_op (ins, NULL, NULL);
5399 } else if (strcmp (cmethod->name, "get_Length") == 0) {
5400 int dreg = alloc_ireg (cfg);
5402 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5403 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
5404 type_from_op (ins, NULL, NULL);
5409 } else if (cmethod->klass == runtime_helpers_class) {
5411 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
5412 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
5416 } else if (cmethod->klass == mono_defaults.thread_class) {
5417 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
5418 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5419 MONO_ADD_INS (cfg->cbb, ins);
5421 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
5422 return emit_memory_barrier (cfg, FullBarrier);
5424 } else if (cmethod->klass == mono_defaults.monitor_class) {
5426 /* FIXME this should be integrated to the check below once we support the trampoline version */
5427 #if defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
5428 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) {
5429 MonoMethod *fast_method = NULL;
5431 /* Avoid infinite recursion */
5432 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN && !strcmp (cfg->method->name, "FastMonitorEnterV4"))
5435 fast_method = mono_monitor_get_fast_path (cmethod);
5439 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
5443 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
5444 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
5447 if (COMPILE_LLVM (cfg)) {
5449 * Pass the argument normally, the LLVM backend will handle the
5450 * calling convention problems.
5452 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5454 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
5455 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5456 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5457 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5460 return (MonoInst*)call;
5461 } else if (strcmp (cmethod->name, "Exit") == 0) {
5464 if (COMPILE_LLVM (cfg)) {
5465 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5467 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
5468 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5469 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5470 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5473 return (MonoInst*)call;
5475 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
5477 MonoMethod *fast_method = NULL;
5479 /* Avoid infinite recursion */
5480 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
5481 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
5482 strcmp (cfg->method->name, "FastMonitorExit") == 0))
5485 if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) ||
5486 strcmp (cmethod->name, "Exit") == 0)
5487 fast_method = mono_monitor_get_fast_path (cmethod);
5491 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
5494 } else if (cmethod->klass->image == mono_defaults.corlib &&
5495 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5496 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
5499 #if SIZEOF_REGISTER == 8
5500 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
5501 /* 64 bit reads are already atomic */
5502 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
5503 ins->dreg = mono_alloc_preg (cfg);
5504 ins->inst_basereg = args [0]->dreg;
5505 ins->inst_offset = 0;
5506 MONO_ADD_INS (cfg->cbb, ins);
5510 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
5511 if (strcmp (cmethod->name, "Increment") == 0) {
5512 MonoInst *ins_iconst;
5515 if (fsig->params [0]->type == MONO_TYPE_I4) {
5516 opcode = OP_ATOMIC_ADD_NEW_I4;
5517 cfg->has_atomic_add_new_i4 = TRUE;
5519 #if SIZEOF_REGISTER == 8
5520 else if (fsig->params [0]->type == MONO_TYPE_I8)
5521 opcode = OP_ATOMIC_ADD_NEW_I8;
5524 if (!mono_arch_opcode_supported (opcode))
5526 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5527 ins_iconst->inst_c0 = 1;
5528 ins_iconst->dreg = mono_alloc_ireg (cfg);
5529 MONO_ADD_INS (cfg->cbb, ins_iconst);
5531 MONO_INST_NEW (cfg, ins, opcode);
5532 ins->dreg = mono_alloc_ireg (cfg);
5533 ins->inst_basereg = args [0]->dreg;
5534 ins->inst_offset = 0;
5535 ins->sreg2 = ins_iconst->dreg;
5536 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5537 MONO_ADD_INS (cfg->cbb, ins);
5539 } else if (strcmp (cmethod->name, "Decrement") == 0) {
5540 MonoInst *ins_iconst;
5543 if (fsig->params [0]->type == MONO_TYPE_I4) {
5544 opcode = OP_ATOMIC_ADD_NEW_I4;
5545 cfg->has_atomic_add_new_i4 = TRUE;
5547 #if SIZEOF_REGISTER == 8
5548 else if (fsig->params [0]->type == MONO_TYPE_I8)
5549 opcode = OP_ATOMIC_ADD_NEW_I8;
5552 if (!mono_arch_opcode_supported (opcode))
5554 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5555 ins_iconst->inst_c0 = -1;
5556 ins_iconst->dreg = mono_alloc_ireg (cfg);
5557 MONO_ADD_INS (cfg->cbb, ins_iconst);
5559 MONO_INST_NEW (cfg, ins, opcode);
5560 ins->dreg = mono_alloc_ireg (cfg);
5561 ins->inst_basereg = args [0]->dreg;
5562 ins->inst_offset = 0;
5563 ins->sreg2 = ins_iconst->dreg;
5564 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5565 MONO_ADD_INS (cfg->cbb, ins);
5567 } else if (strcmp (cmethod->name, "Add") == 0) {
5570 if (fsig->params [0]->type == MONO_TYPE_I4) {
5571 opcode = OP_ATOMIC_ADD_NEW_I4;
5572 cfg->has_atomic_add_new_i4 = TRUE;
5574 #if SIZEOF_REGISTER == 8
5575 else if (fsig->params [0]->type == MONO_TYPE_I8)
5576 opcode = OP_ATOMIC_ADD_NEW_I8;
5579 if (!mono_arch_opcode_supported (opcode))
5581 MONO_INST_NEW (cfg, ins, opcode);
5582 ins->dreg = mono_alloc_ireg (cfg);
5583 ins->inst_basereg = args [0]->dreg;
5584 ins->inst_offset = 0;
5585 ins->sreg2 = args [1]->dreg;
5586 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5587 MONO_ADD_INS (cfg->cbb, ins);
5590 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
5592 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
5593 if (strcmp (cmethod->name, "Exchange") == 0) {
5595 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
5597 if (fsig->params [0]->type == MONO_TYPE_I4) {
5598 opcode = OP_ATOMIC_EXCHANGE_I4;
5599 cfg->has_atomic_exchange_i4 = TRUE;
5601 #if SIZEOF_REGISTER == 8
5602 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
5603 (fsig->params [0]->type == MONO_TYPE_I))
5604 opcode = OP_ATOMIC_EXCHANGE_I8;
5606 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I)) {
5607 opcode = OP_ATOMIC_EXCHANGE_I4;
5608 cfg->has_atomic_exchange_i4 = TRUE;
5614 if (!mono_arch_opcode_supported (opcode))
5617 MONO_INST_NEW (cfg, ins, opcode);
5618 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
5619 ins->inst_basereg = args [0]->dreg;
5620 ins->inst_offset = 0;
5621 ins->sreg2 = args [1]->dreg;
5622 MONO_ADD_INS (cfg->cbb, ins);
5624 switch (fsig->params [0]->type) {
5626 ins->type = STACK_I4;
5630 ins->type = STACK_I8;
5632 case MONO_TYPE_OBJECT:
5633 ins->type = STACK_OBJ;
5636 g_assert_not_reached ();
5639 if (cfg->gen_write_barriers && is_ref)
5640 emit_write_barrier (cfg, args [0], args [1]);
5642 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
5644 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
5645 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
5647 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
5648 if (fsig->params [1]->type == MONO_TYPE_I4)
5650 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
5651 size = sizeof (gpointer);
5652 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
5655 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
5657 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
5658 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5659 ins->sreg1 = args [0]->dreg;
5660 ins->sreg2 = args [1]->dreg;
5661 ins->sreg3 = args [2]->dreg;
5662 ins->type = STACK_I4;
5663 MONO_ADD_INS (cfg->cbb, ins);
5664 cfg->has_atomic_cas_i4 = TRUE;
5665 } else if (size == 8) {
5666 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I8))
5668 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
5669 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5670 ins->sreg1 = args [0]->dreg;
5671 ins->sreg2 = args [1]->dreg;
5672 ins->sreg3 = args [2]->dreg;
5673 ins->type = STACK_I8;
5674 MONO_ADD_INS (cfg->cbb, ins);
5676 /* g_assert_not_reached (); */
5678 if (cfg->gen_write_barriers && is_ref)
5679 emit_write_barrier (cfg, args [0], args [1]);
5681 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
5683 if (strcmp (cmethod->name, "MemoryBarrier") == 0)
5684 ins = emit_memory_barrier (cfg, FullBarrier);
5688 } else if (cmethod->klass->image == mono_defaults.corlib) {
5689 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
5690 && strcmp (cmethod->klass->name, "Debugger") == 0) {
5691 if (should_insert_brekpoint (cfg->method)) {
5692 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
5694 MONO_INST_NEW (cfg, ins, OP_NOP);
5695 MONO_ADD_INS (cfg->cbb, ins);
5699 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
5700 && strcmp (cmethod->klass->name, "Environment") == 0) {
5702 EMIT_NEW_ICONST (cfg, ins, 1);
5704 EMIT_NEW_ICONST (cfg, ins, 0);
5708 } else if (cmethod->klass == mono_defaults.math_class) {
5710 * There is general branches code for Min/Max, but it does not work for
5712 * http://everything2.com/?node_id=1051618
5714 } else if ((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") || !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) && !strcmp (cmethod->klass->name, "Selector") && !strcmp (cmethod->name, "GetHandle") && cfg->compile_aot && (args [0]->opcode == OP_GOT_ENTRY || args[0]->opcode == OP_AOTCONST)) {
5715 #ifdef MONO_ARCH_HAVE_OBJC_GET_SELECTOR
5717 MonoJumpInfoToken *ji;
5720 cfg->disable_llvm = TRUE;
5722 if (args [0]->opcode == OP_GOT_ENTRY) {
5723 pi = args [0]->inst_p1;
5724 g_assert (pi->opcode == OP_PATCH_INFO);
5725 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
5728 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
5729 ji = args [0]->inst_p0;
5732 NULLIFY_INS (args [0]);
5735 s = mono_ldstr (cfg->domain, ji->image, mono_metadata_token_index (ji->token));
5736 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
5737 ins->dreg = mono_alloc_ireg (cfg);
5739 ins->inst_p0 = mono_string_to_utf8 (s);
5740 MONO_ADD_INS (cfg->cbb, ins);
5745 #ifdef MONO_ARCH_SIMD_INTRINSICS
5746 if (cfg->opt & MONO_OPT_SIMD) {
5747 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5753 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
5757 if (COMPILE_LLVM (cfg)) {
5758 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
5763 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
5767 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *   Redirect calls to well-known runtime methods to faster JIT-emitted
 *   equivalents. Visible here: String.InternalAllocateStr is replaced by a
 *   direct call to the GC's managed allocator when allocation profiling is
 *   off and shared (generic) code is not being compiled.
 *   NOTE(review): interior lines are elided in this view; the non-redirected
 *   fallback path is not visible here.
 */
5770 inline static MonoInst*
5771 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
5772 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
5774 if (method->klass == mono_defaults.string_class) {
5775 /* managed string allocation support */
5776 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
5777 MonoInst *iargs [2];
5778 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
5779 MonoMethod *managed_alloc = NULL;
5781 g_assert (vtable); /* Should not fail since it's System.String */
/* Cross compilation cannot resolve the target GC's managed allocator. */
5782 #ifndef MONO_CROSS_COMPILE
5783 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE);
/* Call the managed allocator with (vtable, length) instead of the icall. */
5787 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
5788 iargs [1] = args [0];
5789 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *   For each argument (including an implicit 'this') of an inlined call,
 *   create a fresh local variable in CFG and emit a store of the incoming
 *   stack value into it, so the inlined body reads from real arg vars.
 */
5796 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
5798 MonoInst *store, *temp;
5801 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* Slot 0 is 'this' when hasthis; its type comes from the stack entry. */
5802 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
5805 * FIXME: We should use *args++ = sp [0], but that would mean the arg
5806 * would be different than the MonoInst's used to represent arguments, and
5807 * the ldelema implementation can't deal with that.
5808 * Solution: When ldelema is used on an inline argument, create a var for
5809 * it, emit ldelema on that var, and emit the saving code below in
5810 * inline_method () if needed.
5812 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
5813 cfg->args [i] = temp;
5814 /* This uses cfg->args [i] which is set by the preceding line */
5815 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
5816 store->cil_code = sp [0]->cil_code;
5821 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
5822 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
5824 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *   Debugging aid: only allow inlining of callees whose full name starts
 *   with the MONO_INLINE_CALLED_METHOD_NAME_LIMIT environment variable.
 *   The looked-up limit is cached in a function-local static.
 */
5826 check_inline_called_method_name_limit (MonoMethod *called_method)
5829 static const char *limit = NULL;
5831 if (limit == NULL) {
5832 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
5834 if (limit_string != NULL)
5835 limit = limit_string;
/* Empty limit means "no restriction" (handled in the elided else path). */
5840 if (limit [0] != '\0') {
5841 char *called_method_name = mono_method_full_name (called_method, TRUE);
5843 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
5844 g_free (called_method_name);
5846 //return (strncmp_result <= 0);
5847 return (strncmp_result == 0);
5854 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *   Debugging aid: only allow inlining inside callers whose full name starts
 *   with the MONO_INLINE_CALLER_METHOD_NAME_LIMIT environment variable.
 *   Mirrors check_inline_called_method_name_limit () for the caller side.
 */
5856 check_inline_caller_method_name_limit (MonoMethod *caller_method)
5859 static const char *limit = NULL;
5861 if (limit == NULL) {
5862 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
5863 if (limit_string != NULL) {
5864 limit = limit_string;
5870 if (limit [0] != '\0') {
5871 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
5873 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
5874 g_free (caller_method_name);
5876 //return (strncmp_result <= 0);
5877 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *   Emit IR that zero-initializes register DREG according to RTYPE:
 *   NULL for reference/pointer types, 0 for integral types, 0.0 for
 *   floating point, VZERO for value types (incl. generic value types and
 *   gshared type variables known to be valuetypes).
 */
5885 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
/* Shared 0.0 constant referenced by every OP_R8CONST emitted here. */
5887 static double r8_0 = 0.0;
5891 rtype = mini_replace_type (rtype);
5895 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
5896 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
5897 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5898 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
5899 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
5900 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
5901 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5902 ins->type = STACK_R8;
5903 ins->inst_p0 = (void*)&r8_0;
5905 MONO_ADD_INS (cfg->cbb, ins);
5906 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
5907 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
5908 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
5909 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
5910 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* Everything else is treated as a reference and set to NULL. */
5912 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *   Like emit_init_rvar () but emits OP_DUMMY_* placeholder initializations
 *   that keep the IR/SSA form valid without generating real code. Falls back
 *   to a real initialization for types with no dummy opcode.
 */
5917 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
5921 rtype = mini_replace_type (rtype);
5925 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
5926 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
5927 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
5928 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
5929 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
5930 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
5931 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
5932 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
5933 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
5934 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
5935 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
5936 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
/* No dummy opcode for this type: emit a real zero initialization. */
5938 emit_init_rvar (cfg, dreg, rtype);
5942 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
/*
 * emit_init_local:
 *   Initialize IL local LOCAL of type TYPE. With soft-float, the value is
 *   built in a scratch register and stored through a LOCSTORE so the
 *   soft-float decomposition sees a proper store.
 */
5944 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
5946 MonoInst *var = cfg->locals [local];
5947 if (COMPILE_SOFT_FLOAT (cfg)) {
5949 int reg = alloc_dreg (cfg, var->type);
5950 emit_init_rvar (cfg, reg, type);
5951 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
5954 emit_init_rvar (cfg, var->dreg, type);
5956 emit_dummy_init_rvar (cfg, var->dreg, type);
/*
 * inline_method:
 *   Try to inline CMETHOD at the current point of CFG. Saves the caller's
 *   per-method compile state (locals, args, bblock maps, generic context),
 *   recursively runs mono_method_to_ir () on the callee between the fresh
 *   sbblock/ebblock pair, then either merges the new blocks into the caller
 *   (success: cost below threshold or inline_always) or discards them.
 *   Returns the cost on success, <= 0 on failure (per the elided paths).
 */
5961 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
5962 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_always)
5964 MonoInst *ins, *rvar = NULL;
5965 MonoMethodHeader *cheader;
5966 MonoBasicBlock *ebblock, *sbblock;
5968 MonoMethod *prev_inlined_method;
5969 MonoInst **prev_locals, **prev_args;
5970 MonoType **prev_arg_types;
5971 guint prev_real_offset;
5972 GHashTable *prev_cbb_hash;
5973 MonoBasicBlock **prev_cil_offset_to_bb;
5974 MonoBasicBlock *prev_cbb;
5975 unsigned char* prev_cil_start;
5976 guint32 prev_cil_offset_to_bb_len;
5977 MonoMethod *prev_current_method;
5978 MonoGenericContext *prev_generic_context;
5979 gboolean ret_var_set, prev_ret_var_set, virtual = FALSE;
5981 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
5983 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
5984 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
5987 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
5988 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
5992 if (cfg->verbose_level > 2)
5993 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
5995 if (!cmethod->inline_info) {
5996 cfg->stat_inlineable_methods++;
5997 cmethod->inline_info = 1;
6000 /* allocate local variables */
6001 cheader = mono_method_get_header (cmethod);
6003 if (cheader == NULL || mono_loader_get_last_error ()) {
6004 MonoLoaderError *error = mono_loader_get_last_error ();
6007 mono_metadata_free_mh (cheader);
6008 if (inline_always && error)
6009 mono_cfg_set_exception (cfg, error->exception_type);
6011 mono_loader_clear_error ();
6015 /*Must verify before creating locals as it can cause the JIT to assert.*/
6016 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
6017 mono_metadata_free_mh (cheader);
6021 /* allocate space to store the return value */
6022 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6023 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* Swap in the callee's locals; the caller's set is restored below. */
6026 prev_locals = cfg->locals;
6027 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
6028 for (i = 0; i < cheader->num_locals; ++i)
6029 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
6031 /* allocate start and end blocks */
6032 /* This is needed so if the inline is aborted, we can clean up */
6033 NEW_BBLOCK (cfg, sbblock);
6034 sbblock->real_offset = real_offset;
6036 NEW_BBLOCK (cfg, ebblock);
6037 ebblock->block_num = cfg->num_bblocks++;
6038 ebblock->real_offset = real_offset;
/* Save the caller's per-method IR-generation state. */
6040 prev_args = cfg->args;
6041 prev_arg_types = cfg->arg_types;
6042 prev_inlined_method = cfg->inlined_method;
6043 cfg->inlined_method = cmethod;
6044 cfg->ret_var_set = FALSE;
6045 cfg->inline_depth ++;
6046 prev_real_offset = cfg->real_offset;
6047 prev_cbb_hash = cfg->cbb_hash;
6048 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
6049 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
6050 prev_cil_start = cfg->cil_start;
6051 prev_cbb = cfg->cbb;
6052 prev_current_method = cfg->current_method;
6053 prev_generic_context = cfg->generic_context;
6054 prev_ret_var_set = cfg->ret_var_set;
/* An inlined callvirt on an instance method still needs a null check. */
6056 if (*ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
6059 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, virtual);
6061 ret_var_set = cfg->ret_var_set;
/* Restore the caller's state saved above. */
6063 cfg->inlined_method = prev_inlined_method;
6064 cfg->real_offset = prev_real_offset;
6065 cfg->cbb_hash = prev_cbb_hash;
6066 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
6067 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
6068 cfg->cil_start = prev_cil_start;
6069 cfg->locals = prev_locals;
6070 cfg->args = prev_args;
6071 cfg->arg_types = prev_arg_types;
6072 cfg->current_method = prev_current_method;
6073 cfg->generic_context = prev_generic_context;
6074 cfg->ret_var_set = prev_ret_var_set;
6075 cfg->inline_depth --;
/* Accept the inline if the callee was cheap enough (cost < 60). */
6077 if ((costs >= 0 && costs < 60) || inline_always) {
6078 if (cfg->verbose_level > 2)
6079 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6081 cfg->stat_inlined_methods++;
6083 /* always add some code to avoid block split failures */
6084 MONO_INST_NEW (cfg, ins, OP_NOP);
6085 MONO_ADD_INS (prev_cbb, ins);
6087 prev_cbb->next_bb = sbblock;
6088 link_bblock (cfg, prev_cbb, sbblock);
6091 * Get rid of the begin and end bblocks if possible to aid local
6094 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
6096 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
6097 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
6099 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
6100 MonoBasicBlock *prev = ebblock->in_bb [0];
6101 mono_merge_basic_blocks (cfg, prev, ebblock);
6103 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
6104 mono_merge_basic_blocks (cfg, prev_cbb, prev);
6105 cfg->cbb = prev_cbb;
6109 * It's possible that the rvar is set in some prev bblock, but not in others.
6115 for (i = 0; i < ebblock->in_count; ++i) {
6116 bb = ebblock->in_bb [i];
6118 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
6121 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6131 * If the inlined method contains only a throw, then the ret var is not
6132 * set, so set it to a dummy value.
6135 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6137 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
6140 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Inline rejected: reset exception state and drop the new bblocks. */
6143 if (cfg->verbose_level > 2)
6144 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
6145 cfg->exception_type = MONO_EXCEPTION_NONE;
6146 mono_loader_clear_error ();
6148 /* This gets rid of the newly added bblocks */
6149 cfg->cbb = prev_cbb;
6151 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6156 * Some of these comments may well be out-of-date.
6157 * Design decisions: we do a single pass over the IL code (and we do bblock
6158 * splitting/merging in the few cases when it's required: a back jump to an IL
6159 * address that was not already seen as bblock starting point).
6160 * Code is validated as we go (full verification is still better left to metadata/verify.c).
6161 * Complex operations are decomposed in simpler ones right away. We need to let the
6162 * arch-specific code peek and poke inside this process somehow (except when the
6163 * optimizations can take advantage of the full semantic info of coarse opcodes).
6164 * All the opcodes of the form opcode.s are 'normalized' to opcode.
6165 * MonoInst->opcode initially is the IL opcode or some simplification of that
6166 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
6167 * opcode with value bigger than OP_LAST.
6168 * At this point the IR can be handed over to an interpreter, a dumb code generator
6169 * or to the optimizing code generator that will translate it to SSA form.
6171 * Profiling directed optimizations.
6172 * We may compile by default with few or no optimizations and instrument the code
6173 * or the user may indicate what methods to optimize the most either in a config file
6174 * or through repeated runs where the compiler applies offline the optimizations to
6175 * each method and then decides if it was worth it.
6178 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
6179 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
6180 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
6181 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
6182 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
6183 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
6184 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
6185 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; LOAD_ERROR;}
6187 /* offset from br.s -> br like opcodes */
6188 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *   Return TRUE if IL address IP does not start a different basic block
 *   than BB, i.e. it is safe to treat IP as belonging to BB.
 */
6191 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
6193 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
6195 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *   Scan the IL stream [START, END) and create a MonoBasicBlock at every
 *   branch target and at the instruction following each branch (via
 *   GET_BBLOCK). Basic blocks ending in CEE_THROW are marked out_of_line
 *   so they can be moved to the cold section.
 */
6199 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
6201 unsigned char *ip = start;
6202 unsigned char *target;
6205 MonoBasicBlock *bblock;
6206 const MonoOpcode *opcode;
6209 cli_addr = ip - start;
6210 i = mono_opcode_value ((const guint8 **)&ip, end);
6213 opcode = &mono_opcodes [i];
/* Advance over the operand; only branch operands create bblocks. */
6214 switch (opcode->argument) {
6215 case MonoInlineNone:
6218 case MonoInlineString:
6219 case MonoInlineType:
6220 case MonoInlineField:
6221 case MonoInlineMethod:
6224 case MonoShortInlineR:
6231 case MonoShortInlineVar:
6232 case MonoShortInlineI:
6235 case MonoShortInlineBrTarget:
/* 1-byte signed offset, relative to the end of the 2-byte instruction. */
6236 target = start + cli_addr + 2 + (signed char)ip [1];
6237 GET_BBLOCK (cfg, bblock, target);
6240 GET_BBLOCK (cfg, bblock, ip);
6242 case MonoInlineBrTarget:
/* 4-byte signed offset, relative to the end of the 5-byte instruction. */
6243 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
6244 GET_BBLOCK (cfg, bblock, target);
6247 GET_BBLOCK (cfg, bblock, ip);
6249 case MonoInlineSwitch: {
6250 guint32 n = read32 (ip + 1);
/* Switch targets are relative to the end of the whole jump table. */
6253 cli_addr += 5 + 4 * n;
6254 target = start + cli_addr;
6255 GET_BBLOCK (cfg, bblock, target);
6257 for (j = 0; j < n; ++j) {
6258 target = start + cli_addr + (gint32)read32 (ip);
6259 GET_BBLOCK (cfg, bblock, target);
6269 g_assert_not_reached ();
6272 if (i == CEE_THROW) {
6273 unsigned char *bb_start = ip - 1;
6275 /* Find the start of the bblock containing the throw */
6277 while ((bb_start >= start) && !bblock) {
6278 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
6282 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *   Resolve TOKEN to a MonoMethod in the context of M, allowing open
 *   constructed types. Wrappers store their targets as wrapper data
 *   rather than metadata tokens.
 */
6292 static inline MonoMethod *
6293 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6297 if (m->wrapper_type != MONO_WRAPPER_NONE) {
6298 method = mono_method_get_wrapper_data (m, token);
6300 method = mono_class_inflate_generic_method (method, context);
6302 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *   Like mini_get_method_allow_open (), but when not compiling shared
 *   (gshared) code, rejects methods on open constructed types (the elided
 *   branch presumably clears the result).
 */
6308 static inline MonoMethod *
6309 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6311 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
6313 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *   Resolve TOKEN to a MonoClass in METHOD's context (wrapper data for
 *   wrappers, metadata lookup otherwise) and make sure it is initialized.
 */
6319 static inline MonoClass*
6320 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
6324 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6325 klass = mono_method_get_wrapper_data (method, token);
6327 klass = mono_class_inflate_generic_class (klass, context);
6329 klass = mono_class_get_full (method->klass->image, token, context);
6332 mono_class_init (klass);
/*
 * mini_get_signature:
 *   Resolve TOKEN to a MonoMethodSignature: wrapper data (inflated with
 *   CONTEXT when needed) for wrappers, or a parsed metadata signature.
 */
6336 static inline MonoMethodSignature*
6337 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
6339 MonoMethodSignature *fsig;
6341 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6344 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6346 fsig = mono_inflate_generic_signature (fsig, context, &error);
6348 g_assert (mono_error_ok (&error));
6351 fsig = mono_metadata_parse_signature (method->klass->image, token);
6357 * Returns TRUE if the JIT should abort inlining because "callee"
6358 * is influenced by security attributes.
/*
 * check_linkdemand:
 *   Evaluate CAS link demands when inlining CALLEE into CALLER. On an ECMA
 *   link-demand failure, emits code that throws a SecurityException at the
 *   call site; otherwise records a security exception on CFG (without
 *   overwriting an earlier one).
 */
6361 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
6365 if ((cfg->method != caller) && mono_security_method_has_declsec (callee)) {
6369 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
6370 if (result == MONO_JIT_SECURITY_OK)
6373 if (result == MONO_JIT_LINKDEMAND_ECMA) {
6374 /* Generate code to throw a SecurityException before the actual call/link */
6375 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6378 NEW_ICONST (cfg, args [0], 4);
6379 NEW_METHODCONST (cfg, args [1], caller);
6380 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
6381 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
6382 /* don't hide previous results */
6383 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
6384 cfg->exception_data = result;
/*
 * throw_exception:
 *   Return (lazily looking up and caching) the managed
 *   SecurityManager.ThrowException(Exception) helper method.
 */
6392 throw_exception (void)
6394 static MonoMethod *method = NULL;
6397 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6398 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *   Emit a call to SecurityManager.ThrowException () that raises EX at
 *   runtime at the current point in the IR stream.
 */
6405 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
6407 MonoMethod *thrower = throw_exception ();
6410 EMIT_NEW_PCONST (cfg, args [0], ex);
6411 mono_emit_method_call (cfg, thrower, args, NULL);
6415 * Return the original method if a wrapper is specified. We can only access
6416 * the custom attributes from the original method.
6419 get_original_method (MonoMethod *method)
6421 if (method->wrapper_type == MONO_WRAPPER_NONE)
6424 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
6425 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
6428 /* in other cases we need to find the original method */
6429 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *   CoreCLR security check for field access: if the (unwrapped) caller may
 *   not touch FIELD, emit code throwing the returned security exception.
 */
6433 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
6434 MonoBasicBlock *bblock, unsigned char *ip)
6436 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6437 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
6439 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *   CoreCLR security check for method calls: if the (unwrapped) caller may
 *   not call CALLEE, emit code throwing the returned security exception.
 */
6443 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
6444 MonoBasicBlock *bblock, unsigned char *ip)
6446 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6447 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
6449 emit_throw_exception (cfg, ex);
6453 * Check that the IL instructions at ip are the array initialization
6454 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *   Pattern-match the canonical "dup; ldtoken <field>; call
 *   RuntimeHelpers.InitializeArray" sequence following a newarr and, when
 *   it matches a supported element type, return the static initializer
 *   blob (or its RVA when AOT-compiling) and its size/field token.
 */
6457 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
6460 * newarr[System.Int32]
6462 * ldtoken field valuetype ...
6463 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
6465 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
6466 guint32 token = read32 (ip + 7);
6467 guint32 field_token = read32 (ip + 2);
6468 guint32 field_index = field_token & 0xffffff;
6470 const char *data_ptr;
6472 MonoMethod *cmethod;
6473 MonoClass *dummy_class;
6474 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
6480 *out_field_token = field_token;
6482 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Confirm the callee really is RuntimeHelpers.InitializeArray from corlib. */
6485 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
6487 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
6488 case MONO_TYPE_BOOLEAN:
6492 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
6493 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
6494 case MONO_TYPE_CHAR:
/* The initializer field must be at least as large as the array data. */
6511 if (size > mono_type_size (field->type, &dummy_align))
6514 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
6515 if (!method->klass->image->dynamic) {
6516 field_index = read32 (ip + 2) & 0xffffff;
6517 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
6518 data_ptr = mono_image_rva_map (method->klass->image, rva);
6519 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
6520 /* for aot code we do the lookup on load */
6521 if (aot && data_ptr)
6522 return GUINT_TO_POINTER (rva);
6524 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
6526 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *   Record an InvalidProgramException on CFG, formatting a message that
 *   includes the method's full name and a disassembly of the offending IL
 *   instruction at IP.
 */
6534 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
6536 char *method_fname = mono_method_full_name (method, TRUE);
6538 MonoMethodHeader *header = mono_method_get_header (method);
6540 if (header->code_size == 0)
6541 method_code = g_strdup ("method body is empty.");
6543 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
6544 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
6545 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
6546 g_free (method_fname);
6547 g_free (method_code);
6548 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *   Record a pre-built exception object on CFG and GC-register the slot so
 *   the managed object is kept alive until compilation finishes.
 */
6552 set_exception_object (MonoCompile *cfg, MonoException *exception)
6554 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
6555 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
6556 cfg->exception_ptr = exception;
/*
 * emit_stloc_ir:
 *   Emit a store of the top-of-stack value into IL local N. When the value
 *   is a freshly emitted constant, retarget its dreg to the local instead
 *   of emitting a separate move.
 */
6560 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
6563 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
6564 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
6565 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
6566 /* Optimize reg-reg moves away */
6568 * Can't optimize other opcodes, since sp[0] might point to
6569 * the last ins of a decomposed opcode.
6571 sp [0]->dreg = (cfg)->locals [n]->dreg;
6573 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
6578 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *   Peephole for "ldloca N; initobj T": emit the local initialization
 *   directly and skip the address-taking, returning the advanced ip
 *   (elided here) when the pattern matched.
 */
6581 static inline unsigned char *
6582 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
6592 local = read16 (ip + 2);
6596 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
6597 /* From the INITOBJ case */
6598 token = read32 (ip + 2);
6599 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
6600 CHECK_TYPELOAD (klass);
6601 type = mini_replace_type (&klass->byval_arg);
6602 emit_init_local (cfg, local, type, TRUE);
/*
 * is_exception_class:
 *   Walk up the parent chain and report whether CLASS derives from
 *   System.Exception.
 */
6610 is_exception_class (MonoClass *class)
6613 if (class == mono_defaults.exception_class)
6615 class = class->parent;
6621 * is_jit_optimizer_disabled:
6623 * Determine whether M's assembly has a DebuggableAttribute with the
6624 * IsJITOptimizerDisabled flag set.
6627 is_jit_optimizer_disabled (MonoMethod *m)
6629 MonoAssembly *ass = m->klass->image->assembly;
6630 MonoCustomAttrInfo* attrs;
6631 static MonoClass *klass;
6633 gboolean val = FALSE;
/* Result is cached per assembly; _inited is published after the value. */
6636 if (ass->jit_optimizer_disabled_inited)
6637 return ass->jit_optimizer_disabled;
6640 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
6643 ass->jit_optimizer_disabled = FALSE;
6644 mono_memory_barrier ();
6645 ass->jit_optimizer_disabled_inited = TRUE;
6649 attrs = mono_custom_attrs_from_assembly (ass);
6651 for (i = 0; i < attrs->num_attrs; ++i) {
6652 MonoCustomAttrEntry *attr = &attrs->attrs [i];
6655 MonoMethodSignature *sig;
6657 if (!attr->ctor || attr->ctor->klass != klass)
6659 /* Decode the attribute. See reflection.c */
6660 len = attr->data_size;
6661 p = (const char*)attr->data;
/* Custom attribute blobs start with the 0x0001 prolog. */
6662 g_assert (read16 (p) == 0x0001);
6665 // FIXME: Support named parameters
6666 sig = mono_method_signature (attr->ctor);
6667 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
6669 /* Two boolean arguments */
6673 mono_custom_attrs_free (attrs);
6676 ass->jit_optimizer_disabled = val;
6677 mono_memory_barrier ();
6678 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *   Decide whether a tail. call from METHOD to CMETHOD can actually be
 *   compiled as a tail call: the architecture/signatures must allow it and
 *   no argument may reference the caller's stack frame.
 */
6684 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
6686 gboolean supported_tail_call;
6689 #ifdef MONO_ARCH_HAVE_OP_TAIL_CALL
6690 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
6692 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6695 for (i = 0; i < fsig->param_count; ++i) {
6696 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
6697 /* These can point to the current method's stack */
6698 supported_tail_call = FALSE;
6700 if (fsig->hasthis && cmethod->klass->valuetype)
6701 /* this might point to the current method's stack */
6702 supported_tail_call = FALSE;
6703 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
6704 supported_tail_call = FALSE;
6705 if (cfg->method->save_lmf)
6706 supported_tail_call = FALSE;
6707 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
6708 supported_tail_call = FALSE;
6709 if (call_opcode != CEE_CALL)
6710 supported_tail_call = FALSE;
6712 /* Debugging support */
6714 if (supported_tail_call) {
6715 if (!mono_debug_count ())
6716 supported_tail_call = FALSE;
6720 return supported_tail_call;
6723 /* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
6724 * it to the thread local value based on the tls_offset field. Every other kind of access to
6725 * the field causes an assert.
/* Returns TRUE only for corlib's ThreadLocal`1.tlsdata field.
 * NOTE(review): decimated chunk — the return type line, opening brace and
 * the two `return FALSE;` lines after the strcmp checks are not visible. */
6728 is_magic_tls_access (MonoClassField *field)
/* strcmp () != 0 means "not the tlsdata field" (a `return FALSE;` follows
 * in the full source). */
6730 if (strcmp (field->name, "tlsdata"))
/* Likewise for the declaring type name ThreadLocal`1. */
6732 if (strcmp (field->parent->name, "ThreadLocal`1"))
/* Finally, only the corlib copy of the type is "magic". */
6734 return field->parent->image == mono_defaults.corlib;
6737 /* emits the code needed to access a managed tls var (like ThreadStatic)
6738 * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
6739 * pointer for the current thread.
6740 * Returns the MonoInst* representing the address of the tls var.
/* NOTE(review): decimated chunk — the return type line, opening brace,
 * the declaration of `addr` and the final `return addr;` / closing brace
 * are not visible here. */
6743 emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
6746 int static_data_reg, array_reg, dreg;
6747 int offset2_reg, idx_reg;
6748 // inlined access to the tls data
6749 // idx = (offset >> 24) - 1;
6750 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
/* static_data_reg = thread->static_data */
6751 static_data_reg = alloc_ireg (cfg);
6752 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
/* idx_reg = (offset >> 24) - 1, i.e. the static_data bucket index encoded
 * in the top byte of the tls offset. */
6753 idx_reg = alloc_ireg (cfg);
6754 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
6755 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
/* Scale the index by sizeof (gpointer) (shift by 3 on 64-bit, 2 on 32-bit)
 * and load static_data [idx]. */
6756 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
6757 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
6758 array_reg = alloc_ireg (cfg);
6759 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
/* offset2_reg = offset & 0xffffff: the byte offset within the bucket. */
6760 offset2_reg = alloc_ireg (cfg);
6761 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
/* addr = bucket base + in-bucket offset; returned to the caller
 * (the `return addr;` line is outside this view). */
6762 dreg = alloc_ireg (cfg);
6763 EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
6768 * redirect access to the tlsdata field to the tls var given by the tls_offset field.
6769 * this address is cached per-method in cached_tls_addr.
/* Builds (or reuses) the IR that yields the address of the thread-local
 * value behind ThreadLocal<T>.tlsdata.  On the first call the address is
 * stored in a temp var and *cached_tls_addr records it; later calls simply
 * reload the temp.
 * NOTE(review): decimated chunk — the return type line, opening brace,
 * the `return addr;` of the cached fast path, the `if (thread_ins)`/else
 * structure around lines 6786-6790 and the final return/closing brace are
 * not visible here. */
6772 create_magic_tls_access (MonoCompile *cfg, MonoClassField *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
6774 MonoInst *load, *addr, *temp, *store, *thread_ins;
6775 MonoClassField *offset_field;
/* Fast path: the address was already computed for this method; reload it
 * from its cached temp. */
6777 if (*cached_tls_addr) {
6778 EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
/* Get the current MonoInternalThread, preferably via an arch intrinsic. */
6781 thread_ins = mono_get_thread_intrinsic (cfg);
/* Load ThreadLocal`1.tls_offset from the ThreadLocal instance. */
6782 offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");
6784 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
6786 MONO_ADD_INS (cfg->cbb, thread_ins);
/* Fallback when no intrinsic is available: call
 * Thread.CurrentInternalThread_internal () instead. */
6788 MonoMethod *thread_method;
6789 thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
6790 thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
/* Compute the tls var address and type it as a managed pointer to T. */
6792 addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
6793 addr->klass = mono_class_from_mono_type (tls_field->type);
6794 addr->type = STACK_MP;
/* Cache the address in a temp so subsequent accesses in this method reuse it. */
6795 *cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
6796 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);
6798 EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
6803 * mono_method_to_ir:
6805 * Translate the .net IL into linear IR.
6808 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
6809 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
6810 guint inline_offset, gboolean is_virtual_call)
6813 MonoInst *ins, **sp, **stack_start;
6814 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
6815 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
6816 MonoMethod *cmethod, *method_definition;
6817 MonoInst **arg_array;
6818 MonoMethodHeader *header;
6820 guint32 token, ins_flag;
6822 MonoClass *constrained_call = NULL;
6823 unsigned char *ip, *end, *target, *err_pos;
6824 MonoMethodSignature *sig;
6825 MonoGenericContext *generic_context = NULL;
6826 MonoGenericContainer *generic_container = NULL;
6827 MonoType **param_types;
6828 int i, n, start_new_bblock, dreg;
6829 int num_calls = 0, inline_costs = 0;
6830 int breakpoint_id = 0;
6832 MonoBoolean security, pinvoke;
6833 MonoSecurityManager* secman = NULL;
6834 MonoDeclSecurityActions actions;
6835 GSList *class_inits = NULL;
6836 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
6838 gboolean init_locals, seq_points, skip_dead_blocks;
6839 gboolean disable_inline, sym_seq_points = FALSE;
6840 MonoInst *cached_tls_addr = NULL;
6841 MonoDebugMethodInfo *minfo;
6842 MonoBitSet *seq_point_locs = NULL;
6843 MonoBitSet *seq_point_set_locs = NULL;
6845 disable_inline = is_jit_optimizer_disabled (method);
6847 /* serialization and xdomain stuff may need access to private fields and methods */
6848 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
6849 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
6850 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
6851 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
6852 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
6853 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
6855 dont_verify |= mono_security_smcs_hack_enabled ();
6857 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
6858 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
6859 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
6860 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
6861 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
6863 image = method->klass->image;
6864 header = mono_method_get_header (method);
6866 MonoLoaderError *error;
6868 if ((error = mono_loader_get_last_error ())) {
6869 mono_cfg_set_exception (cfg, error->exception_type);
6871 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
6872 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
6874 goto exception_exit;
6876 generic_container = mono_method_get_generic_container (method);
6877 sig = mono_method_signature (method);
6878 num_args = sig->hasthis + sig->param_count;
6879 ip = (unsigned char*)header->code;
6880 cfg->cil_start = ip;
6881 end = ip + header->code_size;
6882 cfg->stat_cil_code_size += header->code_size;
6884 seq_points = cfg->gen_seq_points && cfg->method == method;
6885 #ifdef PLATFORM_ANDROID
6886 seq_points &= cfg->method->wrapper_type == MONO_WRAPPER_NONE;
6889 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
6890 /* We could hit a seq point before attaching to the JIT (#8338) */
6894 if (cfg->gen_seq_points && cfg->method == method) {
6895 minfo = mono_debug_lookup_method (method);
6897 int i, n_il_offsets;
6901 mono_debug_symfile_get_line_numbers_full (minfo, NULL, NULL, &n_il_offsets, &il_offsets, &line_numbers, NULL, NULL, NULL, NULL);
6902 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6903 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6904 sym_seq_points = TRUE;
6905 for (i = 0; i < n_il_offsets; ++i) {
6906 if (il_offsets [i] < header->code_size)
6907 mono_bitset_set_fast (seq_point_locs, il_offsets [i]);
6909 g_free (il_offsets);
6910 g_free (line_numbers);
6915 * Methods without init_locals set could cause asserts in various passes
6916 * (#497220). To work around this, we emit dummy initialization opcodes
6917 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
6918 * on some platforms.
6920 if ((cfg->opt & MONO_OPT_UNSAFE) && ARCH_HAVE_DUMMY_INIT)
6921 init_locals = header->init_locals;
6925 method_definition = method;
6926 while (method_definition->is_inflated) {
6927 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
6928 method_definition = imethod->declaring;
6931 /* SkipVerification is not allowed if core-clr is enabled */
6932 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
6934 dont_verify_stloc = TRUE;
6937 if (sig->is_inflated)
6938 generic_context = mono_method_get_context (method);
6939 else if (generic_container)
6940 generic_context = &generic_container->context;
6941 cfg->generic_context = generic_context;
6943 if (!cfg->generic_sharing_context)
6944 g_assert (!sig->has_type_parameters);
6946 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
6947 g_assert (method->is_inflated);
6948 g_assert (mono_method_get_context (method)->method_inst);
6950 if (method->is_inflated && mono_method_get_context (method)->method_inst)
6951 g_assert (sig->generic_param_count);
6953 if (cfg->method == method) {
6954 cfg->real_offset = 0;
6956 cfg->real_offset = inline_offset;
6959 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
6960 cfg->cil_offset_to_bb_len = header->code_size;
6962 cfg->current_method = method;
6964 if (cfg->verbose_level > 2)
6965 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
6967 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
6969 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
6970 for (n = 0; n < sig->param_count; ++n)
6971 param_types [n + sig->hasthis] = sig->params [n];
6972 cfg->arg_types = param_types;
6974 dont_inline = g_list_prepend (dont_inline, method);
6975 if (cfg->method == method) {
6977 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
6978 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
6981 NEW_BBLOCK (cfg, start_bblock);
6982 cfg->bb_entry = start_bblock;
6983 start_bblock->cil_code = NULL;
6984 start_bblock->cil_length = 0;
6985 #if defined(__native_client_codegen__)
6986 MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
6987 ins->dreg = alloc_dreg (cfg, STACK_I4);
6988 MONO_ADD_INS (start_bblock, ins);
6992 NEW_BBLOCK (cfg, end_bblock);
6993 cfg->bb_exit = end_bblock;
6994 end_bblock->cil_code = NULL;
6995 end_bblock->cil_length = 0;
6996 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
6997 g_assert (cfg->num_bblocks == 2);
6999 arg_array = cfg->args;
7001 if (header->num_clauses) {
7002 cfg->spvars = g_hash_table_new (NULL, NULL);
7003 cfg->exvars = g_hash_table_new (NULL, NULL);
7005 /* handle exception clauses */
7006 for (i = 0; i < header->num_clauses; ++i) {
7007 MonoBasicBlock *try_bb;
7008 MonoExceptionClause *clause = &header->clauses [i];
7009 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
7010 try_bb->real_offset = clause->try_offset;
7011 try_bb->try_start = TRUE;
7012 try_bb->region = ((i + 1) << 8) | clause->flags;
7013 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
7014 tblock->real_offset = clause->handler_offset;
7015 tblock->flags |= BB_EXCEPTION_HANDLER;
7018 * Linking the try block with the EH block hinders inlining as we won't be able to
7019 * merge the bblocks from inlining and produce an artificial hole for no good reason.
7021 if (COMPILE_LLVM (cfg))
7022 link_bblock (cfg, try_bb, tblock);
7024 if (*(ip + clause->handler_offset) == CEE_POP)
7025 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
7027 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
7028 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
7029 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
7030 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7031 MONO_ADD_INS (tblock, ins);
7033 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) {
7034 /* finally clauses already have a seq point */
7035 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7036 MONO_ADD_INS (tblock, ins);
7039 /* todo: is a fault block unsafe to optimize? */
7040 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
7041 tblock->flags |= BB_EXCEPTION_UNSAFE;
7045 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
7047 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
7049 /* catch and filter blocks get the exception object on the stack */
7050 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
7051 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7052 MonoInst *dummy_use;
7054 /* mostly like handle_stack_args (), but just sets the input args */
7055 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
7056 tblock->in_scount = 1;
7057 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7058 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7061 * Add a dummy use for the exvar so its liveness info will be
7065 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
7067 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7068 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
7069 tblock->flags |= BB_EXCEPTION_HANDLER;
7070 tblock->real_offset = clause->data.filter_offset;
7071 tblock->in_scount = 1;
7072 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7073 /* The filter block shares the exvar with the handler block */
7074 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7075 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7076 MONO_ADD_INS (tblock, ins);
7080 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
7081 clause->data.catch_class &&
7082 cfg->generic_sharing_context &&
7083 mono_class_check_context_used (clause->data.catch_class)) {
7085 * In shared generic code with catch
7086 * clauses containing type variables
7087 * the exception handling code has to
7088 * be able to get to the rgctx.
7089 * Therefore we have to make sure that
7090 * the vtable/mrgctx argument (for
7091 * static or generic methods) or the
7092 * "this" argument (for non-static
7093 * methods) are live.
7095 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7096 mini_method_get_context (method)->method_inst ||
7097 method->klass->valuetype) {
7098 mono_get_vtable_var (cfg);
7100 MonoInst *dummy_use;
7102 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
7107 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
7108 cfg->cbb = start_bblock;
7109 cfg->args = arg_array;
7110 mono_save_args (cfg, sig, inline_args);
7113 /* FIRST CODE BLOCK */
7114 NEW_BBLOCK (cfg, bblock);
7115 bblock->cil_code = ip;
7119 ADD_BBLOCK (cfg, bblock);
7121 if (cfg->method == method) {
7122 breakpoint_id = mono_debugger_method_has_breakpoint (method);
7123 if (breakpoint_id) {
7124 MONO_INST_NEW (cfg, ins, OP_BREAK);
7125 MONO_ADD_INS (bblock, ins);
7129 if (mono_security_cas_enabled ())
7130 secman = mono_security_manager_get_methods ();
7132 security = (secman && mono_security_method_has_declsec (method));
7133 /* at this point having security doesn't mean we have any code to generate */
7134 if (security && (cfg->method == method)) {
7135 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
7136 * And we do not want to enter the next section (with allocation) if we
7137 * have nothing to generate */
7138 security = mono_declsec_get_demands (method, &actions);
7141 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
7142 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
7144 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7145 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
7146 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
7148 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
7149 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
7153 mono_custom_attrs_free (custom);
7156 custom = mono_custom_attrs_from_class (wrapped->klass);
7157 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
7161 mono_custom_attrs_free (custom);
7164 /* not a P/Invoke after all */
7169 /* we use a separate basic block for the initialization code */
7170 NEW_BBLOCK (cfg, init_localsbb);
7171 cfg->bb_init = init_localsbb;
7172 init_localsbb->real_offset = cfg->real_offset;
7173 start_bblock->next_bb = init_localsbb;
7174 init_localsbb->next_bb = bblock;
7175 link_bblock (cfg, start_bblock, init_localsbb);
7176 link_bblock (cfg, init_localsbb, bblock);
7178 cfg->cbb = init_localsbb;
7180 if (cfg->gsharedvt && cfg->method == method) {
7181 MonoGSharedVtMethodInfo *info;
7182 MonoInst *var, *locals_var;
7185 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
7186 info->method = cfg->method;
7187 info->count_entries = 16;
7188 info->entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
7189 cfg->gsharedvt_info = info;
7191 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7192 /* prevent it from being register allocated */
7193 //var->flags |= MONO_INST_VOLATILE;
7194 cfg->gsharedvt_info_var = var;
7196 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
7197 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
7199 /* Allocate locals */
7200 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7201 /* prevent it from being register allocated */
7202 //locals_var->flags |= MONO_INST_VOLATILE;
7203 cfg->gsharedvt_locals_var = locals_var;
7205 dreg = alloc_ireg (cfg);
7206 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
7208 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
7209 ins->dreg = locals_var->dreg;
7211 MONO_ADD_INS (cfg->cbb, ins);
7212 cfg->gsharedvt_locals_var_ins = ins;
7214 cfg->flags |= MONO_CFG_HAS_ALLOCA;
7217 ins->flags |= MONO_INST_INIT;
7221 /* at this point we know, if security is TRUE, that some code needs to be generated */
7222 if (security && (cfg->method == method)) {
7225 cfg->stat_cas_demand_generation++;
7227 if (actions.demand.blob) {
7228 /* Add code for SecurityAction.Demand */
7229 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
7230 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
7231 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
7232 mono_emit_method_call (cfg, secman->demand, args, NULL);
7234 if (actions.noncasdemand.blob) {
7235 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
7236 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
7237 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
7238 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
7239 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
7240 mono_emit_method_call (cfg, secman->demand, args, NULL);
7242 if (actions.demandchoice.blob) {
7243 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
7244 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
7245 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
7246 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
7247 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
7251 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
7253 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
7256 if (mono_security_core_clr_enabled ()) {
7257 /* check if this is native code, e.g. an icall or a p/invoke */
7258 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
7259 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7261 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
7262 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
7264 /* if this ia a native call then it can only be JITted from platform code */
7265 if ((icall || pinvk) && method->klass && method->klass->image) {
7266 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
7267 MonoException *ex = icall ? mono_get_exception_security () :
7268 mono_get_exception_method_access ();
7269 emit_throw_exception (cfg, ex);
7276 CHECK_CFG_EXCEPTION;
7278 if (header->code_size == 0)
7281 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
7286 if (cfg->method == method)
7287 mono_debug_init_method (cfg, bblock, breakpoint_id);
7289 for (n = 0; n < header->num_locals; ++n) {
7290 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
7295 /* We force the vtable variable here for all shared methods
7296 for the possibility that they might show up in a stack
7297 trace where their exact instantiation is needed. */
7298 if (cfg->generic_sharing_context && method == cfg->method) {
7299 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7300 mini_method_get_context (method)->method_inst ||
7301 method->klass->valuetype) {
7302 mono_get_vtable_var (cfg);
7304 /* FIXME: Is there a better way to do this?
7305 We need the variable live for the duration
7306 of the whole method. */
7307 cfg->args [0]->flags |= MONO_INST_VOLATILE;
7311 /* add a check for this != NULL to inlined methods */
7312 if (is_virtual_call) {
7315 NEW_ARGLOAD (cfg, arg_ins, 0);
7316 MONO_ADD_INS (cfg->cbb, arg_ins);
7317 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
7320 skip_dead_blocks = !dont_verify;
7321 if (skip_dead_blocks) {
7322 original_bb = bb = mono_basic_block_split (method, &error);
7323 if (!mono_error_ok (&error)) {
7324 mono_error_cleanup (&error);
7330 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
7331 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
7334 start_new_bblock = 0;
7337 if (cfg->method == method)
7338 cfg->real_offset = ip - header->code;
7340 cfg->real_offset = inline_offset;
7345 if (start_new_bblock) {
7346 bblock->cil_length = ip - bblock->cil_code;
7347 if (start_new_bblock == 2) {
7348 g_assert (ip == tblock->cil_code);
7350 GET_BBLOCK (cfg, tblock, ip);
7352 bblock->next_bb = tblock;
7355 start_new_bblock = 0;
7356 for (i = 0; i < bblock->in_scount; ++i) {
7357 if (cfg->verbose_level > 3)
7358 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
7359 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
7363 g_slist_free (class_inits);
7366 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
7367 link_bblock (cfg, bblock, tblock);
7368 if (sp != stack_start) {
7369 handle_stack_args (cfg, stack_start, sp - stack_start);
7371 CHECK_UNVERIFIABLE (cfg);
7373 bblock->next_bb = tblock;
7376 for (i = 0; i < bblock->in_scount; ++i) {
7377 if (cfg->verbose_level > 3)
7378 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
7379 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
7382 g_slist_free (class_inits);
7387 if (skip_dead_blocks) {
7388 int ip_offset = ip - header->code;
7390 if (ip_offset == bb->end)
7394 int op_size = mono_opcode_size (ip, end);
7395 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
7397 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
7399 if (ip_offset + op_size == bb->end) {
7400 MONO_INST_NEW (cfg, ins, OP_NOP);
7401 MONO_ADD_INS (bblock, ins);
7402 start_new_bblock = 1;
7410 * Sequence points are points where the debugger can place a breakpoint.
7411 * Currently, we generate these automatically at points where the IL
7414 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
7416 * Make methods interruptable at the beginning, and at the targets of
7417 * backward branches.
7418 * Also, do this at the start of every bblock in methods with clauses too,
7419 * to be able to handle instructions with inprecise control flow like
7421 * Backward branches are handled at the end of method-to-ir ().
7423 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
7425 /* Avoid sequence points on empty IL like .volatile */
7426 // FIXME: Enable this
7427 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
7428 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
7429 if (sp != stack_start)
7430 ins->flags |= MONO_INST_NONEMPTY_STACK;
7431 MONO_ADD_INS (cfg->cbb, ins);
7434 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
7437 bblock->real_offset = cfg->real_offset;
7439 if ((cfg->method == method) && cfg->coverage_info) {
7440 guint32 cil_offset = ip - header->code;
7441 cfg->coverage_info->data [cil_offset].cil_code = ip;
7443 /* TODO: Use an increment here */
7444 #if defined(TARGET_X86)
7445 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
7446 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
7448 MONO_ADD_INS (cfg->cbb, ins);
7450 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
7451 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
7455 if (cfg->verbose_level > 3)
7456 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
7460 if (seq_points && !sym_seq_points && sp != stack_start) {
7462 * The C# compiler uses these nops to notify the JIT that it should
7463 * insert seq points.
7465 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
7466 MONO_ADD_INS (cfg->cbb, ins);
7468 if (cfg->keep_cil_nops)
7469 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
7471 MONO_INST_NEW (cfg, ins, OP_NOP);
7473 MONO_ADD_INS (bblock, ins);
7476 if (should_insert_brekpoint (cfg->method)) {
7477 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
7479 MONO_INST_NEW (cfg, ins, OP_NOP);
7482 MONO_ADD_INS (bblock, ins);
7488 CHECK_STACK_OVF (1);
7489 n = (*ip)-CEE_LDARG_0;
7491 EMIT_NEW_ARGLOAD (cfg, ins, n);
7499 CHECK_STACK_OVF (1);
7500 n = (*ip)-CEE_LDLOC_0;
7502 EMIT_NEW_LOCLOAD (cfg, ins, n);
7511 n = (*ip)-CEE_STLOC_0;
7514 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
7516 emit_stloc_ir (cfg, sp, header, n);
7523 CHECK_STACK_OVF (1);
7526 EMIT_NEW_ARGLOAD (cfg, ins, n);
7532 CHECK_STACK_OVF (1);
7535 NEW_ARGLOADA (cfg, ins, n);
7536 MONO_ADD_INS (cfg->cbb, ins);
7546 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
7548 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
7553 CHECK_STACK_OVF (1);
7556 EMIT_NEW_LOCLOAD (cfg, ins, n);
7560 case CEE_LDLOCA_S: {
7561 unsigned char *tmp_ip;
7563 CHECK_STACK_OVF (1);
7564 CHECK_LOCAL (ip [1]);
7566 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
7572 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
7581 CHECK_LOCAL (ip [1]);
7582 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
7584 emit_stloc_ir (cfg, sp, header, ip [1]);
7589 CHECK_STACK_OVF (1);
7590 EMIT_NEW_PCONST (cfg, ins, NULL);
7591 ins->type = STACK_OBJ;
7596 CHECK_STACK_OVF (1);
7597 EMIT_NEW_ICONST (cfg, ins, -1);
7610 CHECK_STACK_OVF (1);
7611 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
7617 CHECK_STACK_OVF (1);
7619 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
7625 CHECK_STACK_OVF (1);
7626 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
7632 CHECK_STACK_OVF (1);
7633 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7634 ins->type = STACK_I8;
7635 ins->dreg = alloc_dreg (cfg, STACK_I8);
7637 ins->inst_l = (gint64)read64 (ip);
7638 MONO_ADD_INS (bblock, ins);
7644 gboolean use_aotconst = FALSE;
7646 #ifdef TARGET_POWERPC
7647 /* FIXME: Clean this up */
7648 if (cfg->compile_aot)
7649 use_aotconst = TRUE;
7652 /* FIXME: we should really allocate this only late in the compilation process */
7653 f = mono_domain_alloc (cfg->domain, sizeof (float));
7655 CHECK_STACK_OVF (1);
7661 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
7663 dreg = alloc_freg (cfg);
7664 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
7665 ins->type = STACK_R8;
7667 MONO_INST_NEW (cfg, ins, OP_R4CONST);
7668 ins->type = STACK_R8;
7669 ins->dreg = alloc_dreg (cfg, STACK_R8);
7671 MONO_ADD_INS (bblock, ins);
7681 gboolean use_aotconst = FALSE;
7683 #ifdef TARGET_POWERPC
7684 /* FIXME: Clean this up */
7685 if (cfg->compile_aot)
7686 use_aotconst = TRUE;
7689 /* FIXME: we should really allocate this only late in the compilation process */
7690 d = mono_domain_alloc (cfg->domain, sizeof (double));
7692 CHECK_STACK_OVF (1);
7698 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
7700 dreg = alloc_freg (cfg);
7701 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
7702 ins->type = STACK_R8;
7704 MONO_INST_NEW (cfg, ins, OP_R8CONST);
7705 ins->type = STACK_R8;
7706 ins->dreg = alloc_dreg (cfg, STACK_R8);
7708 MONO_ADD_INS (bblock, ins);
7717 MonoInst *temp, *store;
7719 CHECK_STACK_OVF (1);
7723 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
7724 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
7726 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
7729 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
7742 if (sp [0]->type == STACK_R8)
7743 /* we need to pop the value from the x86 FP stack */
7744 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
7750 INLINE_FAILURE ("jmp");
7751 GSHAREDVT_FAILURE (*ip);
7754 if (stack_start != sp)
7756 token = read32 (ip + 1);
7757 /* FIXME: check the signature matches */
7758 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7760 if (!cmethod || mono_loader_get_last_error ())
7763 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
7764 GENERIC_SHARING_FAILURE (CEE_JMP);
7766 if (mono_security_cas_enabled ())
7767 CHECK_CFG_EXCEPTION;
7769 emit_instrumentation_call (cfg, mono_profiler_method_leave);
7771 if (ARCH_HAVE_OP_TAIL_CALL) {
7772 MonoMethodSignature *fsig = mono_method_signature (cmethod);
7775 /* Handle tail calls similarly to calls */
7776 n = fsig->param_count + fsig->hasthis;
7780 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
7781 call->method = cmethod;
7782 call->tail_call = TRUE;
7783 call->signature = mono_method_signature (cmethod);
7784 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
7785 call->inst.inst_p0 = cmethod;
7786 for (i = 0; i < n; ++i)
7787 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
7789 mono_arch_emit_call (cfg, call);
7790 MONO_ADD_INS (bblock, (MonoInst*)call);
7792 for (i = 0; i < num_args; ++i)
7793 /* Prevent arguments from being optimized away */
7794 arg_array [i]->flags |= MONO_INST_VOLATILE;
7796 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
7797 ins = (MonoInst*)call;
7798 ins->inst_p0 = cmethod;
7799 MONO_ADD_INS (bblock, ins);
7803 start_new_bblock = 1;
7808 case CEE_CALLVIRT: {
7809 MonoInst *addr = NULL;
7810 MonoMethodSignature *fsig = NULL;
7812 int virtual = *ip == CEE_CALLVIRT;
7813 int calli = *ip == CEE_CALLI;
7814 gboolean pass_imt_from_rgctx = FALSE;
7815 MonoInst *imt_arg = NULL;
7816 MonoInst *keep_this_alive = NULL;
7817 gboolean pass_vtable = FALSE;
7818 gboolean pass_mrgctx = FALSE;
7819 MonoInst *vtable_arg = NULL;
7820 gboolean check_this = FALSE;
7821 gboolean supported_tail_call = FALSE;
7822 gboolean tail_call = FALSE;
7823 gboolean need_seq_point = FALSE;
7824 guint32 call_opcode = *ip;
7825 gboolean emit_widen = TRUE;
7826 gboolean push_res = TRUE;
7827 gboolean skip_ret = FALSE;
7828 gboolean delegate_invoke = FALSE;
7831 token = read32 (ip + 1);
7836 //GSHAREDVT_FAILURE (*ip);
7841 fsig = mini_get_signature (method, token, generic_context);
7842 n = fsig->param_count + fsig->hasthis;
7844 if (method->dynamic && fsig->pinvoke) {
7848 * This is a call through a function pointer using a pinvoke
7849 * signature. Have to create a wrapper and call that instead.
7850 * FIXME: This is very slow, need to create a wrapper at JIT time
7851 * instead based on the signature.
7853 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
7854 EMIT_NEW_PCONST (cfg, args [1], fsig);
7856 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
7859 MonoMethod *cil_method;
7861 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7862 cil_method = cmethod;
7864 if (constrained_call) {
7865 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7866 if (cfg->verbose_level > 2)
7867 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_call));
7868 if (!((constrained_call->byval_arg.type == MONO_TYPE_VAR ||
7869 constrained_call->byval_arg.type == MONO_TYPE_MVAR) &&
7870 cfg->generic_sharing_context)) {
7871 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_call, generic_context);
7874 if (cfg->verbose_level > 2)
7875 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_call));
7877 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
7879 * This is needed since get_method_constrained can't find
7880 * the method in klass representing a type var.
7881 * The type var is guaranteed to be a reference type in this
7884 if (!mini_is_gsharedvt_klass (cfg, constrained_call))
7885 g_assert (!cmethod->klass->valuetype);
7887 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
7892 if (!cmethod || mono_loader_get_last_error ())
7894 if (!dont_verify && !cfg->skip_visibility) {
7895 MonoMethod *target_method = cil_method;
7896 if (method->is_inflated) {
7897 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
7899 if (!mono_method_can_access_method (method_definition, target_method) &&
7900 !mono_method_can_access_method (method, cil_method))
7901 METHOD_ACCESS_FAILURE;
7904 if (mono_security_core_clr_enabled ())
7905 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
7907 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
7908 /* MS.NET seems to silently convert this to a callvirt */
7913 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
7914 * converts to a callvirt.
7916 * tests/bug-515884.il is an example of this behavior
7918 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
7919 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
7920 if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
7924 if (!cmethod->klass->inited)
7925 if (!mono_class_init (cmethod->klass))
7926 TYPE_LOAD_ERROR (cmethod->klass);
7928 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
7929 mini_class_is_system_array (cmethod->klass)) {
7930 array_rank = cmethod->klass->rank;
7931 fsig = mono_method_signature (cmethod);
7933 fsig = mono_method_signature (cmethod);
7938 if (fsig->pinvoke) {
7939 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
7940 check_for_pending_exc, cfg->compile_aot);
7941 fsig = mono_method_signature (wrapper);
7942 } else if (constrained_call) {
7943 fsig = mono_method_signature (cmethod);
7945 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
7949 mono_save_token_info (cfg, image, token, cil_method);
7951 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7953 * Need to emit an implicit seq point after every non-void call so single stepping through nested calls like
7954 * foo (bar (), baz ())
7955 * works correctly. MS does this also:
7956 * http://stackoverflow.com/questions/6937198/making-your-net-language-step-correctly-in-the-debugger
7957 * The problem with this approach is that the debugger will stop after all calls returning a value,
7958 * even for simple cases, like:
7961 /* Special case a few common successor opcodes */
7962 if (!(ip + 5 < end && (ip [5] == CEE_POP || ip [5] == CEE_NOP)) && !(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
7963 need_seq_point = TRUE;
7966 n = fsig->param_count + fsig->hasthis;
7968 /* Don't support calls made using type arguments for now */
7970 if (cfg->gsharedvt) {
7971 if (mini_is_gsharedvt_signature (cfg, fsig))
7972 GSHAREDVT_FAILURE (*ip);
7976 if (mono_security_cas_enabled ()) {
7977 if (check_linkdemand (cfg, method, cmethod))
7978 INLINE_FAILURE ("linkdemand");
7979 CHECK_CFG_EXCEPTION;
7982 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
7983 g_assert_not_reached ();
7986 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
7989 if (!cfg->generic_sharing_context && cmethod)
7990 g_assert (!mono_method_check_context_used (cmethod));
7994 //g_assert (!virtual || fsig->hasthis);
7998 if (constrained_call) {
7999 if (mini_is_gsharedvt_klass (cfg, constrained_call)) {
8001 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as a ref type or as a vtype.
8003 if ((cmethod->klass != mono_defaults.object_class) && constrained_call->valuetype && cmethod->klass->valuetype) {
8004 /* The 'Own method' case below */
8005 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
8006 /* 'The type parameter is instantiated as a reference type' case below. */
8007 } else if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
8008 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || mini_is_gsharedvt_type (cfg, fsig->ret)) &&
8009 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || mini_is_gsharedvt_type (cfg, fsig->params [0]))))) {
8010 MonoInst *args [16];
8013 * This case handles calls to
8014 * - object:ToString()/Equals()/GetHashCode(),
8015 * - System.IComparable<T>:CompareTo()
8016 * - System.IEquatable<T>:Equals ()
8017 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
8021 if (mono_method_check_context_used (cmethod))
8022 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
8024 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
8025 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_call), constrained_call, MONO_RGCTX_INFO_KLASS);
8027 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
8028 if (fsig->hasthis && fsig->param_count) {
8029 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
8030 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
8031 ins->dreg = alloc_preg (cfg);
8032 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
8033 MONO_ADD_INS (cfg->cbb, ins);
8036 if (mini_is_gsharedvt_type (cfg, fsig->params [0])) {
8039 args [3] = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
8041 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
8042 addr_reg = ins->dreg;
8043 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
8045 EMIT_NEW_ICONST (cfg, args [3], 0);
8046 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
8049 EMIT_NEW_ICONST (cfg, args [3], 0);
8050 EMIT_NEW_ICONST (cfg, args [4], 0);
8052 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
8055 if (mini_is_gsharedvt_type (cfg, fsig->ret)) {
8056 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins, &bblock);
8057 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret)) {
8061 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
8062 MONO_ADD_INS (cfg->cbb, add);
8064 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
8065 MONO_ADD_INS (cfg->cbb, ins);
8066 /* ins represents the call result */
8071 GSHAREDVT_FAILURE (*ip);
8075 * We have the `constrained.' prefix opcode.
8077 if (constrained_call->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8079 * The type parameter is instantiated as a valuetype,
8080 * but that type doesn't override the method we're
8081 * calling, so we need to box `this'.
8083 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
8084 ins->klass = constrained_call;
8085 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
8086 CHECK_CFG_EXCEPTION;
8087 } else if (!constrained_call->valuetype) {
8088 int dreg = alloc_ireg_ref (cfg);
8091 * The type parameter is instantiated as a reference
8092 * type. We have a managed pointer on the stack, so
8093 * we need to dereference it here.
8095 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
8096 ins->type = STACK_OBJ;
8099 if (cmethod->klass->valuetype) {
8102 /* Interface method */
8105 mono_class_setup_vtable (constrained_call);
8106 CHECK_TYPELOAD (constrained_call);
8107 ioffset = mono_class_interface_offset (constrained_call, cmethod->klass);
8109 TYPE_LOAD_ERROR (constrained_call);
8110 slot = mono_method_get_vtable_slot (cmethod);
8112 TYPE_LOAD_ERROR (cmethod->klass);
8113 cmethod = constrained_call->vtable [ioffset + slot];
8115 if (cmethod->klass == mono_defaults.enum_class) {
8116 /* Enum implements some interfaces, so treat this as the first case */
8117 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
8118 ins->klass = constrained_call;
8119 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
8120 CHECK_CFG_EXCEPTION;
8125 constrained_call = NULL;
8128 if (!calli && check_call_signature (cfg, fsig, sp))
8131 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
8132 if (cmethod && (cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
8133 delegate_invoke = TRUE;
8136 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
8138 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8139 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8147 * If the callee is a shared method, then its static cctor
8148 * might not get called after the call was patched.
8150 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8151 emit_generic_class_init (cfg, cmethod->klass);
8152 CHECK_TYPELOAD (cmethod->klass);
8156 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
8158 if (cfg->generic_sharing_context && cmethod) {
8159 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
8161 context_used = mini_method_check_context_used (cfg, cmethod);
8163 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8164 /* Generic method interface
8165 calls are resolved via a
8166 helper function and don't
8168 if (!cmethod_context || !cmethod_context->method_inst)
8169 pass_imt_from_rgctx = TRUE;
8173 * If a shared method calls another
8174 * shared method then the caller must
8175 * have a generic sharing context
8176 * because the magic trampoline
8177 * requires it. FIXME: We shouldn't
8178 * have to force the vtable/mrgctx
8179 * variable here. Instead there
8180 * should be a flag in the cfg to
8181 * request a generic sharing context.
8184 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
8185 mono_get_vtable_var (cfg);
8190 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8192 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8194 CHECK_TYPELOAD (cmethod->klass);
8195 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8200 g_assert (!vtable_arg);
8202 if (!cfg->compile_aot) {
8204 * emit_get_rgctx_method () calls mono_class_vtable () so check
8205 * for type load errors before.
8207 mono_class_setup_vtable (cmethod->klass);
8208 CHECK_TYPELOAD (cmethod->klass);
8211 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8213 /* !marshalbyref is needed to properly handle generic methods + remoting */
8214 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
8215 MONO_METHOD_IS_FINAL (cmethod)) &&
8216 !mono_class_is_marshalbyref (cmethod->klass)) {
8223 if (pass_imt_from_rgctx) {
8224 g_assert (!pass_vtable);
8227 imt_arg = emit_get_rgctx_method (cfg, context_used,
8228 cmethod, MONO_RGCTX_INFO_METHOD);
8232 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8234 /* Calling virtual generic methods */
8235 if (cmethod && virtual &&
8236 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
8237 !(MONO_METHOD_IS_FINAL (cmethod) &&
8238 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
8239 fsig->generic_param_count &&
8240 !(cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))) {
8241 MonoInst *this_temp, *this_arg_temp, *store;
8242 MonoInst *iargs [4];
8243 gboolean use_imt = FALSE;
8245 g_assert (fsig->is_inflated);
8247 /* Prevent inlining of methods that contain indirect calls */
8248 INLINE_FAILURE ("virtual generic call");
8250 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
8251 GSHAREDVT_FAILURE (*ip);
8253 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
8254 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt)
8259 g_assert (!imt_arg);
8261 g_assert (cmethod->is_inflated);
8262 imt_arg = emit_get_rgctx_method (cfg, context_used,
8263 cmethod, MONO_RGCTX_INFO_METHOD);
8264 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
8266 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
8267 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
8268 MONO_ADD_INS (bblock, store);
8270 /* FIXME: This should be a managed pointer */
8271 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8273 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
8274 iargs [1] = emit_get_rgctx_method (cfg, context_used,
8275 cmethod, MONO_RGCTX_INFO_METHOD);
8276 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
8277 addr = mono_emit_jit_icall (cfg,
8278 mono_helper_compile_generic_method, iargs);
8280 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
8282 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8289 * Implement a workaround for the inherent races involved in locking:
8295 * If a thread abort happens between the call to Monitor.Enter () and the start of the
8296 * try block, the Exit () won't be executed, see:
8297 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
8298 * To work around this, we extend such try blocks to include the last x bytes
8299 * of the Monitor.Enter () call.
8301 if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8302 MonoBasicBlock *tbb;
8304 GET_BBLOCK (cfg, tbb, ip + 5);
8306 * Only extend try blocks with a finally, to avoid catching exceptions thrown
8307 * from Monitor.Enter like ArgumentNullException.
8309 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
8310 /* Mark this bblock as needing to be extended */
8311 tbb->extend_try_block = TRUE;
8315 /* Conversion to a JIT intrinsic */
8316 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
8318 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8319 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8326 if (cmethod && (cfg->opt & MONO_OPT_INLINE) &&
8327 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
8328 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
8329 !g_list_find (dont_inline, cmethod)) {
8331 gboolean always = FALSE;
8333 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
8334 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
8335 /* Prevent inlining of methods that call wrappers */
8336 INLINE_FAILURE ("wrapper call");
8337 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
8341 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, always);
8343 cfg->real_offset += 5;
8346 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8347 /* *sp is already set by inline_method */
8352 inline_costs += costs;
8358 /* Tail recursion elimination */
8359 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
8360 gboolean has_vtargs = FALSE;
8363 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8364 INLINE_FAILURE ("tail call");
8366 /* keep it simple */
8367 for (i = fsig->param_count - 1; i >= 0; i--) {
8368 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
8373 for (i = 0; i < n; ++i)
8374 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8375 MONO_INST_NEW (cfg, ins, OP_BR);
8376 MONO_ADD_INS (bblock, ins);
8377 tblock = start_bblock->out_bb [0];
8378 link_bblock (cfg, bblock, tblock);
8379 ins->inst_target_bb = tblock;
8380 start_new_bblock = 1;
8382 /* skip the CEE_RET, too */
8383 if (ip_in_bb (cfg, bblock, ip + 5))
8390 inline_costs += 10 * num_calls++;
8393 * Making generic calls out of gsharedvt methods.
8395 if (cmethod && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
8396 MonoRgctxInfoType info_type;
8399 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
8400 //GSHAREDVT_FAILURE (*ip);
8401 // disable for possible remoting calls
8402 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
8403 GSHAREDVT_FAILURE (*ip);
8404 if (fsig->generic_param_count) {
8405 /* virtual generic call */
8406 g_assert (mono_use_imt);
8407 g_assert (!imt_arg);
8408 /* Same as the virtual generic case above */
8409 imt_arg = emit_get_rgctx_method (cfg, context_used,
8410 cmethod, MONO_RGCTX_INFO_METHOD);
8411 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
8416 if (cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)
8417 /* test_0_multi_dim_arrays () in gshared.cs */
8418 GSHAREDVT_FAILURE (*ip);
8420 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
8421 keep_this_alive = sp [0];
8423 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
8424 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
8426 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
8427 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
8429 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8431 } else if (calli && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
8433 * We pass the address to the gsharedvt trampoline in the rgctx reg
8435 MonoInst *callee = addr;
8437 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8439 GSHAREDVT_FAILURE (*ip);
8441 addr = emit_get_rgctx_sig (cfg, context_used,
8442 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8443 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8447 /* Generic sharing */
8448 /* FIXME: only do this for generic methods if
8449 they are not shared! */
8450 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
8451 (!mono_method_is_generic_sharable (cmethod, TRUE) ||
8452 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
8453 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
8454 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
8455 INLINE_FAILURE ("gshared");
8457 g_assert (cfg->generic_sharing_context && cmethod);
8461 * We are compiling a call to a
8462 * generic method from shared code,
8463 * which means that we have to look up
8464 * the method in the rgctx and do an
8468 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8470 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8471 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8475 /* Indirect calls */
8477 if (call_opcode == CEE_CALL)
8478 g_assert (context_used);
8479 else if (call_opcode == CEE_CALLI)
8480 g_assert (!vtable_arg);
8482 /* FIXME: what the hell is this??? */
8483 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
8484 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
8486 /* Prevent inlining of methods with indirect calls */
8487 INLINE_FAILURE ("indirect call");
8489 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8494 * Instead of emitting an indirect call, emit a direct call
8495 * with the contents of the aotconst as the patch info.
8497 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8498 info_type = addr->inst_c1;
8499 info_data = addr->inst_p0;
8501 info_type = addr->inst_right->inst_c1;
8502 info_data = addr->inst_right->inst_left;
8505 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8506 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8511 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8519 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
8520 MonoInst *val = sp [fsig->param_count];
8522 if (val->type == STACK_OBJ) {
8523 MonoInst *iargs [2];
8528 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
8531 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
8532 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
8533 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
8534 emit_write_barrier (cfg, addr, val);
8535 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
8536 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8538 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
8539 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
8540 if (!cmethod->klass->element_class->valuetype && !readonly)
8541 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
8542 CHECK_TYPELOAD (cmethod->klass);
8545 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8548 g_assert_not_reached ();
8555 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
8559 /* Tail prefix / tail call optimization */
8561 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
8562 /* FIXME: runtime generic context pointer for jumps? */
8563 /* FIXME: handle this for generic sharing eventually */
8564 if (cmethod && (ins_flag & MONO_INST_TAILCALL) &&
8565 !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
8566 supported_tail_call = TRUE;
8568 if (supported_tail_call) {
8571 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8572 INLINE_FAILURE ("tail call");
8574 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
8576 if (ARCH_HAVE_OP_TAIL_CALL) {
8577 /* Handle tail calls similarly to normal calls */
8580 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8582 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8583 call->tail_call = TRUE;
8584 call->method = cmethod;
8585 call->signature = mono_method_signature (cmethod);
8588 * We implement tail calls by storing the actual arguments into the
8589 * argument variables, then emitting a CEE_JMP.
8591 for (i = 0; i < n; ++i) {
8592 /* Prevent argument from being register allocated */
8593 arg_array [i]->flags |= MONO_INST_VOLATILE;
8594 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8596 ins = (MonoInst*)call;
8597 ins->inst_p0 = cmethod;
8598 ins->inst_p1 = arg_array [0];
8599 MONO_ADD_INS (bblock, ins);
8600 link_bblock (cfg, bblock, end_bblock);
8601 start_new_bblock = 1;
8603 // FIXME: Eliminate unreachable epilogs
8606 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
8607 * only reachable from this call.
8609 GET_BBLOCK (cfg, tblock, ip + 5);
8610 if (tblock == bblock || tblock->in_count == 0)
8619 * Synchronized wrappers.
8620 * It's hard to determine where to replace a method with its synchronized
8621 * wrapper without causing an infinite recursion. The current solution is
8622 * to add the synchronized wrapper in the trampolines, and to
8623 * change the called method to a dummy wrapper, and resolve that wrapper
8624 * to the real method in mono_jit_compile_method ().
8626 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8627 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
8628 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
8629 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
8633 INLINE_FAILURE ("call");
8634 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual ? sp [0] : NULL,
8635 imt_arg, vtable_arg);
8638 link_bblock (cfg, bblock, end_bblock);
8639 start_new_bblock = 1;
8641 // FIXME: Eliminate unreachable epilogs
8644 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
8645 * only reachable from this call.
8647 GET_BBLOCK (cfg, tblock, ip + 5);
8648 if (tblock == bblock || tblock->in_count == 0)
8655 /* End of call, INS should contain the result of the call, if any */
8657 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
8660 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8665 if (keep_this_alive) {
8666 MonoInst *dummy_use;
8668 /* See mono_emit_method_call_full () */
8669 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
8672 CHECK_CFG_EXCEPTION;
8676 g_assert (*ip == CEE_RET);
8680 constrained_call = NULL;
8682 emit_seq_point (cfg, method, ip, FALSE, TRUE);
8686 if (cfg->method != method) {
8687 /* return from inlined method */
8689 * If in_count == 0, that means the ret is unreachable due to
8690 * being preceded by a throw. In that case, inline_method () will
8691 * handle setting the return value
8692 * (test case: test_0_inline_throw ()).
8694 if (return_var && cfg->cbb->in_count) {
8695 MonoType *ret_type = mono_method_signature (method)->ret;
8701 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
8704 //g_assert (returnvar != -1);
8705 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
8706 cfg->ret_var_set = TRUE;
8709 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8711 if (cfg->lmf_var && cfg->cbb->in_count)
8715 MonoType *ret_type = mini_replace_type (mono_method_signature (method)->ret);
8717 if (seq_points && !sym_seq_points) {
8719 * Place a seq point here too even through the IL stack is not
8720 * empty, so a step over on
8723 * will work correctly.
8725 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
8726 MONO_ADD_INS (cfg->cbb, ins);
8729 g_assert (!return_var);
8733 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
8736 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
8739 if (!cfg->vret_addr) {
8742 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
8744 EMIT_NEW_RETLOADA (cfg, ret_addr);
8746 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
8747 ins->klass = mono_class_from_mono_type (ret_type);
8750 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
8751 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
8752 MonoInst *iargs [1];
8756 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
8757 mono_arch_emit_setret (cfg, method, conv);
8759 mono_arch_emit_setret (cfg, method, *sp);
8762 mono_arch_emit_setret (cfg, method, *sp);
8767 if (sp != stack_start)
8769 MONO_INST_NEW (cfg, ins, OP_BR);
8771 ins->inst_target_bb = end_bblock;
8772 MONO_ADD_INS (bblock, ins);
8773 link_bblock (cfg, bblock, end_bblock);
8774 start_new_bblock = 1;
8778 MONO_INST_NEW (cfg, ins, OP_BR);
8780 target = ip + 1 + (signed char)(*ip);
8782 GET_BBLOCK (cfg, tblock, target);
8783 link_bblock (cfg, bblock, tblock);
8784 ins->inst_target_bb = tblock;
8785 if (sp != stack_start) {
8786 handle_stack_args (cfg, stack_start, sp - stack_start);
8788 CHECK_UNVERIFIABLE (cfg);
8790 MONO_ADD_INS (bblock, ins);
8791 start_new_bblock = 1;
8792 inline_costs += BRANCH_COST;
8806 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
8808 target = ip + 1 + *(signed char*)ip;
8814 inline_costs += BRANCH_COST;
8818 MONO_INST_NEW (cfg, ins, OP_BR);
8821 target = ip + 4 + (gint32)read32(ip);
8823 GET_BBLOCK (cfg, tblock, target);
8824 link_bblock (cfg, bblock, tblock);
8825 ins->inst_target_bb = tblock;
8826 if (sp != stack_start) {
8827 handle_stack_args (cfg, stack_start, sp - stack_start);
8829 CHECK_UNVERIFIABLE (cfg);
8832 MONO_ADD_INS (bblock, ins);
8834 start_new_bblock = 1;
8835 inline_costs += BRANCH_COST;
8842 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
8843 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
8844 guint32 opsize = is_short ? 1 : 4;
8846 CHECK_OPSIZE (opsize);
8848 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
8851 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
8856 GET_BBLOCK (cfg, tblock, target);
8857 link_bblock (cfg, bblock, tblock);
8858 GET_BBLOCK (cfg, tblock, ip);
8859 link_bblock (cfg, bblock, tblock);
8861 if (sp != stack_start) {
8862 handle_stack_args (cfg, stack_start, sp - stack_start);
8863 CHECK_UNVERIFIABLE (cfg);
8866 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
8867 cmp->sreg1 = sp [0]->dreg;
8868 type_from_op (cmp, sp [0], NULL);
8871 #if SIZEOF_REGISTER == 4
8872 if (cmp->opcode == OP_LCOMPARE_IMM) {
8873 /* Convert it to OP_LCOMPARE */
8874 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8875 ins->type = STACK_I8;
8876 ins->dreg = alloc_dreg (cfg, STACK_I8);
8878 MONO_ADD_INS (bblock, ins);
8879 cmp->opcode = OP_LCOMPARE;
8880 cmp->sreg2 = ins->dreg;
8883 MONO_ADD_INS (bblock, cmp);
8885 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
8886 type_from_op (ins, sp [0], NULL);
8887 MONO_ADD_INS (bblock, ins);
8888 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
8889 GET_BBLOCK (cfg, tblock, target);
8890 ins->inst_true_bb = tblock;
8891 GET_BBLOCK (cfg, tblock, ip);
8892 ins->inst_false_bb = tblock;
8893 start_new_bblock = 2;
8896 inline_costs += BRANCH_COST;
8911 MONO_INST_NEW (cfg, ins, *ip);
8913 target = ip + 4 + (gint32)read32(ip);
8919 inline_costs += BRANCH_COST;
8923 MonoBasicBlock **targets;
8924 MonoBasicBlock *default_bblock;
8925 MonoJumpInfoBBTable *table;
8926 int offset_reg = alloc_preg (cfg);
8927 int target_reg = alloc_preg (cfg);
8928 int table_reg = alloc_preg (cfg);
8929 int sum_reg = alloc_preg (cfg);
8930 gboolean use_op_switch;
8934 n = read32 (ip + 1);
8937 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
8941 CHECK_OPSIZE (n * sizeof (guint32));
8942 target = ip + n * sizeof (guint32);
8944 GET_BBLOCK (cfg, default_bblock, target);
8945 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
8947 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
8948 for (i = 0; i < n; ++i) {
8949 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
8950 targets [i] = tblock;
8951 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
8955 if (sp != stack_start) {
8957 * Link the current bb with the targets as well, so handle_stack_args
8958 * will set their in_stack correctly.
8960 link_bblock (cfg, bblock, default_bblock);
8961 for (i = 0; i < n; ++i)
8962 link_bblock (cfg, bblock, targets [i]);
8964 handle_stack_args (cfg, stack_start, sp - stack_start);
8966 CHECK_UNVERIFIABLE (cfg);
8969 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
8970 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
8973 for (i = 0; i < n; ++i)
8974 link_bblock (cfg, bblock, targets [i]);
8976 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
8977 table->table = targets;
8978 table->table_size = n;
8980 use_op_switch = FALSE;
8982 /* ARM implements SWITCH statements differently */
8983 /* FIXME: Make it use the generic implementation */
8984 if (!cfg->compile_aot)
8985 use_op_switch = TRUE;
8988 if (COMPILE_LLVM (cfg))
8989 use_op_switch = TRUE;
8991 cfg->cbb->has_jump_table = 1;
8993 if (use_op_switch) {
8994 MONO_INST_NEW (cfg, ins, OP_SWITCH);
8995 ins->sreg1 = src1->dreg;
8996 ins->inst_p0 = table;
8997 ins->inst_many_bb = targets;
8998 ins->klass = GUINT_TO_POINTER (n);
8999 MONO_ADD_INS (cfg->cbb, ins);
9001 if (sizeof (gpointer) == 8)
9002 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
9004 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
9006 #if SIZEOF_REGISTER == 8
9007 /* The upper word might not be zero, and we add it to a 64 bit address later */
9008 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
9011 if (cfg->compile_aot) {
9012 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
9014 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
9015 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
9016 ins->inst_p0 = table;
9017 ins->dreg = table_reg;
9018 MONO_ADD_INS (cfg->cbb, ins);
9021 /* FIXME: Use load_memindex */
9022 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
9023 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
9024 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
9026 start_new_bblock = 1;
9027 inline_costs += (BRANCH_COST * 2);
9047 dreg = alloc_freg (cfg);
9050 dreg = alloc_lreg (cfg);
9053 dreg = alloc_ireg_ref (cfg);
9056 dreg = alloc_preg (cfg);
9059 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
9060 ins->type = ldind_type [*ip - CEE_LDIND_I1];
9061 ins->flags |= ins_flag;
9062 MONO_ADD_INS (bblock, ins);
9064 if (ins_flag & MONO_INST_VOLATILE) {
9065 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9066 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
9067 emit_memory_barrier (cfg, FullBarrier);
9083 if (ins_flag & MONO_INST_VOLATILE) {
9084 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9085 /* FIXME it's questionable if release semantics require full barrier or just StoreStore */
9086 emit_memory_barrier (cfg, FullBarrier);
9089 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
9090 ins->flags |= ins_flag;
9093 MONO_ADD_INS (bblock, ins);
9095 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
9096 emit_write_barrier (cfg, sp [0], sp [1]);
9105 MONO_INST_NEW (cfg, ins, (*ip));
9107 ins->sreg1 = sp [0]->dreg;
9108 ins->sreg2 = sp [1]->dreg;
9109 type_from_op (ins, sp [0], sp [1]);
9111 ins->dreg = alloc_dreg ((cfg), (ins)->type);
9113 /* Use the immediate opcodes if possible */
9114 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
9115 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9116 if (imm_opcode != -1) {
9117 ins->opcode = imm_opcode;
9118 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
9121 sp [1]->opcode = OP_NOP;
9125 MONO_ADD_INS ((cfg)->cbb, (ins));
9127 *sp++ = mono_decompose_opcode (cfg, ins);
9144 MONO_INST_NEW (cfg, ins, (*ip));
9146 ins->sreg1 = sp [0]->dreg;
9147 ins->sreg2 = sp [1]->dreg;
9148 type_from_op (ins, sp [0], sp [1]);
9150 ADD_WIDEN_OP (ins, sp [0], sp [1]);
9151 ins->dreg = alloc_dreg ((cfg), (ins)->type);
9153 /* FIXME: Pass opcode to is_inst_imm */
9155 /* Use the immediate opcodes if possible */
9156 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
9159 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9160 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
9161 /* Keep emulated opcodes which are optimized away later */
9162 if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
9163 imm_opcode = mono_op_to_op_imm (ins->opcode);
9166 if (imm_opcode != -1) {
9167 ins->opcode = imm_opcode;
9168 if (sp [1]->opcode == OP_I8CONST) {
9169 #if SIZEOF_REGISTER == 8
9170 ins->inst_imm = sp [1]->inst_l;
9172 ins->inst_ls_word = sp [1]->inst_ls_word;
9173 ins->inst_ms_word = sp [1]->inst_ms_word;
9177 ins->inst_imm = (gssize)(sp [1]->inst_c0);
9180 /* Might be followed by an instruction added by ADD_WIDEN_OP */
9181 if (sp [1]->next == NULL)
9182 sp [1]->opcode = OP_NOP;
9185 MONO_ADD_INS ((cfg)->cbb, (ins));
9187 *sp++ = mono_decompose_opcode (cfg, ins);
9200 case CEE_CONV_OVF_I8:
9201 case CEE_CONV_OVF_U8:
9205 /* Special case this earlier so we have long constants in the IR */
9206 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
9207 int data = sp [-1]->inst_c0;
9208 sp [-1]->opcode = OP_I8CONST;
9209 sp [-1]->type = STACK_I8;
9210 #if SIZEOF_REGISTER == 8
9211 if ((*ip) == CEE_CONV_U8)
9212 sp [-1]->inst_c0 = (guint32)data;
9214 sp [-1]->inst_c0 = data;
9216 sp [-1]->inst_ls_word = data;
9217 if ((*ip) == CEE_CONV_U8)
9218 sp [-1]->inst_ms_word = 0;
9220 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
9222 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
9229 case CEE_CONV_OVF_I4:
9230 case CEE_CONV_OVF_I1:
9231 case CEE_CONV_OVF_I2:
9232 case CEE_CONV_OVF_I:
9233 case CEE_CONV_OVF_U:
9236 if (sp [-1]->type == STACK_R8) {
9237 ADD_UNOP (CEE_CONV_OVF_I8);
9244 case CEE_CONV_OVF_U1:
9245 case CEE_CONV_OVF_U2:
9246 case CEE_CONV_OVF_U4:
9249 if (sp [-1]->type == STACK_R8) {
9250 ADD_UNOP (CEE_CONV_OVF_U8);
9257 case CEE_CONV_OVF_I1_UN:
9258 case CEE_CONV_OVF_I2_UN:
9259 case CEE_CONV_OVF_I4_UN:
9260 case CEE_CONV_OVF_I8_UN:
9261 case CEE_CONV_OVF_U1_UN:
9262 case CEE_CONV_OVF_U2_UN:
9263 case CEE_CONV_OVF_U4_UN:
9264 case CEE_CONV_OVF_U8_UN:
9265 case CEE_CONV_OVF_I_UN:
9266 case CEE_CONV_OVF_U_UN:
9273 CHECK_CFG_EXCEPTION;
9277 case CEE_ADD_OVF_UN:
9279 case CEE_MUL_OVF_UN:
9281 case CEE_SUB_OVF_UN:
9287 GSHAREDVT_FAILURE (*ip);
9290 token = read32 (ip + 1);
9291 klass = mini_get_class (method, token, generic_context);
9292 CHECK_TYPELOAD (klass);
9294 if (generic_class_is_reference_type (cfg, klass)) {
9295 MonoInst *store, *load;
9296 int dreg = alloc_ireg_ref (cfg);
9298 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
9299 load->flags |= ins_flag;
9300 MONO_ADD_INS (cfg->cbb, load);
9302 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
9303 store->flags |= ins_flag;
9304 MONO_ADD_INS (cfg->cbb, store);
9306 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
9307 emit_write_barrier (cfg, sp [0], sp [1]);
9309 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
9321 token = read32 (ip + 1);
9322 klass = mini_get_class (method, token, generic_context);
9323 CHECK_TYPELOAD (klass);
9325 /* Optimize the common ldobj+stloc combination */
9335 loc_index = ip [5] - CEE_STLOC_0;
9342 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
9343 CHECK_LOCAL (loc_index);
9345 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9346 ins->dreg = cfg->locals [loc_index]->dreg;
9347 ins->flags |= ins_flag;
9350 if (ins_flag & MONO_INST_VOLATILE) {
9351 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9352 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
9353 emit_memory_barrier (cfg, FullBarrier);
9359 /* Optimize the ldobj+stobj combination */
9360 /* The reference case ends up being a load+store anyway */
9361 /* Skip this if the operation is volatile. */
9362 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
9367 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
9374 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9375 ins->flags |= ins_flag;
9378 if (ins_flag & MONO_INST_VOLATILE) {
9379 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9380 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
9381 emit_memory_barrier (cfg, FullBarrier);
9390 CHECK_STACK_OVF (1);
9392 n = read32 (ip + 1);
9394 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
9395 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
9396 ins->type = STACK_OBJ;
9399 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
9400 MonoInst *iargs [1];
9402 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
9403 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
9405 if (cfg->opt & MONO_OPT_SHARED) {
9406 MonoInst *iargs [3];
9408 if (cfg->compile_aot) {
9409 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
9411 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9412 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
9413 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
9414 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
9415 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
9417 if (bblock->out_of_line) {
9418 MonoInst *iargs [2];
9420 if (image == mono_defaults.corlib) {
9422 * Avoid relocations in AOT and save some space by using a
9423 * version of helper_ldstr specialized to mscorlib.
9425 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
9426 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
9428 /* Avoid creating the string object */
9429 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9430 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
9431 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
9435 if (cfg->compile_aot) {
9436 NEW_LDSTRCONST (cfg, ins, image, n);
9438 MONO_ADD_INS (bblock, ins);
9441 NEW_PCONST (cfg, ins, NULL);
9442 ins->type = STACK_OBJ;
9443 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
9445 OUT_OF_MEMORY_FAILURE;
9448 MONO_ADD_INS (bblock, ins);
9457 MonoInst *iargs [2];
9458 MonoMethodSignature *fsig;
9461 MonoInst *vtable_arg = NULL;
9464 token = read32 (ip + 1);
9465 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
9466 if (!cmethod || mono_loader_get_last_error ())
9468 fsig = mono_method_get_signature (cmethod, image, token);
9472 mono_save_token_info (cfg, image, token, cmethod);
9474 if (!mono_class_init (cmethod->klass))
9475 TYPE_LOAD_ERROR (cmethod->klass);
9477 context_used = mini_method_check_context_used (cfg, cmethod);
9479 if (mono_security_cas_enabled ()) {
9480 if (check_linkdemand (cfg, method, cmethod))
9481 INLINE_FAILURE ("linkdemand");
9482 CHECK_CFG_EXCEPTION;
9483 } else if (mono_security_core_clr_enabled ()) {
9484 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9487 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9488 emit_generic_class_init (cfg, cmethod->klass);
9489 CHECK_TYPELOAD (cmethod->klass);
9493 if (cfg->gsharedvt) {
9494 if (mini_is_gsharedvt_variable_signature (sig))
9495 GSHAREDVT_FAILURE (*ip);
9499 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
9500 mono_method_is_generic_sharable (cmethod, TRUE)) {
9501 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
9502 mono_class_vtable (cfg->domain, cmethod->klass);
9503 CHECK_TYPELOAD (cmethod->klass);
9505 vtable_arg = emit_get_rgctx_method (cfg, context_used,
9506 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9509 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
9510 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9512 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9514 CHECK_TYPELOAD (cmethod->klass);
9515 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9520 n = fsig->param_count;
9524 * Generate smaller code for the common newobj <exception> instruction in
9525 * argument checking code.
9527 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
9528 is_exception_class (cmethod->klass) && n <= 2 &&
9529 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
9530 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
9531 MonoInst *iargs [3];
9533 g_assert (!vtable_arg);
9537 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
9540 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
9544 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
9549 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
9552 g_assert_not_reached ();
9560 /* move the args to allow room for 'this' in the first position */
9566 /* check_call_signature () requires sp[0] to be set */
9567 this_ins.type = STACK_OBJ;
9569 if (check_call_signature (cfg, fsig, sp))
9574 if (mini_class_is_system_array (cmethod->klass)) {
9575 g_assert (!vtable_arg);
9577 *sp = emit_get_rgctx_method (cfg, context_used,
9578 cmethod, MONO_RGCTX_INFO_METHOD);
9580 /* Avoid varargs in the common case */
9581 if (fsig->param_count == 1)
9582 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
9583 else if (fsig->param_count == 2)
9584 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
9585 else if (fsig->param_count == 3)
9586 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
9587 else if (fsig->param_count == 4)
9588 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
9590 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
9591 } else if (cmethod->string_ctor) {
9592 g_assert (!context_used);
9593 g_assert (!vtable_arg);
9594 /* we simply pass a null pointer */
9595 EMIT_NEW_PCONST (cfg, *sp, NULL);
9596 /* now call the string ctor */
9597 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
9599 MonoInst* callvirt_this_arg = NULL;
9601 if (cmethod->klass->valuetype) {
9602 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
9603 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
9604 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
9609 * The code generated by mini_emit_virtual_call () expects
9610 * iargs [0] to be a boxed instance, but luckily the vcall
9611 * will be transformed into a normal call there.
9613 } else if (context_used) {
9614 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
9617 MonoVTable *vtable = NULL;
9619 if (!cfg->compile_aot)
9620 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9621 CHECK_TYPELOAD (cmethod->klass);
9624 * TypeInitializationExceptions thrown from the mono_runtime_class_init
9625 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
9626 * As a workaround, we call class cctors before allocating objects.
9628 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
9629 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, cmethod->klass, helper_sig_class_init_trampoline, NULL);
9630 if (cfg->verbose_level > 2)
9631 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
9632 class_inits = g_slist_prepend (class_inits, cmethod->klass);
9635 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
9638 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
9641 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
9643 /* Now call the actual ctor */
9644 /* Avoid virtual calls to ctors if possible */
9645 if (mono_class_is_marshalbyref (cmethod->klass))
9646 callvirt_this_arg = sp [0];
9649 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
9650 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9651 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9656 CHECK_CFG_EXCEPTION;
9657 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
9658 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
9659 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
9660 !g_list_find (dont_inline, cmethod)) {
9663 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
9664 cfg->real_offset += 5;
9667 inline_costs += costs - 5;
9669 INLINE_FAILURE ("inline failure");
9670 // FIXME-VT: Clean this up
9671 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
9672 GSHAREDVT_FAILURE(*ip);
9673 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
9675 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
9678 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
9679 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
9680 } else if (context_used &&
9681 ((!mono_method_is_generic_sharable (cmethod, TRUE) ||
9682 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
9683 MonoInst *cmethod_addr;
9685 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
9687 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
9688 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9690 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
9692 INLINE_FAILURE ("ctor call");
9693 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
9694 callvirt_this_arg, NULL, vtable_arg);
9698 if (alloc == NULL) {
9700 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
9701 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
9715 token = read32 (ip + 1);
9716 klass = mini_get_class (method, token, generic_context);
9717 CHECK_TYPELOAD (klass);
9718 if (sp [0]->type != STACK_OBJ)
9721 context_used = mini_class_check_context_used (cfg, klass);
9723 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9730 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9733 if (cfg->compile_aot)
9734 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9736 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9738 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
9740 *sp++ = emit_castclass_with_cache (cfg, klass, args, &bblock);
9743 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9744 MonoMethod *mono_castclass;
9745 MonoInst *iargs [1];
9748 mono_castclass = mono_marshal_get_castclass (klass);
9751 save_cast_details (cfg, klass, sp [0]->dreg, TRUE, &bblock);
9752 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
9753 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9754 reset_cast_details (cfg);
9755 CHECK_CFG_EXCEPTION;
9756 g_assert (costs > 0);
9759 cfg->real_offset += 5;
9764 inline_costs += costs;
9767 ins = handle_castclass (cfg, klass, *sp, context_used);
9768 CHECK_CFG_EXCEPTION;
9778 token = read32 (ip + 1);
9779 klass = mini_get_class (method, token, generic_context);
9780 CHECK_TYPELOAD (klass);
9781 if (sp [0]->type != STACK_OBJ)
9784 context_used = mini_class_check_context_used (cfg, klass);
9786 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9787 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
9794 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9797 if (cfg->compile_aot)
9798 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9800 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9802 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
9805 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9806 MonoMethod *mono_isinst;
9807 MonoInst *iargs [1];
9810 mono_isinst = mono_marshal_get_isinst (klass);
9813 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
9814 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9815 CHECK_CFG_EXCEPTION;
9816 g_assert (costs > 0);
9819 cfg->real_offset += 5;
9824 inline_costs += costs;
9827 ins = handle_isinst (cfg, klass, *sp, context_used);
9828 CHECK_CFG_EXCEPTION;
9835 case CEE_UNBOX_ANY: {
9839 token = read32 (ip + 1);
9840 klass = mini_get_class (method, token, generic_context);
9841 CHECK_TYPELOAD (klass);
9843 mono_save_token_info (cfg, image, token, klass);
9845 context_used = mini_class_check_context_used (cfg, klass);
9847 if (mini_is_gsharedvt_klass (cfg, klass)) {
9848 *sp = handle_unbox_gsharedvt (cfg, klass, *sp, &bblock);
9856 if (generic_class_is_reference_type (cfg, klass)) {
9857 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
9858 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9865 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9868 /*FIXME AOT support*/
9869 if (cfg->compile_aot)
9870 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9872 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9874 /* The wrapper doesn't inline well so the bloat of inlining doesn't pay off. */
9875 *sp++ = emit_castclass_with_cache (cfg, klass, args, &bblock);
9878 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9879 MonoMethod *mono_castclass;
9880 MonoInst *iargs [1];
9883 mono_castclass = mono_marshal_get_castclass (klass);
9886 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
9887 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9888 CHECK_CFG_EXCEPTION;
9889 g_assert (costs > 0);
9892 cfg->real_offset += 5;
9896 inline_costs += costs;
9898 ins = handle_castclass (cfg, klass, *sp, context_used);
9899 CHECK_CFG_EXCEPTION;
9907 if (mono_class_is_nullable (klass)) {
9908 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
9915 ins = handle_unbox (cfg, klass, sp, context_used);
9921 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9934 token = read32 (ip + 1);
9935 klass = mini_get_class (method, token, generic_context);
9936 CHECK_TYPELOAD (klass);
9938 mono_save_token_info (cfg, image, token, klass);
9940 context_used = mini_class_check_context_used (cfg, klass);
9942 if (generic_class_is_reference_type (cfg, klass)) {
9948 if (klass == mono_defaults.void_class)
9950 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
9952 /* frequent check in generic code: box (struct), brtrue */
9954 // FIXME: LLVM can't handle the inconsistent bb linking
9955 if (!mono_class_is_nullable (klass) &&
9956 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
9957 (ip [5] == CEE_BRTRUE ||
9958 ip [5] == CEE_BRTRUE_S ||
9959 ip [5] == CEE_BRFALSE ||
9960 ip [5] == CEE_BRFALSE_S)) {
9961 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
9963 MonoBasicBlock *true_bb, *false_bb;
9967 if (cfg->verbose_level > 3) {
9968 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9969 printf ("<box+brtrue opt>\n");
9977 target = ip + 1 + (signed char)(*ip);
9984 target = ip + 4 + (gint)(read32 (ip));
9988 g_assert_not_reached ();
9992 * We need to link both bblocks, since it is needed for handling stack
9993 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
9994 * Branching to only one of them would lead to inconsistencies, so
9995 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
9997 GET_BBLOCK (cfg, true_bb, target);
9998 GET_BBLOCK (cfg, false_bb, ip);
10000 mono_link_bblock (cfg, cfg->cbb, true_bb);
10001 mono_link_bblock (cfg, cfg->cbb, false_bb);
10003 if (sp != stack_start) {
10004 handle_stack_args (cfg, stack_start, sp - stack_start);
10006 CHECK_UNVERIFIABLE (cfg);
10009 if (COMPILE_LLVM (cfg)) {
10010 dreg = alloc_ireg (cfg);
10011 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
10012 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
10014 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
10016 /* The JIT can't eliminate the iconst+compare */
10017 MONO_INST_NEW (cfg, ins, OP_BR);
10018 ins->inst_target_bb = is_true ? true_bb : false_bb;
10019 MONO_ADD_INS (cfg->cbb, ins);
10022 start_new_bblock = 1;
10026 *sp++ = handle_box (cfg, val, klass, context_used, &bblock);
10028 CHECK_CFG_EXCEPTION;
10037 token = read32 (ip + 1);
10038 klass = mini_get_class (method, token, generic_context);
10039 CHECK_TYPELOAD (klass);
10041 mono_save_token_info (cfg, image, token, klass);
10043 context_used = mini_class_check_context_used (cfg, klass);
10045 if (mono_class_is_nullable (klass)) {
10048 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
10049 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
10053 ins = handle_unbox (cfg, klass, sp, context_used);
10066 MonoClassField *field;
10067 #ifndef DISABLE_REMOTING
10071 gboolean is_instance;
10073 gpointer addr = NULL;
10074 gboolean is_special_static;
10076 MonoInst *store_val = NULL;
10077 MonoInst *thread_ins;
10080 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
10082 if (op == CEE_STFLD) {
10085 store_val = sp [1];
10090 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
10092 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
10095 if (op == CEE_STSFLD) {
10098 store_val = sp [0];
10103 token = read32 (ip + 1);
10104 if (method->wrapper_type != MONO_WRAPPER_NONE) {
10105 field = mono_method_get_wrapper_data (method, token);
10106 klass = field->parent;
10109 field = mono_field_from_token (image, token, &klass, generic_context);
10113 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
10114 FIELD_ACCESS_FAILURE;
10115 mono_class_init (klass);
10117 if (is_instance && *ip != CEE_LDFLDA && is_magic_tls_access (field))
10120 /* if the class is Critical then transparent code cannot access it's fields */
10121 if (!is_instance && mono_security_core_clr_enabled ())
10122 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
10124 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
10125 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
10126 if (mono_security_core_clr_enabled ())
10127 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
10131 * LDFLD etc. is usable on static fields as well, so convert those cases to
10134 if (is_instance && field->type->attrs & FIELD_ATTRIBUTE_STATIC) {
10146 g_assert_not_reached ();
10148 is_instance = FALSE;
10151 context_used = mini_class_check_context_used (cfg, klass);
10153 /* INSTANCE CASE */
10155 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
10156 if (op == CEE_STFLD) {
10157 if (target_type_is_incompatible (cfg, field->type, sp [1]))
10159 #ifndef DISABLE_REMOTING
10160 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
10161 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
10162 MonoInst *iargs [5];
10164 GSHAREDVT_FAILURE (op);
10166 iargs [0] = sp [0];
10167 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10168 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10169 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
10171 iargs [4] = sp [1];
10173 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10174 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
10175 iargs, ip, cfg->real_offset, dont_inline, TRUE);
10176 CHECK_CFG_EXCEPTION;
10177 g_assert (costs > 0);
10179 cfg->real_offset += 5;
10182 inline_costs += costs;
10184 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
10191 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10193 if (mini_is_gsharedvt_klass (cfg, klass)) {
10194 MonoInst *offset_ins;
10196 context_used = mini_class_check_context_used (cfg, klass);
10198 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10199 dreg = alloc_ireg_mp (cfg);
10200 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10201 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
10202 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
10204 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
10206 if (sp [0]->opcode != OP_LDADDR)
10207 store->flags |= MONO_INST_FAULT;
10209 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
10210 /* insert call to write barrier */
10214 dreg = alloc_ireg_mp (cfg);
10215 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10216 emit_write_barrier (cfg, ptr, sp [1]);
10219 store->flags |= ins_flag;
10226 #ifndef DISABLE_REMOTING
10227 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
10228 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
10229 MonoInst *iargs [4];
10231 GSHAREDVT_FAILURE (op);
10233 iargs [0] = sp [0];
10234 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10235 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10236 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
10237 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10238 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
10239 iargs, ip, cfg->real_offset, dont_inline, TRUE);
10240 CHECK_CFG_EXCEPTION;
10242 g_assert (costs > 0);
10244 cfg->real_offset += 5;
10248 inline_costs += costs;
10250 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
10256 if (sp [0]->type == STACK_VTYPE) {
10259 /* Have to compute the address of the variable */
10261 var = get_vreg_to_inst (cfg, sp [0]->dreg);
10263 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
10265 g_assert (var->klass == klass);
10267 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
10271 if (op == CEE_LDFLDA) {
10272 if (is_magic_tls_access (field)) {
10273 GSHAREDVT_FAILURE (*ip);
10275 *sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
10277 if (sp [0]->type == STACK_OBJ) {
10278 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10279 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
10282 dreg = alloc_ireg_mp (cfg);
10284 if (mini_is_gsharedvt_klass (cfg, klass)) {
10285 MonoInst *offset_ins;
10287 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10288 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10290 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10292 ins->klass = mono_class_from_mono_type (field->type);
10293 ins->type = STACK_MP;
10299 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10301 if (mini_is_gsharedvt_klass (cfg, klass)) {
10302 MonoInst *offset_ins;
10304 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10305 dreg = alloc_ireg_mp (cfg);
10306 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10307 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
10309 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
10311 load->flags |= ins_flag;
10312 if (sp [0]->opcode != OP_LDADDR)
10313 load->flags |= MONO_INST_FAULT;
10327 * We can only support shared generic static
10328 * field access on architectures where the
10329 * trampoline code has been extended to handle
10330 * the generic class init.
10332 #ifndef MONO_ARCH_VTABLE_REG
10333 GENERIC_SHARING_FAILURE (op);
10336 context_used = mini_class_check_context_used (cfg, klass);
10338 ftype = mono_field_get_type (field);
10340 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
10343 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
10344 * to be called here.
10346 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
10347 mono_class_vtable (cfg->domain, klass);
10348 CHECK_TYPELOAD (klass);
10350 mono_domain_lock (cfg->domain);
10351 if (cfg->domain->special_static_fields)
10352 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
10353 mono_domain_unlock (cfg->domain);
10355 is_special_static = mono_class_field_is_special_static (field);
10357 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
10358 thread_ins = mono_get_thread_intrinsic (cfg);
10362 /* Generate IR to compute the field address */
10363 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
10365 * Fast access to TLS data
10366 * Inline version of get_thread_static_data () in
10370 int idx, static_data_reg, array_reg, dreg;
10372 GSHAREDVT_FAILURE (op);
10374 // offset &= 0x7fffffff;
10375 // idx = (offset >> 24) - 1;
10376 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
10377 MONO_ADD_INS (cfg->cbb, thread_ins);
10378 static_data_reg = alloc_ireg (cfg);
10379 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
10381 if (cfg->compile_aot) {
10382 int offset_reg, offset2_reg, idx_reg;
10384 /* For TLS variables, this will return the TLS offset */
10385 EMIT_NEW_SFLDACONST (cfg, ins, field);
10386 offset_reg = ins->dreg;
10387 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
10388 idx_reg = alloc_ireg (cfg);
10389 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
10390 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
10391 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
10392 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
10393 array_reg = alloc_ireg (cfg);
10394 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
10395 offset2_reg = alloc_ireg (cfg);
10396 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
10397 dreg = alloc_ireg (cfg);
10398 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
10400 offset = (gsize)addr & 0x7fffffff;
10401 idx = (offset >> 24) - 1;
10403 array_reg = alloc_ireg (cfg);
10404 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
10405 dreg = alloc_ireg (cfg);
10406 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
10408 } else if ((cfg->opt & MONO_OPT_SHARED) ||
10409 (cfg->compile_aot && is_special_static) ||
10410 (context_used && is_special_static)) {
10411 MonoInst *iargs [2];
10413 g_assert (field->parent);
10414 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10415 if (context_used) {
10416 iargs [1] = emit_get_rgctx_field (cfg, context_used,
10417 field, MONO_RGCTX_INFO_CLASS_FIELD);
10419 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10421 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10422 } else if (context_used) {
10423 MonoInst *static_data;
10426 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
10427 method->klass->name_space, method->klass->name, method->name,
10428 depth, field->offset);
10431 if (mono_class_needs_cctor_run (klass, method))
10432 emit_generic_class_init (cfg, klass);
10435 * The pointer we're computing here is
10437 * super_info.static_data + field->offset
10439 static_data = emit_get_rgctx_klass (cfg, context_used,
10440 klass, MONO_RGCTX_INFO_STATIC_DATA);
10442 if (mini_is_gsharedvt_klass (cfg, klass)) {
10443 MonoInst *offset_ins;
10445 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10446 dreg = alloc_ireg_mp (cfg);
10447 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
10448 } else if (field->offset == 0) {
10451 int addr_reg = mono_alloc_preg (cfg);
10452 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
10454 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
10455 MonoInst *iargs [2];
10457 g_assert (field->parent);
10458 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10459 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10460 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10462 MonoVTable *vtable = NULL;
10464 if (!cfg->compile_aot)
10465 vtable = mono_class_vtable (cfg->domain, klass);
10466 CHECK_TYPELOAD (klass);
10469 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
10470 if (!(g_slist_find (class_inits, klass))) {
10471 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, klass, helper_sig_class_init_trampoline, NULL);
10472 if (cfg->verbose_level > 2)
10473 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
10474 class_inits = g_slist_prepend (class_inits, klass);
10477 if (cfg->run_cctors) {
10479 /* This makes so that inline cannot trigger */
10480 /* .cctors: too many apps depend on them */
10481 /* running with a specific order... */
10483 if (! vtable->initialized)
10484 INLINE_FAILURE ("class init");
10485 ex = mono_runtime_class_init_full (vtable, FALSE);
10487 set_exception_object (cfg, ex);
10488 goto exception_exit;
10492 if (cfg->compile_aot)
10493 EMIT_NEW_SFLDACONST (cfg, ins, field);
10496 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10498 EMIT_NEW_PCONST (cfg, ins, addr);
10501 MonoInst *iargs [1];
10502 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
10503 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
10507 /* Generate IR to do the actual load/store operation */
10509 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
10510 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10511 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
10512 emit_memory_barrier (cfg, FullBarrier);
10515 if (op == CEE_LDSFLDA) {
10516 ins->klass = mono_class_from_mono_type (ftype);
10517 ins->type = STACK_PTR;
10519 } else if (op == CEE_STSFLD) {
10522 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
10523 store->flags |= ins_flag;
10525 gboolean is_const = FALSE;
10526 MonoVTable *vtable = NULL;
10527 gpointer addr = NULL;
10529 if (!context_used) {
10530 vtable = mono_class_vtable (cfg->domain, klass);
10531 CHECK_TYPELOAD (klass);
10533 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
10534 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
10535 int ro_type = ftype->type;
10537 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10538 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
10539 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
10542 GSHAREDVT_FAILURE (op);
10544 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
10547 case MONO_TYPE_BOOLEAN:
10549 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
10553 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
10556 case MONO_TYPE_CHAR:
10558 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
10562 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
10567 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
10571 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
10576 case MONO_TYPE_PTR:
10577 case MONO_TYPE_FNPTR:
10578 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10579 type_to_eval_stack_type ((cfg), field->type, *sp);
10582 case MONO_TYPE_STRING:
10583 case MONO_TYPE_OBJECT:
10584 case MONO_TYPE_CLASS:
10585 case MONO_TYPE_SZARRAY:
10586 case MONO_TYPE_ARRAY:
10587 if (!mono_gc_is_moving ()) {
10588 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10589 type_to_eval_stack_type ((cfg), field->type, *sp);
10597 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
10602 case MONO_TYPE_VALUETYPE:
10612 CHECK_STACK_OVF (1);
10614 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
10615 load->flags |= ins_flag;
10621 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
10622 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10623 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
10624 emit_memory_barrier (cfg, FullBarrier);
10635 token = read32 (ip + 1);
10636 klass = mini_get_class (method, token, generic_context);
10637 CHECK_TYPELOAD (klass);
10638 if (ins_flag & MONO_INST_VOLATILE) {
10639 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10640 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
10641 emit_memory_barrier (cfg, FullBarrier);
10643 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
10644 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
10645 ins->flags |= ins_flag;
10646 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
10647 generic_class_is_reference_type (cfg, klass)) {
10648 /* insert call to write barrier */
10649 emit_write_barrier (cfg, sp [0], sp [1]);
10661 const char *data_ptr;
10663 guint32 field_token;
10669 token = read32 (ip + 1);
10671 klass = mini_get_class (method, token, generic_context);
10672 CHECK_TYPELOAD (klass);
10674 context_used = mini_class_check_context_used (cfg, klass);
10676 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
10677 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
10678 ins->sreg1 = sp [0]->dreg;
10679 ins->type = STACK_I4;
10680 ins->dreg = alloc_ireg (cfg);
10681 MONO_ADD_INS (cfg->cbb, ins);
10682 *sp = mono_decompose_opcode (cfg, ins);
10685 if (context_used) {
10686 MonoInst *args [3];
10687 MonoClass *array_class = mono_array_class_get (klass, 1);
10688 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
10690 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
10693 args [0] = emit_get_rgctx_klass (cfg, context_used,
10694 array_class, MONO_RGCTX_INFO_VTABLE);
10699 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
10701 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
10703 if (cfg->opt & MONO_OPT_SHARED) {
10704 /* Decompose now to avoid problems with references to the domainvar */
10705 MonoInst *iargs [3];
10707 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10708 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10709 iargs [2] = sp [0];
10711 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
10713 /* Decompose later since it is needed by abcrem */
10714 MonoClass *array_type = mono_array_class_get (klass, 1);
10715 mono_class_vtable (cfg->domain, array_type);
10716 CHECK_TYPELOAD (array_type);
10718 MONO_INST_NEW (cfg, ins, OP_NEWARR);
10719 ins->dreg = alloc_ireg_ref (cfg);
10720 ins->sreg1 = sp [0]->dreg;
10721 ins->inst_newa_class = klass;
10722 ins->type = STACK_OBJ;
10723 ins->klass = array_type;
10724 MONO_ADD_INS (cfg->cbb, ins);
10725 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10726 cfg->cbb->has_array_access = TRUE;
10728 /* Needed so mono_emit_load_get_addr () gets called */
10729 mono_get_got_var (cfg);
10739 * we inline/optimize the initialization sequence if possible.
10740 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
10741 * for small sizes open code the memcpy
10742 * ensure the rva field is big enough
10744 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
10745 MonoMethod *memcpy_method = get_memcpy_method ();
10746 MonoInst *iargs [3];
10747 int add_reg = alloc_ireg_mp (cfg);
10749 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
10750 if (cfg->compile_aot) {
10751 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
10753 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
10755 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
10756 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
10765 if (sp [0]->type != STACK_OBJ)
10768 MONO_INST_NEW (cfg, ins, OP_LDLEN);
10769 ins->dreg = alloc_preg (cfg);
10770 ins->sreg1 = sp [0]->dreg;
10771 ins->type = STACK_I4;
10772 /* This flag will be inherited by the decomposition */
10773 ins->flags |= MONO_INST_FAULT;
10774 MONO_ADD_INS (cfg->cbb, ins);
10775 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10776 cfg->cbb->has_array_access = TRUE;
10784 if (sp [0]->type != STACK_OBJ)
10787 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10789 klass = mini_get_class (method, read32 (ip + 1), generic_context);
10790 CHECK_TYPELOAD (klass);
10791 /* we need to make sure that this array is exactly the type it needs
10792 * to be for correctness. the wrappers are lax with their usage
10793 * so we need to ignore them here
10795 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
10796 MonoClass *array_class = mono_array_class_get (klass, 1);
10797 mini_emit_check_array_type (cfg, sp [0], array_class);
10798 CHECK_TYPELOAD (array_class);
10802 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10807 case CEE_LDELEM_I1:
10808 case CEE_LDELEM_U1:
10809 case CEE_LDELEM_I2:
10810 case CEE_LDELEM_U2:
10811 case CEE_LDELEM_I4:
10812 case CEE_LDELEM_U4:
10813 case CEE_LDELEM_I8:
10815 case CEE_LDELEM_R4:
10816 case CEE_LDELEM_R8:
10817 case CEE_LDELEM_REF: {
10823 if (*ip == CEE_LDELEM) {
10825 token = read32 (ip + 1);
10826 klass = mini_get_class (method, token, generic_context);
10827 CHECK_TYPELOAD (klass);
10828 mono_class_init (klass);
10831 klass = array_access_to_klass (*ip);
10833 if (sp [0]->type != STACK_OBJ)
10836 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10838 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
10839 // FIXME-VT: OP_ICONST optimization
10840 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10841 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10842 ins->opcode = OP_LOADV_MEMBASE;
10843 } else if (sp [1]->opcode == OP_ICONST) {
10844 int array_reg = sp [0]->dreg;
10845 int index_reg = sp [1]->dreg;
10846 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
10848 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
10849 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
10851 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10852 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10855 if (*ip == CEE_LDELEM)
10862 case CEE_STELEM_I1:
10863 case CEE_STELEM_I2:
10864 case CEE_STELEM_I4:
10865 case CEE_STELEM_I8:
10866 case CEE_STELEM_R4:
10867 case CEE_STELEM_R8:
10868 case CEE_STELEM_REF:
10873 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10875 if (*ip == CEE_STELEM) {
10877 token = read32 (ip + 1);
10878 klass = mini_get_class (method, token, generic_context);
10879 CHECK_TYPELOAD (klass);
10880 mono_class_init (klass);
10883 klass = array_access_to_klass (*ip);
10885 if (sp [0]->type != STACK_OBJ)
10888 emit_array_store (cfg, klass, sp, TRUE);
10890 if (*ip == CEE_STELEM)
10897 case CEE_CKFINITE: {
10901 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
10902 ins->sreg1 = sp [0]->dreg;
10903 ins->dreg = alloc_freg (cfg);
10904 ins->type = STACK_R8;
10905 MONO_ADD_INS (bblock, ins);
10907 *sp++ = mono_decompose_opcode (cfg, ins);
10912 case CEE_REFANYVAL: {
10913 MonoInst *src_var, *src;
10915 int klass_reg = alloc_preg (cfg);
10916 int dreg = alloc_preg (cfg);
10918 GSHAREDVT_FAILURE (*ip);
10921 MONO_INST_NEW (cfg, ins, *ip);
10924 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
10925 CHECK_TYPELOAD (klass);
10926 mono_class_init (klass);
10928 context_used = mini_class_check_context_used (cfg, klass);
10931 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10933 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
10934 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
10935 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
10937 if (context_used) {
10938 MonoInst *klass_ins;
10940 klass_ins = emit_get_rgctx_klass (cfg, context_used,
10941 klass, MONO_RGCTX_INFO_KLASS);
10944 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
10945 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
10947 mini_emit_class_check (cfg, klass_reg, klass);
10949 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
10950 ins->type = STACK_MP;
10955 case CEE_MKREFANY: {
10956 MonoInst *loc, *addr;
10958 GSHAREDVT_FAILURE (*ip);
10961 MONO_INST_NEW (cfg, ins, *ip);
10964 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
10965 CHECK_TYPELOAD (klass);
10966 mono_class_init (klass);
10968 context_used = mini_class_check_context_used (cfg, klass);
10970 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
10971 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
10973 if (context_used) {
10974 MonoInst *const_ins;
10975 int type_reg = alloc_preg (cfg);
10977 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
10978 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
10979 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
10980 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
10981 } else if (cfg->compile_aot) {
10982 int const_reg = alloc_preg (cfg);
10983 int type_reg = alloc_preg (cfg);
10985 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
10986 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
10987 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
10988 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
10990 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
10991 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
10993 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
10995 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
10996 ins->type = STACK_VTYPE;
10997 ins->klass = mono_defaults.typed_reference_class;
11002 case CEE_LDTOKEN: {
11004 MonoClass *handle_class;
11006 CHECK_STACK_OVF (1);
11009 n = read32 (ip + 1);
11011 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
11012 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
11013 handle = mono_method_get_wrapper_data (method, n);
11014 handle_class = mono_method_get_wrapper_data (method, n + 1);
11015 if (handle_class == mono_defaults.typehandle_class)
11016 handle = &((MonoClass*)handle)->byval_arg;
11019 handle = mono_ldtoken (image, n, &handle_class, generic_context);
11023 mono_class_init (handle_class);
11024 if (cfg->generic_sharing_context) {
11025 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
11026 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
11027 /* This case handles ldtoken
11028 of an open type, like for
11031 } else if (handle_class == mono_defaults.typehandle_class) {
11032 /* If we get a MONO_TYPE_CLASS
11033 then we need to provide the
11035 instantiation of it. */
11036 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
11039 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type (handle));
11040 } else if (handle_class == mono_defaults.fieldhandle_class)
11041 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
11042 else if (handle_class == mono_defaults.methodhandle_class)
11043 context_used = mini_method_check_context_used (cfg, handle);
11045 g_assert_not_reached ();
11048 if ((cfg->opt & MONO_OPT_SHARED) &&
11049 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
11050 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
11051 MonoInst *addr, *vtvar, *iargs [3];
11052 int method_context_used;
11054 method_context_used = mini_method_check_context_used (cfg, method);
11056 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11058 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
11059 EMIT_NEW_ICONST (cfg, iargs [1], n);
11060 if (method_context_used) {
11061 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
11062 method, MONO_RGCTX_INFO_METHOD);
11063 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
11065 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
11066 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
11068 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11070 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11072 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11074 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
11075 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
11076 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
11077 (cmethod->klass == mono_defaults.systemtype_class) &&
11078 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
11079 MonoClass *tclass = mono_class_from_mono_type (handle);
11081 mono_class_init (tclass);
11082 if (context_used) {
11083 ins = emit_get_rgctx_klass (cfg, context_used,
11084 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
11085 } else if (cfg->compile_aot) {
11086 if (method->wrapper_type) {
11087 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
11088 /* Special case for static synchronized wrappers */
11089 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
11091 /* FIXME: n is not a normal token */
11093 EMIT_NEW_PCONST (cfg, ins, NULL);
11096 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
11099 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
11101 ins->type = STACK_OBJ;
11102 ins->klass = cmethod->klass;
11105 MonoInst *addr, *vtvar;
11107 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11109 if (context_used) {
11110 if (handle_class == mono_defaults.typehandle_class) {
11111 ins = emit_get_rgctx_klass (cfg, context_used,
11112 mono_class_from_mono_type (handle),
11113 MONO_RGCTX_INFO_TYPE);
11114 } else if (handle_class == mono_defaults.methodhandle_class) {
11115 ins = emit_get_rgctx_method (cfg, context_used,
11116 handle, MONO_RGCTX_INFO_METHOD);
11117 } else if (handle_class == mono_defaults.fieldhandle_class) {
11118 ins = emit_get_rgctx_field (cfg, context_used,
11119 handle, MONO_RGCTX_INFO_CLASS_FIELD);
11121 g_assert_not_reached ();
11123 } else if (cfg->compile_aot) {
11124 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
11126 EMIT_NEW_PCONST (cfg, ins, handle);
11128 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11129 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11130 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11140 MONO_INST_NEW (cfg, ins, OP_THROW);
11142 ins->sreg1 = sp [0]->dreg;
11144 bblock->out_of_line = TRUE;
11145 MONO_ADD_INS (bblock, ins);
11146 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11147 MONO_ADD_INS (bblock, ins);
11150 link_bblock (cfg, bblock, end_bblock);
11151 start_new_bblock = 1;
11153 case CEE_ENDFINALLY:
11154 /* mono_save_seq_point_info () depends on this */
11155 if (sp != stack_start)
11156 emit_seq_point (cfg, method, ip, FALSE, FALSE);
11157 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
11158 MONO_ADD_INS (bblock, ins);
11160 start_new_bblock = 1;
11163 * Control will leave the method so empty the stack, otherwise
11164 * the next basic block will start with a nonempty stack.
11166 while (sp != stack_start) {
11171 case CEE_LEAVE_S: {
11174 if (*ip == CEE_LEAVE) {
11176 target = ip + 5 + (gint32)read32(ip + 1);
11179 target = ip + 2 + (signed char)(ip [1]);
11182 /* empty the stack */
11183 while (sp != stack_start) {
11188 * If this leave statement is in a catch block, check for a
11189 * pending exception, and rethrow it if necessary.
11190 * We avoid doing this in runtime invoke wrappers, since those are called
11191 * by native code which expects the wrapper to catch all exceptions.
11193 for (i = 0; i < header->num_clauses; ++i) {
11194 MonoExceptionClause *clause = &header->clauses [i];
11197 * Use <= in the final comparison to handle clauses with multiple
11198 * leave statements, like in bug #78024.
11199 * The ordering of the exception clauses guarantees that we find the
11200 * innermost clause.
11202 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
11204 MonoBasicBlock *dont_throw;
11209 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
11212 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
11214 NEW_BBLOCK (cfg, dont_throw);
11217 * Currently, we always rethrow the abort exception, despite the
11218 * fact that this is not correct. See thread6.cs for an example.
11219 * But propagating the abort exception is more important than
11220 * getting the semantics right.
11222 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
11223 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
11224 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
11226 MONO_START_BB (cfg, dont_throw);
11231 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
11233 MonoExceptionClause *clause;
11235 for (tmp = handlers; tmp; tmp = tmp->next) {
11236 clause = tmp->data;
11237 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
11239 link_bblock (cfg, bblock, tblock);
11240 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
11241 ins->inst_target_bb = tblock;
11242 ins->inst_eh_block = clause;
11243 MONO_ADD_INS (bblock, ins);
11244 bblock->has_call_handler = 1;
11245 if (COMPILE_LLVM (cfg)) {
11246 MonoBasicBlock *target_bb;
11249 * Link the finally bblock with the target, since it will
11250 * conceptually branch there.
11251 * FIXME: Have to link the bblock containing the endfinally.
11253 GET_BBLOCK (cfg, target_bb, target);
11254 link_bblock (cfg, tblock, target_bb);
11257 g_list_free (handlers);
11260 MONO_INST_NEW (cfg, ins, OP_BR);
11261 MONO_ADD_INS (bblock, ins);
11262 GET_BBLOCK (cfg, tblock, target);
11263 link_bblock (cfg, bblock, tblock);
11264 ins->inst_target_bb = tblock;
11265 start_new_bblock = 1;
11267 if (*ip == CEE_LEAVE)
11276 * Mono specific opcodes
11278 case MONO_CUSTOM_PREFIX: {
11280 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
11284 case CEE_MONO_ICALL: {
11286 MonoJitICallInfo *info;
11288 token = read32 (ip + 2);
11289 func = mono_method_get_wrapper_data (method, token);
11290 info = mono_find_jit_icall_by_addr (func);
11292 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
11295 CHECK_STACK (info->sig->param_count);
11296 sp -= info->sig->param_count;
11298 ins = mono_emit_jit_icall (cfg, info->func, sp);
11299 if (!MONO_TYPE_IS_VOID (info->sig->ret))
11303 inline_costs += 10 * num_calls++;
11307 case CEE_MONO_LDPTR: {
11310 CHECK_STACK_OVF (1);
11312 token = read32 (ip + 2);
11314 ptr = mono_method_get_wrapper_data (method, token);
11315 /* FIXME: Generalize this */
11316 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
11317 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
11322 EMIT_NEW_PCONST (cfg, ins, ptr);
11325 inline_costs += 10 * num_calls++;
11326 /* Can't embed random pointers into AOT code */
11330 case CEE_MONO_JIT_ICALL_ADDR: {
11331 MonoJitICallInfo *callinfo;
11334 CHECK_STACK_OVF (1);
11336 token = read32 (ip + 2);
11338 ptr = mono_method_get_wrapper_data (method, token);
11339 callinfo = mono_find_jit_icall_by_addr (ptr);
11340 g_assert (callinfo);
11341 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
11344 inline_costs += 10 * num_calls++;
11347 case CEE_MONO_ICALL_ADDR: {
11348 MonoMethod *cmethod;
11351 CHECK_STACK_OVF (1);
11353 token = read32 (ip + 2);
11355 cmethod = mono_method_get_wrapper_data (method, token);
11357 if (cfg->compile_aot) {
11358 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
11360 ptr = mono_lookup_internal_call (cmethod);
11362 EMIT_NEW_PCONST (cfg, ins, ptr);
11368 case CEE_MONO_VTADDR: {
11369 MonoInst *src_var, *src;
11375 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11376 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
11381 case CEE_MONO_NEWOBJ: {
11382 MonoInst *iargs [2];
11384 CHECK_STACK_OVF (1);
11386 token = read32 (ip + 2);
11387 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11388 mono_class_init (klass);
11389 NEW_DOMAINCONST (cfg, iargs [0]);
11390 MONO_ADD_INS (cfg->cbb, iargs [0]);
11391 NEW_CLASSCONST (cfg, iargs [1], klass);
11392 MONO_ADD_INS (cfg->cbb, iargs [1]);
11393 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
11395 inline_costs += 10 * num_calls++;
11398 case CEE_MONO_OBJADDR:
11401 MONO_INST_NEW (cfg, ins, OP_MOVE);
11402 ins->dreg = alloc_ireg_mp (cfg);
11403 ins->sreg1 = sp [0]->dreg;
11404 ins->type = STACK_MP;
11405 MONO_ADD_INS (cfg->cbb, ins);
11409 case CEE_MONO_LDNATIVEOBJ:
11411 * Similar to LDOBJ, but instead load the unmanaged
11412 * representation of the vtype to the stack.
11417 token = read32 (ip + 2);
11418 klass = mono_method_get_wrapper_data (method, token);
11419 g_assert (klass->valuetype);
11420 mono_class_init (klass);
11423 MonoInst *src, *dest, *temp;
11426 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
11427 temp->backend.is_pinvoke = 1;
11428 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
11429 mini_emit_stobj (cfg, dest, src, klass, TRUE);
11431 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
11432 dest->type = STACK_VTYPE;
11433 dest->klass = klass;
11439 case CEE_MONO_RETOBJ: {
11441 * Same as RET, but return the native representation of a vtype
11444 g_assert (cfg->ret);
11445 g_assert (mono_method_signature (method)->pinvoke);
11450 token = read32 (ip + 2);
11451 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11453 if (!cfg->vret_addr) {
11454 g_assert (cfg->ret_var_is_local);
11456 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
11458 EMIT_NEW_RETLOADA (cfg, ins);
11460 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
11462 if (sp != stack_start)
11465 MONO_INST_NEW (cfg, ins, OP_BR);
11466 ins->inst_target_bb = end_bblock;
11467 MONO_ADD_INS (bblock, ins);
11468 link_bblock (cfg, bblock, end_bblock);
11469 start_new_bblock = 1;
11473 case CEE_MONO_CISINST:
11474 case CEE_MONO_CCASTCLASS: {
11479 token = read32 (ip + 2);
11480 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11481 if (ip [1] == CEE_MONO_CISINST)
11482 ins = handle_cisinst (cfg, klass, sp [0]);
11484 ins = handle_ccastclass (cfg, klass, sp [0]);
11490 case CEE_MONO_SAVE_LMF:
11491 case CEE_MONO_RESTORE_LMF:
11492 #ifdef MONO_ARCH_HAVE_LMF_OPS
11493 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
11494 MONO_ADD_INS (bblock, ins);
11495 cfg->need_lmf_area = TRUE;
11499 case CEE_MONO_CLASSCONST:
11500 CHECK_STACK_OVF (1);
11502 token = read32 (ip + 2);
11503 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
11506 inline_costs += 10 * num_calls++;
11508 case CEE_MONO_NOT_TAKEN:
11509 bblock->out_of_line = TRUE;
11512 case CEE_MONO_TLS: {
11515 CHECK_STACK_OVF (1);
11517 key = (gint32)read32 (ip + 2);
11518 g_assert (key < TLS_KEY_NUM);
11520 ins = mono_create_tls_get (cfg, key);
11522 if (cfg->compile_aot) {
11524 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
11525 ins->dreg = alloc_preg (cfg);
11526 ins->type = STACK_PTR;
11528 g_assert_not_reached ();
11531 ins->type = STACK_PTR;
11532 MONO_ADD_INS (bblock, ins);
11537 case CEE_MONO_DYN_CALL: {
11538 MonoCallInst *call;
11540 /* It would be easier to call a trampoline, but that would put an
11541 * extra frame on the stack, confusing exception handling. So
11542 * implement it inline using an opcode for now.
11545 if (!cfg->dyn_call_var) {
11546 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11547 /* prevent it from being register allocated */
11548 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
11551 /* Has to use a call inst since it local regalloc expects it */
11552 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
11553 ins = (MonoInst*)call;
11555 ins->sreg1 = sp [0]->dreg;
11556 ins->sreg2 = sp [1]->dreg;
11557 MONO_ADD_INS (bblock, ins);
11559 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
11562 inline_costs += 10 * num_calls++;
11566 case CEE_MONO_MEMORY_BARRIER: {
11568 emit_memory_barrier (cfg, (int)read32 (ip + 1));
11572 case CEE_MONO_JIT_ATTACH: {
11573 MonoInst *args [16];
11574 MonoInst *ad_ins, *lmf_ins;
11575 MonoBasicBlock *next_bb = NULL;
11577 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11579 EMIT_NEW_PCONST (cfg, ins, NULL);
11580 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11586 ad_ins = mono_get_domain_intrinsic (cfg);
11587 lmf_ins = mono_get_lmf_intrinsic (cfg);
11590 if (MONO_ARCH_HAVE_TLS_GET && ad_ins && lmf_ins) {
11591 NEW_BBLOCK (cfg, next_bb);
11593 MONO_ADD_INS (cfg->cbb, ad_ins);
11594 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ad_ins->dreg, 0);
11595 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
11597 MONO_ADD_INS (cfg->cbb, lmf_ins);
11598 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, lmf_ins->dreg, 0);
11599 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
11602 if (cfg->compile_aot) {
11603 /* AOT code is only used in the root domain */
11604 EMIT_NEW_PCONST (cfg, args [0], NULL);
11606 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
11608 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
11609 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11612 MONO_START_BB (cfg, next_bb);
11618 case CEE_MONO_JIT_DETACH: {
11619 MonoInst *args [16];
11621 /* Restore the original domain */
11622 dreg = alloc_ireg (cfg);
11623 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
11624 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
11629 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
11635 case CEE_PREFIX1: {
11638 case CEE_ARGLIST: {
11639 /* somewhat similar to LDTOKEN */
11640 MonoInst *addr, *vtvar;
11641 CHECK_STACK_OVF (1);
11642 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
11644 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11645 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
11647 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11648 ins->type = STACK_VTYPE;
11649 ins->klass = mono_defaults.argumenthandle_class;
11662 * The following transforms:
11663 * CEE_CEQ into OP_CEQ
11664 * CEE_CGT into OP_CGT
11665 * CEE_CGT_UN into OP_CGT_UN
11666 * CEE_CLT into OP_CLT
11667 * CEE_CLT_UN into OP_CLT_UN
11669 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
11671 MONO_INST_NEW (cfg, ins, cmp->opcode);
11673 cmp->sreg1 = sp [0]->dreg;
11674 cmp->sreg2 = sp [1]->dreg;
11675 type_from_op (cmp, sp [0], sp [1]);
11677 if ((sp [0]->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
11678 cmp->opcode = OP_LCOMPARE;
11679 else if (sp [0]->type == STACK_R8)
11680 cmp->opcode = OP_FCOMPARE;
11682 cmp->opcode = OP_ICOMPARE;
11683 MONO_ADD_INS (bblock, cmp);
11684 ins->type = STACK_I4;
11685 ins->dreg = alloc_dreg (cfg, ins->type);
11686 type_from_op (ins, sp [0], sp [1]);
11688 if (cmp->opcode == OP_FCOMPARE) {
11690 * The backends expect the fceq opcodes to do the
11693 cmp->opcode = OP_NOP;
11694 ins->sreg1 = cmp->sreg1;
11695 ins->sreg2 = cmp->sreg2;
11697 MONO_ADD_INS (bblock, ins);
11703 MonoInst *argconst;
11704 MonoMethod *cil_method;
11706 CHECK_STACK_OVF (1);
11708 n = read32 (ip + 2);
11709 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
11710 if (!cmethod || mono_loader_get_last_error ())
11712 mono_class_init (cmethod->klass);
11714 mono_save_token_info (cfg, image, n, cmethod);
11716 context_used = mini_method_check_context_used (cfg, cmethod);
11718 cil_method = cmethod;
11719 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
11720 METHOD_ACCESS_FAILURE;
11722 if (mono_security_cas_enabled ()) {
11723 if (check_linkdemand (cfg, method, cmethod))
11724 INLINE_FAILURE ("linkdemand");
11725 CHECK_CFG_EXCEPTION;
11726 } else if (mono_security_core_clr_enabled ()) {
11727 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
11731 * Optimize the common case of ldftn+delegate creation
11733 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
11734 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
11735 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
11736 MonoInst *target_ins;
11737 MonoMethod *invoke;
11738 int invoke_context_used;
11740 invoke = mono_get_delegate_invoke (ctor_method->klass);
11741 if (!invoke || !mono_method_signature (invoke))
11744 invoke_context_used = mini_method_check_context_used (cfg, invoke);
11746 target_ins = sp [-1];
11748 if (mono_security_core_clr_enabled ())
11749 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
11751 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
11752 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
11753 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
11754 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
11755 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
11759 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
11760 /* FIXME: SGEN support */
11761 if (invoke_context_used == 0) {
11763 if (cfg->verbose_level > 3)
11764 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
11766 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
11767 CHECK_CFG_EXCEPTION;
11776 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
11777 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
11781 inline_costs += 10 * num_calls++;
11784 case CEE_LDVIRTFTN: {
11785 MonoInst *args [2];
11789 n = read32 (ip + 2);
11790 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
11791 if (!cmethod || mono_loader_get_last_error ())
11793 mono_class_init (cmethod->klass);
11795 context_used = mini_method_check_context_used (cfg, cmethod);
11797 if (mono_security_cas_enabled ()) {
11798 if (check_linkdemand (cfg, method, cmethod))
11799 INLINE_FAILURE ("linkdemand");
11800 CHECK_CFG_EXCEPTION;
11801 } else if (mono_security_core_clr_enabled ()) {
11802 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
11808 args [1] = emit_get_rgctx_method (cfg, context_used,
11809 cmethod, MONO_RGCTX_INFO_METHOD);
11812 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
11814 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
11817 inline_costs += 10 * num_calls++;
11821 CHECK_STACK_OVF (1);
11823 n = read16 (ip + 2);
11825 EMIT_NEW_ARGLOAD (cfg, ins, n);
11830 CHECK_STACK_OVF (1);
11832 n = read16 (ip + 2);
11834 NEW_ARGLOADA (cfg, ins, n);
11835 MONO_ADD_INS (cfg->cbb, ins);
11843 n = read16 (ip + 2);
11845 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
11847 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
11851 CHECK_STACK_OVF (1);
11853 n = read16 (ip + 2);
11855 EMIT_NEW_LOCLOAD (cfg, ins, n);
11860 unsigned char *tmp_ip;
11861 CHECK_STACK_OVF (1);
11863 n = read16 (ip + 2);
11866 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
11872 EMIT_NEW_LOCLOADA (cfg, ins, n);
11881 n = read16 (ip + 2);
11883 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
11885 emit_stloc_ir (cfg, sp, header, n);
11892 if (sp != stack_start)
11894 if (cfg->method != method)
11896 * Inlining this into a loop in a parent could lead to
11897 * stack overflows which is different behavior than the
11898 * non-inlined case, thus disable inlining in this case.
11900 goto inline_failure;
11902 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
11903 ins->dreg = alloc_preg (cfg);
11904 ins->sreg1 = sp [0]->dreg;
11905 ins->type = STACK_PTR;
11906 MONO_ADD_INS (cfg->cbb, ins);
11908 cfg->flags |= MONO_CFG_HAS_ALLOCA;
11910 ins->flags |= MONO_INST_INIT;
11915 case CEE_ENDFILTER: {
11916 MonoExceptionClause *clause, *nearest;
11917 int cc, nearest_num;
11921 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
11923 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
11924 ins->sreg1 = (*sp)->dreg;
11925 MONO_ADD_INS (bblock, ins);
11926 start_new_bblock = 1;
11931 for (cc = 0; cc < header->num_clauses; ++cc) {
11932 clause = &header->clauses [cc];
11933 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
11934 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
11935 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
11940 g_assert (nearest);
11941 if ((ip - header->code) != nearest->handler_offset)
11946 case CEE_UNALIGNED_:
11947 ins_flag |= MONO_INST_UNALIGNED;
11948 /* FIXME: record alignment? we can assume 1 for now */
11952 case CEE_VOLATILE_:
11953 ins_flag |= MONO_INST_VOLATILE;
11957 ins_flag |= MONO_INST_TAILCALL;
11958 cfg->flags |= MONO_CFG_HAS_TAIL;
11959 /* Can't inline tail calls at this time */
11960 inline_costs += 100000;
11967 token = read32 (ip + 2);
11968 klass = mini_get_class (method, token, generic_context);
11969 CHECK_TYPELOAD (klass);
11970 if (generic_class_is_reference_type (cfg, klass))
11971 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
11973 mini_emit_initobj (cfg, *sp, NULL, klass);
11977 case CEE_CONSTRAINED_:
11979 token = read32 (ip + 2);
11980 constrained_call = mini_get_class (method, token, generic_context);
11981 CHECK_TYPELOAD (constrained_call);
11985 case CEE_INITBLK: {
11986 MonoInst *iargs [3];
11990 /* Skip optimized paths for volatile operations. */
11991 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
11992 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
11993 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
11994 /* emit_memset only works when val == 0 */
11995 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
11998 iargs [0] = sp [0];
11999 iargs [1] = sp [1];
12000 iargs [2] = sp [2];
12001 if (ip [1] == CEE_CPBLK) {
12003 * FIXME: It's unclear whether we should be emitting both the acquire
12004 * and release barriers for cpblk. It is technically both a load and
12005 * store operation, so it seems like that's the sensible thing to do.
12007 MonoMethod *memcpy_method = get_memcpy_method ();
12008 if (ins_flag & MONO_INST_VOLATILE) {
12009 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
12010 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
12011 emit_memory_barrier (cfg, FullBarrier);
12013 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
12014 call->flags |= ins_flag;
12015 if (ins_flag & MONO_INST_VOLATILE) {
12016 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
12017 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
12018 emit_memory_barrier (cfg, FullBarrier);
12021 MonoMethod *memset_method = get_memset_method ();
12022 if (ins_flag & MONO_INST_VOLATILE) {
12023 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
12024 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
12025 emit_memory_barrier (cfg, FullBarrier);
12027 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
12028 call->flags |= ins_flag;
12039 ins_flag |= MONO_INST_NOTYPECHECK;
12041 ins_flag |= MONO_INST_NORANGECHECK;
12042 /* we ignore the no-nullcheck for now since we
12043 * really do it explicitly only when doing callvirt->call
12047 case CEE_RETHROW: {
12049 int handler_offset = -1;
12051 for (i = 0; i < header->num_clauses; ++i) {
12052 MonoExceptionClause *clause = &header->clauses [i];
12053 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
12054 handler_offset = clause->handler_offset;
12059 bblock->flags |= BB_EXCEPTION_UNSAFE;
12061 g_assert (handler_offset != -1);
12063 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
12064 MONO_INST_NEW (cfg, ins, OP_RETHROW);
12065 ins->sreg1 = load->dreg;
12066 MONO_ADD_INS (bblock, ins);
12068 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12069 MONO_ADD_INS (bblock, ins);
12072 link_bblock (cfg, bblock, end_bblock);
12073 start_new_bblock = 1;
12081 GSHAREDVT_FAILURE (*ip);
12083 CHECK_STACK_OVF (1);
12085 token = read32 (ip + 2);
12086 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic && !generic_context) {
12087 MonoType *type = mono_type_create_from_typespec (image, token);
12088 val = mono_type_size (type, &ialign);
12090 MonoClass *klass = mono_class_get_full (image, token, generic_context);
12091 CHECK_TYPELOAD (klass);
12092 mono_class_init (klass);
12093 val = mono_type_size (&klass->byval_arg, &ialign);
12095 EMIT_NEW_ICONST (cfg, ins, val);
12100 case CEE_REFANYTYPE: {
12101 MonoInst *src_var, *src;
12103 GSHAREDVT_FAILURE (*ip);
12109 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12111 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12112 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12113 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
12118 case CEE_READONLY_:
12131 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
12141 g_warning ("opcode 0x%02x not handled", *ip);
12145 if (start_new_bblock != 1)
12148 bblock->cil_length = ip - bblock->cil_code;
12149 if (bblock->next_bb) {
12150 /* This could already be set because of inlining, #693905 */
12151 MonoBasicBlock *bb = bblock;
12153 while (bb->next_bb)
12155 bb->next_bb = end_bblock;
12157 bblock->next_bb = end_bblock;
12160 if (cfg->method == method && cfg->domainvar) {
12162 MonoInst *get_domain;
12164 cfg->cbb = init_localsbb;
12166 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
12167 MONO_ADD_INS (cfg->cbb, get_domain);
12169 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
12171 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
12172 MONO_ADD_INS (cfg->cbb, store);
12175 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
12176 if (cfg->compile_aot)
12177 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
12178 mono_get_got_var (cfg);
12181 if (cfg->method == method && cfg->got_var)
12182 mono_emit_load_got_addr (cfg);
12184 if (init_localsbb) {
12185 cfg->cbb = init_localsbb;
12187 for (i = 0; i < header->num_locals; ++i) {
12188 emit_init_local (cfg, i, header->locals [i], init_locals);
12192 if (cfg->init_ref_vars && cfg->method == method) {
12193 /* Emit initialization for ref vars */
12194 // FIXME: Avoid duplication initialization for IL locals.
12195 for (i = 0; i < cfg->num_varinfo; ++i) {
12196 MonoInst *ins = cfg->varinfo [i];
12198 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
12199 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
12203 if (cfg->lmf_var && cfg->method == method) {
12204 cfg->cbb = init_localsbb;
12205 emit_push_lmf (cfg);
12208 cfg->cbb = init_localsbb;
12209 emit_instrumentation_call (cfg, mono_profiler_method_enter);
12212 MonoBasicBlock *bb;
12215 * Make seq points at backward branch targets interruptable.
12217 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
12218 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
12219 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
12222 /* Add a sequence point for method entry/exit events */
12224 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
12225 MONO_ADD_INS (init_localsbb, ins);
12226 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
12227 MONO_ADD_INS (cfg->bb_exit, ins);
12231 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
12232 * the code they refer to was dead (#11880).
12234 if (sym_seq_points) {
12235 for (i = 0; i < header->code_size; ++i) {
12236 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
12239 NEW_SEQ_POINT (cfg, ins, i, FALSE);
12240 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
12247 if (cfg->method == method) {
12248 MonoBasicBlock *bb;
12249 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12250 bb->region = mono_find_block_region (cfg, bb->real_offset);
12252 mono_create_spvar_for_region (cfg, bb->region);
12253 if (cfg->verbose_level > 2)
12254 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
12258 g_slist_free (class_inits);
12259 dont_inline = g_list_remove (dont_inline, method);
12261 if (inline_costs < 0) {
12264 /* Method is too large */
12265 mname = mono_method_full_name (method, TRUE);
12266 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
12267 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
12269 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
12270 mono_basic_block_free (original_bb);
12274 if ((cfg->verbose_level > 2) && (cfg->method == method))
12275 mono_print_code (cfg, "AFTER METHOD-TO-IR");
12277 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
12278 mono_basic_block_free (original_bb);
12279 return inline_costs;
12282 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
12289 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
12293 set_exception_type_from_invalid_il (cfg, method, ip);
12297 g_slist_free (class_inits);
12298 mono_basic_block_free (original_bb);
12299 dont_inline = g_list_remove (dont_inline, method);
12300 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map an OP_STORE*_MEMBASE_REG opcode to the corresponding
 * OP_STORE*_MEMBASE_IMM opcode of the same width (immediate source
 * operand instead of a register source).  Falls through to
 * g_assert_not_reached () for opcodes with no immediate counterpart.
 *
 * NOTE(review): the extraction elides the return-type/brace lines
 * around the visible switch arms; only the visible text is kept here.
 */
12305 store_membase_reg_to_store_membase_imm (int opcode)
12308 case OP_STORE_MEMBASE_REG:
12309 return OP_STORE_MEMBASE_IMM;
12310 case OP_STOREI1_MEMBASE_REG:
12311 return OP_STOREI1_MEMBASE_IMM;
12312 case OP_STOREI2_MEMBASE_REG:
12313 return OP_STOREI2_MEMBASE_IMM;
12314 case OP_STOREI4_MEMBASE_REG:
12315 return OP_STOREI4_MEMBASE_IMM;
12316 case OP_STOREI8_MEMBASE_REG:
12317 return OP_STOREI8_MEMBASE_IMM;
/* no *_IMM form exists for the remaining store opcodes */
12319 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Map a two-register IR opcode to the variant that takes an
 * immediate as its second operand (OP_IADD -> OP_IADD_IMM, the
 * store-membase opcodes -> their *_MEMBASE_IMM forms, etc.).
 *
 * NOTE(review): the extraction elides most `case` labels and the
 * default return, so each `return` below is annotated only by the
 * opcode it yields; the matching source opcode is presumed from the
 * name but cannot be confirmed from the visible text.
 */
12326 mono_op_to_op_imm (int opcode)
/* 32-bit integer ALU ops */
12330 return OP_IADD_IMM;
12332 return OP_ISUB_IMM;
12334 return OP_IDIV_IMM;
12336 return OP_IDIV_UN_IMM;
12338 return OP_IREM_IMM;
12340 return OP_IREM_UN_IMM;
12342 return OP_IMUL_IMM;
12344 return OP_IAND_IMM;
12348 return OP_IXOR_IMM;
12350 return OP_ISHL_IMM;
12352 return OP_ISHR_IMM;
12354 return OP_ISHR_UN_IMM;
/* 64-bit integer ALU ops */
12357 return OP_LADD_IMM;
12359 return OP_LSUB_IMM;
12361 return OP_LAND_IMM;
12365 return OP_LXOR_IMM;
12367 return OP_LSHL_IMM;
12369 return OP_LSHR_IMM;
12371 return OP_LSHR_UN_IMM;
/* compares */
12374 return OP_COMPARE_IMM;
12376 return OP_ICOMPARE_IMM;
12378 return OP_LCOMPARE_IMM;
/* stores: fold a constant source into the store itself */
12380 case OP_STORE_MEMBASE_REG:
12381 return OP_STORE_MEMBASE_IMM;
12382 case OP_STOREI1_MEMBASE_REG:
12383 return OP_STOREI1_MEMBASE_IMM;
12384 case OP_STOREI2_MEMBASE_REG:
12385 return OP_STOREI2_MEMBASE_IMM;
12386 case OP_STOREI4_MEMBASE_REG:
12387 return OP_STOREI4_MEMBASE_IMM;
/* x86/amd64-specific opcodes with immediate encodings */
12389 #if defined(TARGET_X86) || defined (TARGET_AMD64)
12391 return OP_X86_PUSH_IMM;
12392 case OP_X86_COMPARE_MEMBASE_REG:
12393 return OP_X86_COMPARE_MEMBASE_IMM;
12395 #if defined(TARGET_AMD64)
12396 case OP_AMD64_ICOMPARE_MEMBASE_REG:
12397 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* an indirect call to a constant address becomes a direct call */
12399 case OP_VOIDCALL_REG:
12400 return OP_VOIDCALL;
12408 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CEE_LDIND_* IL opcode to the JIT load opcode of the
 * matching width/signedness (OP_LOAD*_MEMBASE).  CEE_LDIND_REF maps
 * to the pointer-sized OP_LOAD_MEMBASE.  Unknown opcodes hit
 * g_assert_not_reached ().
 *
 * NOTE(review): several `case` labels are elided by the extraction;
 * the source opcode for each visible `return` is presumed from the
 * result's name.
 */
12415 ldind_to_load_membase (int opcode)
12419 return OP_LOADI1_MEMBASE;
12421 return OP_LOADU1_MEMBASE;
12423 return OP_LOADI2_MEMBASE;
12425 return OP_LOADU2_MEMBASE;
12427 return OP_LOADI4_MEMBASE;
12429 return OP_LOADU4_MEMBASE;
12431 return OP_LOAD_MEMBASE;
/* object references load as a native-pointer-sized value */
12432 case CEE_LDIND_REF:
12433 return OP_LOAD_MEMBASE;
12435 return OP_LOADI8_MEMBASE;
12437 return OP_LOADR4_MEMBASE;
12439 return OP_LOADR8_MEMBASE;
12441 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CEE_STIND_* IL opcode to the JIT store opcode of the
 * matching width (OP_STORE*_MEMBASE_REG).  CEE_STIND_REF maps to the
 * pointer-sized OP_STORE_MEMBASE_REG.  Unknown opcodes hit
 * g_assert_not_reached ().
 *
 * NOTE(review): several `case` labels are elided by the extraction;
 * the source opcode for each visible `return` is presumed from the
 * result's name.
 */
12448 stind_to_store_membase (int opcode)
12452 return OP_STOREI1_MEMBASE_REG;
12454 return OP_STOREI2_MEMBASE_REG;
12456 return OP_STOREI4_MEMBASE_REG;
/* object references store as a native-pointer-sized value */
12458 case CEE_STIND_REF:
12459 return OP_STORE_MEMBASE_REG;
12461 return OP_STOREI8_MEMBASE_REG;
12463 return OP_STORER4_MEMBASE_REG;
12465 return OP_STORER8_MEMBASE_REG;
12467 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map an OP_LOAD*_MEMBASE (base register + displacement) opcode to
 * the corresponding OP_LOAD*_MEM form.  Only x86/amd64 provide these
 * opcodes, hence the target #ifdef; the 8-byte variant additionally
 * requires a 64-bit register size.
 *
 * NOTE(review): the fallback return for non-matching opcodes/targets
 * is elided by the extraction — presumably -1; confirm against the
 * full source.
 */
12474 mono_load_membase_to_load_mem (int opcode)
12476 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
12477 #if defined(TARGET_X86) || defined(TARGET_AMD64)
12479 case OP_LOAD_MEMBASE:
12480 return OP_LOAD_MEM;
12481 case OP_LOADU1_MEMBASE:
12482 return OP_LOADU1_MEM;
12483 case OP_LOADU2_MEMBASE:
12484 return OP_LOADU2_MEM;
12485 case OP_LOADI4_MEMBASE:
12486 return OP_LOADI4_MEM;
12487 case OP_LOADU4_MEMBASE:
12488 return OP_LOADU4_MEM;
/* 8-byte loads only exist when registers are 64-bit wide */
12489 #if SIZEOF_REGISTER == 8
12490 case OP_LOADI8_MEMBASE:
12491 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU opcode whose result is written back to memory via
 * STORE_OPCODE, return the x86/amd64 read-modify-write opcode that
 * operates directly on the memory destination
 * (OP_X86_*_MEMBASE_REG / OP_X86_*_MEMBASE_IMM and the OP_AMD64_*
 * equivalents).  The guard first checks that STORE_OPCODE is a plain
 * full-width (or, on amd64, 8-byte) register store.
 *
 * NOTE(review): the `case` labels and the fallback return are elided
 * by the extraction; the source opcode for each visible `return` is
 * presumed from the result's name.
 */
12500 op_to_op_dest_membase (int store_opcode, int opcode)
12502 #if defined(TARGET_X86)
/* only fold into pointer/4-byte stores on x86 */
12503 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
12508 return OP_X86_ADD_MEMBASE_REG;
12510 return OP_X86_SUB_MEMBASE_REG;
12512 return OP_X86_AND_MEMBASE_REG;
12514 return OP_X86_OR_MEMBASE_REG;
12516 return OP_X86_XOR_MEMBASE_REG;
12519 return OP_X86_ADD_MEMBASE_IMM;
12522 return OP_X86_SUB_MEMBASE_IMM;
12525 return OP_X86_AND_MEMBASE_IMM;
12528 return OP_X86_OR_MEMBASE_IMM;
12531 return OP_X86_XOR_MEMBASE_IMM;
12537 #if defined(TARGET_AMD64)
/* amd64 also accepts 8-byte stores */
12538 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
12543 return OP_X86_ADD_MEMBASE_REG;
12545 return OP_X86_SUB_MEMBASE_REG;
12547 return OP_X86_AND_MEMBASE_REG;
12549 return OP_X86_OR_MEMBASE_REG;
12551 return OP_X86_XOR_MEMBASE_REG;
12553 return OP_X86_ADD_MEMBASE_IMM;
12555 return OP_X86_SUB_MEMBASE_IMM;
12557 return OP_X86_AND_MEMBASE_IMM;
12559 return OP_X86_OR_MEMBASE_IMM;
12561 return OP_X86_XOR_MEMBASE_IMM;
/* 64-bit (long) variants */
12563 return OP_AMD64_ADD_MEMBASE_REG;
12565 return OP_AMD64_SUB_MEMBASE_REG;
12567 return OP_AMD64_AND_MEMBASE_REG;
12569 return OP_AMD64_OR_MEMBASE_REG;
12571 return OP_AMD64_XOR_MEMBASE_REG;
12574 return OP_AMD64_ADD_MEMBASE_IMM;
12577 return OP_AMD64_SUB_MEMBASE_IMM;
12580 return OP_AMD64_AND_MEMBASE_IMM;
12583 return OP_AMD64_OR_MEMBASE_IMM;
12586 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fold a compare-result opcode whose boolean result is stored via a
 * 1-byte store into the x86/amd64 SETcc-to-memory form
 * (OP_X86_SETEQ_MEMBASE / OP_X86_SETNE_MEMBASE).
 *
 * NOTE(review): the `case` labels selecting between the EQ and NE
 * arms, and the fallback return, are elided by the extraction —
 * presumably OP_ICEQ/OP_ICNEQ (or similar) and -1; confirm against
 * the full source.
 */
12596 op_to_op_store_membase (int store_opcode, int opcode)
12598 #if defined(TARGET_X86) || defined(TARGET_AMD64)
12601 if (store_opcode == OP_STOREI1_MEMBASE_REG)
12602 return OP_X86_SETEQ_MEMBASE;
12604 if (store_opcode == OP_STOREI1_MEMBASE_REG)
12605 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Given an opcode whose first source operand is produced by a load
 * (LOAD_OPCODE), return the x86/amd64 opcode that reads that operand
 * directly from memory (e.g. compare-with-membase, push-membase),
 * avoiding a separate load instruction.
 *
 * NOTE(review): several `case` labels, `#ifdef` branches and the
 * fallback returns are elided by the extraction; the annotations
 * below cover only the visible text.
 */
12613 op_to_op_src1_membase (int load_opcode, int opcode)
12616 /* FIXME: This has sign extension issues */
/* a zero-extending byte load can feed an 8-bit memory compare */
12618 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
12619 return OP_X86_COMPARE_MEMBASE8_IMM;
/* otherwise only full-width (pointer/4-byte) loads can be folded */
12622 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
12627 return OP_X86_PUSH_MEMBASE;
12628 case OP_COMPARE_IMM:
12629 case OP_ICOMPARE_IMM:
12630 return OP_X86_COMPARE_MEMBASE_IMM;
12633 return OP_X86_COMPARE_MEMBASE_REG;
12637 #ifdef TARGET_AMD64
12638 /* FIXME: This has sign extension issues */
12640 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
12641 return OP_X86_COMPARE_MEMBASE8_IMM;
/* under ILP32 an 8-byte load is not pointer-sized, so treat it apart */
12646 #ifdef __mono_ilp32__
12647 if (load_opcode == OP_LOADI8_MEMBASE)
12649 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12651 return OP_X86_PUSH_MEMBASE;
12653 /* FIXME: This only works for 32 bit immediates
12654 case OP_COMPARE_IMM:
12655 case OP_LCOMPARE_IMM:
12656 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12657 return OP_AMD64_COMPARE_MEMBASE_IMM;
12659 case OP_ICOMPARE_IMM:
12660 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
12661 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
12665 #ifdef __mono_ilp32__
12666 if (load_opcode == OP_LOAD_MEMBASE)
12667 return OP_AMD64_ICOMPARE_MEMBASE_REG;
12668 if (load_opcode == OP_LOADI8_MEMBASE)
12670 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12672 return OP_AMD64_COMPARE_MEMBASE_REG;
12675 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
12676 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Given an opcode whose second source operand is produced by a load
 * (LOAD_OPCODE), return the x86/amd64 reg-memory form of the opcode
 * (OP_X86_*_REG_MEMBASE / OP_AMD64_*_REG_MEMBASE), folding the load
 * into the ALU/compare instruction.
 *
 * NOTE(review): several `case` labels and the fallback returns are
 * elided by the extraction; the source opcode for each visible
 * `return` is presumed from the result's name.
 */
12685 op_to_op_src2_membase (int load_opcode, int opcode)
/* x86: only full-width (pointer/4-byte) loads can be folded */
12688 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
12694 return OP_X86_COMPARE_REG_MEMBASE;
12696 return OP_X86_ADD_REG_MEMBASE;
12698 return OP_X86_SUB_REG_MEMBASE;
12700 return OP_X86_AND_REG_MEMBASE;
12702 return OP_X86_OR_REG_MEMBASE;
12704 return OP_X86_XOR_REG_MEMBASE;
12708 #ifdef TARGET_AMD64
/* under ILP32, OP_LOAD_MEMBASE is 4 bytes wide, so it joins the
 * 32-bit group; otherwise it joins the 8-byte group below */
12709 #ifdef __mono_ilp32__
12710 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
12712 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
12716 return OP_AMD64_ICOMPARE_REG_MEMBASE;
12718 return OP_X86_ADD_REG_MEMBASE;
12720 return OP_X86_SUB_REG_MEMBASE;
12722 return OP_X86_AND_REG_MEMBASE;
12724 return OP_X86_OR_REG_MEMBASE;
12726 return OP_X86_XOR_REG_MEMBASE;
12728 #ifdef __mono_ilp32__
12729 } else if (load_opcode == OP_LOADI8_MEMBASE) {
12731 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
12736 return OP_AMD64_COMPARE_REG_MEMBASE;
12738 return OP_AMD64_ADD_REG_MEMBASE;
12740 return OP_AMD64_SUB_REG_MEMBASE;
12742 return OP_AMD64_AND_REG_MEMBASE;
12744 return OP_AMD64_OR_REG_MEMBASE;
12746 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), to which it delegates at the end, but
 * with extra guards for opcodes that are software-emulated on the
 * current architecture (long shifts on 32-bit registers, mul/div).
 *
 * NOTE(review): the bodies of the three `#if` arms are elided by the
 * extraction — presumably they bail out (return -1) for the emulated
 * opcodes so no *_IMM form is substituted; confirm against the full
 * source.
 */
12755 mono_op_to_op_imm_noemul (int opcode)
12758 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
12764 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
12771 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
12776 return mono_op_to_op_imm (opcode);
12781 * mono_handle_global_vregs:
12783 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/*
 * mono_handle_global_vregs:
 *
 *   Make vregs used in more than one bblock 'global', i.e. allocate a
 * MonoInst variable for them (first pass), then demote variables used in
 * only one bblock back to local vregs (second pass), and finally compress
 * the varinfo/vars tables so dead entries are removed.
 */
12787 mono_handle_global_vregs (MonoCompile *cfg)
12789 	gint32 *vreg_to_bb;
12790 	MonoBasicBlock *bb;
/*
 * vreg_to_bb [vreg] holds: 0 = unseen, (block_num + 1) = seen in exactly one
 * bblock, -1 = seen in more than one bblock.
 * NOTE(review): the element size here is sizeof (gint32*), but the array
 * holds gint32 — this over-allocates on 64-bit targets (harmless), and the
 * "+ 1" is applied outside the multiplication; presumably
 * "sizeof (gint32) * (cfg->next_vreg + 1)" was intended — confirm.
 */
12793 	vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
12795 #ifdef MONO_ARCH_SIMD_INTRINSICS
12796 	if (cfg->uses_simd_intrinsics)
12797 		mono_simd_simplify_indirection (cfg);
12800 	/* Find local vregs used in more than one bb */
12801 	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12802 		MonoInst *ins = bb->code;
12803 		int block_num = bb->block_num;
12805 		if (cfg->verbose_level > 2)
12806 			printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
12809 		for (; ins; ins = ins->next) {
12810 			const char *spec = INS_INFO (ins->opcode);
12811 			int regtype = 0, regindex;
12814 			if (G_UNLIKELY (cfg->verbose_level > 2))
12815 				mono_print_ins (ins);
/* By this point all CIL-level opcodes must have been lowered to machine IR. */
12817 			g_assert (ins->opcode >= MONO_CEE_LAST);
/* Visit dreg (index 0) and the up-to-three sregs (indices 1..3) of INS. */
12819 			for (regindex = 0; regindex < 4; regindex ++) {
12822 				if (regindex == 0) {
12823 					regtype = spec [MONO_INST_DEST];
/* ' ' in the spec means the register slot is unused for this opcode. */
12824 					if (regtype == ' ')
12827 				} else if (regindex == 1) {
12828 					regtype = spec [MONO_INST_SRC1];
12829 					if (regtype == ' ')
12832 				} else if (regindex == 2) {
12833 					regtype = spec [MONO_INST_SRC2];
12834 					if (regtype == ' ')
12837 				} else if (regindex == 3) {
12838 					regtype = spec [MONO_INST_SRC3];
12839 					if (regtype == ' ')
12844 #if SIZEOF_REGISTER == 4
12845 			/* In the LLVM case, the long opcodes are not decomposed */
12846 			if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
12848 				 * Since some instructions reference the original long vreg,
12849 				 * and some reference the two component vregs, it is quite hard
12850 				 * to determine when it needs to be global. So be conservative.
12852 				if (!get_vreg_to_inst (cfg, vreg)) {
12853 					mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
12855 					if (cfg->verbose_level > 2)
12856 						printf ("LONG VREG R%d made global.\n", vreg);
12860 				 * Make the component vregs volatile since the optimizations can
12861 				 * get confused otherwise.
/* Convention: a long vreg's low/high halves live at vreg + 1 / vreg + 2. */
12863 				get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
12864 				get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
12868 			g_assert (vreg != -1);
12870 			prev_bb = vreg_to_bb [vreg];
12871 			if (prev_bb == 0) {
12872 				/* 0 is a valid block num */
12873 				vreg_to_bb [vreg] = block_num + 1;
12874 			} else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hardware registers are handled by the regalloc, not made global here. */
12875 				if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
12878 				if (!get_vreg_to_inst (cfg, vreg)) {
12879 					if (G_UNLIKELY (cfg->verbose_level > 2))
12880 						printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Pick the variable's type from the spec regtype (branches partly omitted). */
12884 					if (vreg_is_ref (cfg, vreg))
12885 						mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
12887 						mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
12890 						mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
12893 						mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
12896 						mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
12899 						g_assert_not_reached ();
12903 				/* Flag as having been used in more than one bb */
12904 				vreg_to_bb [vreg] = -1;
12910 	/* If a variable is used in only one bblock, convert it into a local vreg */
12911 	for (i = 0; i < cfg->num_varinfo; i++) {
12912 		MonoInst *var = cfg->varinfo [i];
12913 		MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
12915 		switch (var->type) {
12921 #if SIZEOF_REGISTER == 8
12924 #if !defined(TARGET_X86)
12925 		/* Enabling this screws up the fp stack on x86 */
12928 			if (mono_arch_is_soft_float ())
12931 			/* Arguments are implicitly global */
12932 			/* Putting R4 vars into registers doesn't work currently */
12933 			/* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
12934 			if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
12936 				 * Make sure that the variable's liveness interval doesn't contain a call, since
12937 				 * that would cause the lvreg to be spilled, making the whole optimization
12940 				/* This is too slow for JIT compilation */
12942 				if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
12944 					int def_index, call_index, ins_index;
12945 					gboolean spilled = FALSE;
12950 					for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
12951 						const char *spec = INS_INFO (ins->opcode);
12953 						if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
12954 							def_index = ins_index;
/*
 * NOTE(review): the second clause below is an exact duplicate of the first
 * (SRC1/sreg1 twice); it almost certainly should test
 * spec [MONO_INST_SRC2] / ins->sreg2 — as written, uses of the variable as
 * the second source operand are missed. Bug candidate; confirm and fix in
 * the full source.
 */
12956 						if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
12957 							((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
12958 							if (call_index > def_index) {
12964 						if (MONO_IS_CALL (ins))
12965 							call_index = ins_index;
/* The variable qualifies: demote it by marking the MonoInst dead. */
12975 				if (G_UNLIKELY (cfg->verbose_level > 2))
12976 					printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
12977 				var->flags |= MONO_INST_IS_DEAD;
12978 				cfg->vreg_to_inst [var->dreg] = NULL;
12985 	 * Compress the varinfo and vars tables so the liveness computation is faster and
12986 	 * takes up less space.
12989 	for (i = 0; i < cfg->num_varinfo; ++i) {
12990 		MonoInst *var = cfg->varinfo [i];
12991 		if (pos < i && cfg->locals_start == i)
12992 			cfg->locals_start = pos;
12993 		if (!(var->flags & MONO_INST_IS_DEAD)) {
/* Compact live entries downwards, keeping inst_c0/idx == table index. */
12995 				cfg->varinfo [pos] = cfg->varinfo [i];
12996 				cfg->varinfo [pos]->inst_c0 = pos;
12997 				memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
12998 				cfg->vars [pos].idx = pos;
12999 #if SIZEOF_REGISTER == 4
13000 				if (cfg->varinfo [pos]->type == STACK_I8) {
13001 					/* Modify the two component vars too */
13004 					var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
13005 					var1->inst_c0 = pos;
13006 					var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
13007 					var1->inst_c0 = pos;
13014 	cfg->num_varinfo = pos;
13015 	if (cfg->locals_start > cfg->num_varinfo)
13016 		cfg->locals_start = cfg->num_varinfo;
13020 * mono_spill_global_vars:
13022 * Generate spill code for variables which are not allocated to registers,
13023 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
13024 * code is generated which could be optimized by the local optimization passes.
/*
 * mono_spill_global_vars:
 *
 *   Generate spill code for variables which are not allocated to registers,
 * and replace vregs with their allocated hregs. *need_local_opts is set to
 * TRUE if code is generated which could be optimized by the local
 * optimization passes. NOTE(review): this excerpt omits many lines (e.g. the
 * declarations of lvregs, spec2, store_opcode and several initializations);
 * the comments below describe only the visible code.
 */
13027 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
13029 	MonoBasicBlock *bb;
13031 	int orig_next_vreg;
13032 	guint32 *vreg_to_lvreg;
13034 	guint32 i, lvregs_len;
13035 	gboolean dest_has_lvreg = FALSE;
13036 	guint32 stacktypes [128];
13037 	MonoInst **live_range_start, **live_range_end;
13038 	MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
13039 	int *gsharedvt_vreg_to_idx = NULL;
13041 	*need_local_opts = FALSE;
13043 	memset (spec2, 0, sizeof (spec2));
/* Map INS_INFO () spec characters to stack types, used when allocating lvregs. */
13045 	/* FIXME: Move this function to mini.c */
13046 	stacktypes ['i'] = STACK_PTR;
13047 	stacktypes ['l'] = STACK_I8;
13048 	stacktypes ['f'] = STACK_R8;
13049 #ifdef MONO_ARCH_SIMD_INTRINSICS
13050 	stacktypes ['x'] = STACK_VTYPE;
13053 #if SIZEOF_REGISTER == 4
13054 	/* Create MonoInsts for longs */
13055 	for (i = 0; i < cfg->num_varinfo; i++) {
13056 		MonoInst *ins = cfg->varinfo [i];
13058 		if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
13059 			switch (ins->type) {
13064 				if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
13067 				g_assert (ins->opcode == OP_REGOFFSET);
/* Give the two component vregs (dreg + 1 / dreg + 2) stack slots inside the
 * 64-bit variable's slot, using the target's word-order offsets. */
13069 				tree = get_vreg_to_inst (cfg, ins->dreg + 1);
13071 				tree->opcode = OP_REGOFFSET;
13072 				tree->inst_basereg = ins->inst_basereg;
13073 				tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
13075 				tree = get_vreg_to_inst (cfg, ins->dreg + 2);
13077 				tree->opcode = OP_REGOFFSET;
13078 				tree->inst_basereg = ins->inst_basereg;
13079 				tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
13089 	if (cfg->compute_gc_maps) {
13090 		/* registers need liveness info even for !non refs */
13091 		for (i = 0; i < cfg->num_varinfo; i++) {
13092 			MonoInst *ins = cfg->varinfo [i];
13094 			if (ins->opcode == OP_REGVAR)
13095 				ins->flags |= MONO_INST_GC_TRACK;
/* For gsharedvt methods, assign each variable-sized local an info-slot index;
 * gsharedvt_vreg_to_idx [dreg] stores idx + 1 (locals) or -1 (by-ref args),
 * 0 meaning "not a gsharedvt variable". */
13099 	if (cfg->gsharedvt) {
13100 		gsharedvt_vreg_to_idx = mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
13102 		for (i = 0; i < cfg->num_varinfo; ++i) {
13103 			MonoInst *ins = cfg->varinfo [i];
13106 			if (mini_is_gsharedvt_variable_type (cfg, ins->inst_vtype)) {
13107 				if (i >= cfg->locals_start) {
13109 					idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
13110 					gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
13111 					ins->opcode = OP_GSHAREDVT_LOCAL;
13112 					ins->inst_imm = idx;
13115 					gsharedvt_vreg_to_idx [ins->dreg] = -1;
13116 					ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
13122 	/* FIXME: widening and truncation */
13125 	 * As an optimization, when a variable allocated to the stack is first loaded into
13126 	 * an lvreg, we will remember the lvreg and use it the next time instead of loading
13127 	 * the variable again.
13129 	orig_next_vreg = cfg->next_vreg;
13130 	vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
/* NOTE(review): lvregs is a fixed 1024-entry scratch list; overflow is only
 * guarded by g_assert (lvregs_len < 1024) below. */
13131 	lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
13135 	 * These arrays contain the first and last instructions accessing a given
13137 	 * Since we emit bblocks in the same order we process them here, and we
13138 	 * don't split live ranges, these will precisely describe the live range of
13139 	 * the variable, i.e. the instruction range where a valid value can be found
13140 	 * in the variables location.
13141 	 * The live range is computed using the liveness info computed by the liveness pass.
13142 	 * We can't use vmv->range, since that is an abstract live range, and we need
13143 	 * one which is instruction precise.
13144 	 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
13146 	/* FIXME: Only do this if debugging info is requested */
13147 	live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
13148 	live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
13149 	live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13150 	live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13152 	/* Add spill loads/stores */
13153 	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13156 		if (cfg->verbose_level > 2)
13157 			printf ("\nSPILL BLOCK %d:\n", bb->block_num);
13159 		/* Clear vreg_to_lvreg array */
13160 		for (i = 0; i < lvregs_len; i++)
13161 			vreg_to_lvreg [lvregs [i]] = 0;
13165 		MONO_BB_FOR_EACH_INS (bb, ins) {
13166 			const char *spec = INS_INFO (ins->opcode);
13167 			int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
13168 			gboolean store, no_lvreg;
13169 			int sregs [MONO_MAX_SRC_REGS];
13171 			if (G_UNLIKELY (cfg->verbose_level > 2))
13172 				mono_print_ins (ins);
13174 			if (ins->opcode == OP_NOP)
13178 			 * We handle LDADDR here as well, since it can only be decomposed
13179 			 * when variable addresses are known.
13181 			if (ins->opcode == OP_LDADDR) {
13182 				MonoInst *var = ins->inst_p0;
13184 				if (var->opcode == OP_VTARG_ADDR) {
13185 					/* Happens on SPARC/S390 where vtypes are passed by reference */
/* NOTE(review): vtaddr == var->inst_left, so the mixed naming in the two
 * branches below is consistent, just inconsistent style. */
13186 					MonoInst *vtaddr = var->inst_left;
13187 					if (vtaddr->opcode == OP_REGVAR) {
13188 						ins->opcode = OP_MOVE;
13189 						ins->sreg1 = vtaddr->dreg;
13191 					else if (var->inst_left->opcode == OP_REGOFFSET) {
13192 						ins->opcode = OP_LOAD_MEMBASE;
13193 						ins->inst_basereg = vtaddr->inst_basereg;
13194 						ins->inst_offset = vtaddr->inst_offset;
13197 				} else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg] < 0) {
13198 					/* gsharedvt arg passed by ref */
13199 					g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
13201 					ins->opcode = OP_LOAD_MEMBASE;
13202 					ins->inst_basereg = var->inst_basereg;
13203 					ins->inst_offset = var->inst_offset;
13204 				} else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg]) {
13205 					MonoInst *load, *load2, *load3;
13206 					int idx = gsharedvt_vreg_to_idx [var->dreg] - 1;
13207 					int reg1, reg2, reg3;
13208 					MonoInst *info_var = cfg->gsharedvt_info_var;
13209 					MonoInst *locals_var = cfg->gsharedvt_locals_var;
13213 					 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
13216 					g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
13218 					g_assert (info_var);
13219 					g_assert (locals_var);
13221 					/* Mark the instruction used to compute the locals var as used */
13222 					cfg->gsharedvt_locals_var_ins = NULL;
13224 					/* Load the offset */
13225 					if (info_var->opcode == OP_REGOFFSET) {
13226 						reg1 = alloc_ireg (cfg);
13227 						NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
13228 					} else if (info_var->opcode == OP_REGVAR) {
13230 						reg1 = info_var->dreg;
13232 						g_assert_not_reached ();
/* reg2 = info->entries [idx], the offset of this local in the locals area. */
13234 					reg2 = alloc_ireg (cfg);
13235 					NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
13236 					/* Load the locals area address */
13237 					reg3 = alloc_ireg (cfg);
13238 					if (locals_var->opcode == OP_REGOFFSET) {
13239 						NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
13240 					} else if (locals_var->opcode == OP_REGVAR) {
13241 						NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
13243 						g_assert_not_reached ();
13245 					/* Compute the address */
13246 					ins->opcode = OP_PADD;
/* Insert the three loads before INS, in dependency order. */
13250 					mono_bblock_insert_before_ins (bb, ins, load3);
13251 					mono_bblock_insert_before_ins (bb, load3, load2);
13253 						mono_bblock_insert_before_ins (bb, load2, load);
13255 					g_assert (var->opcode == OP_REGOFFSET);
13257 					ins->opcode = OP_ADD_IMM;
13258 					ins->sreg1 = var->inst_basereg;
13259 					ins->inst_imm = var->inst_offset;
/* LDADDR was rewritten into simpler opcodes: give the local passes a chance. */
13262 				*need_local_opts = TRUE;
13263 				spec = INS_INFO (ins->opcode);
13266 			if (ins->opcode < MONO_CEE_LAST) {
13267 				mono_print_ins (ins);
13268 				g_assert_not_reached ();
13272 			 * Store opcodes have destbasereg in the dreg, but in reality, it is an
/* The dreg/sreg2 swap plus the spec2 remapping below treat the store's base
 * register as an additional source operand for the rest of this pass. */
13276 			if (MONO_IS_STORE_MEMBASE (ins)) {
13277 				tmp_reg = ins->dreg;
13278 				ins->dreg = ins->sreg2;
13279 				ins->sreg2 = tmp_reg;
13282 				spec2 [MONO_INST_DEST] = ' ';
13283 				spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
13284 				spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
13285 				spec2 [MONO_INST_SRC3] = ' ';
13287 			} else if (MONO_IS_STORE_MEMINDEX (ins))
13288 				g_assert_not_reached ();
13293 			if (G_UNLIKELY (cfg->verbose_level > 2)) {
13294 				printf ("\t %.3s %d", spec, ins->dreg);
13295 				num_sregs = mono_inst_get_src_registers (ins, sregs);
13296 				for (srcindex = 0; srcindex < num_sregs; ++srcindex)
13297 					printf (" %d", sregs [srcindex]);
/***************/
/*    DREG     */
/***************/
13304 			regtype = spec [MONO_INST_DEST];
13305 			g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
13308 			if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
13309 				MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
13310 				MonoInst *store_ins;
13312 				MonoInst *def_ins = ins;
13313 				int dreg = ins->dreg; /* The original vreg */
13315 				store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
13317 				if (var->opcode == OP_REGVAR) {
/* The variable lives in a hard register: just rename the dreg. */
13318 					ins->dreg = var->dreg;
13319 				} else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
13321 					 * Instead of emitting a load+store, use a _membase opcode.
13323 					g_assert (var->opcode == OP_REGOFFSET);
13324 					if (ins->opcode == OP_MOVE) {
13328 						ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
13329 						ins->inst_basereg = var->inst_basereg;
13330 						ins->inst_offset = var->inst_offset;
13333 					spec = INS_INFO (ins->opcode);
13337 					g_assert (var->opcode == OP_REGOFFSET);
13339 					prev_dreg = ins->dreg;
13341 					/* Invalidate any previous lvreg for this vreg */
13342 					vreg_to_lvreg [ins->dreg] = 0;
13346 					if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
13348 						store_opcode = OP_STOREI8_MEMBASE_REG;
/* Redirect the definition into a fresh lvreg; a store follows below. */
13351 					ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
13353 #if SIZEOF_REGISTER != 8
13354 					if (regtype == 'l') {
13355 						NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
13356 						mono_bblock_insert_after_ins (bb, ins, store_ins);
13357 						NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
13358 						mono_bblock_insert_after_ins (bb, ins, store_ins);
13359 						def_ins = store_ins;
13364 						g_assert (store_opcode != OP_STOREV_MEMBASE);
13366 						/* Try to fuse the store into the instruction itself */
13367 						/* FIXME: Add more instructions */
13368 						if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
13369 							ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
13370 							ins->inst_imm = ins->inst_c0;
13371 							ins->inst_destbasereg = var->inst_basereg;
13372 							ins->inst_offset = var->inst_offset;
13373 							spec = INS_INFO (ins->opcode);
13374 						} else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
13375 							ins->opcode = store_opcode;
13376 							ins->inst_destbasereg = var->inst_basereg;
13377 							ins->inst_offset = var->inst_offset;
/* INS became a store: apply the same dreg/sreg2 swap + spec2 remap as above. */
13381 							tmp_reg = ins->dreg;
13382 							ins->dreg = ins->sreg2;
13383 							ins->sreg2 = tmp_reg;
13386 							spec2 [MONO_INST_DEST] = ' ';
13387 							spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
13388 							spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
13389 							spec2 [MONO_INST_SRC3] = ' ';
13391 						} else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
13392 							// FIXME: The backends expect the base reg to be in inst_basereg
13393 							ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
13395 							ins->inst_basereg = var->inst_basereg;
13396 							ins->inst_offset = var->inst_offset;
13397 							spec = INS_INFO (ins->opcode);
13399 							/* printf ("INS: "); mono_print_ins (ins); */
13400 							/* Create a store instruction */
13401 							NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
13403 							/* Insert it after the instruction */
13404 							mono_bblock_insert_after_ins (bb, ins, store_ins);
13406 							def_ins = store_ins;
13409 							 * We can't assign ins->dreg to var->dreg here, since the
13410 							 * sregs could use it. So set a flag, and do it after
13413 							if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
13414 								dest_has_lvreg = TRUE;
13419 				if (def_ins && !live_range_start [dreg]) {
13420 					live_range_start [dreg] = def_ins;
13421 					live_range_start_bb [dreg] = bb;
13424 				if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
13427 					MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
13428 					tmp->inst_c1 = dreg;
13429 					mono_bblock_insert_after_ins (bb, def_ins, tmp);
/************/
/*  SREGS   */
/************/
13436 			num_sregs = mono_inst_get_src_registers (ins, sregs);
/* NOTE(review): iterates a fixed 3 slots rather than num_sregs; unused slots
 * have regtype ' ' / sreg -1 (see the assert below), so this is presumably
 * intentional but worth confirming against MONO_MAX_SRC_REGS. */
13437 			for (srcindex = 0; srcindex < 3; ++srcindex) {
13438 				regtype = spec [MONO_INST_SRC1 + srcindex];
13439 				sreg = sregs [srcindex];
13441 				g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
13442 				if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
13443 					MonoInst *var = get_vreg_to_inst (cfg, sreg);
13444 					MonoInst *use_ins = ins;
13445 					MonoInst *load_ins;
13446 					guint32 load_opcode;
13448 					if (var->opcode == OP_REGVAR) {
13449 						sregs [srcindex] = var->dreg;
13450 						//mono_inst_set_src_registers (ins, sregs);
13451 						live_range_end [sreg] = use_ins;
13452 						live_range_end_bb [sreg] = bb;
13454 						if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13457 							MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13458 							/* var->dreg is a hreg */
13459 							tmp->inst_c1 = sreg;
13460 							mono_bblock_insert_after_ins (bb, ins, tmp);
13466 					g_assert (var->opcode == OP_REGOFFSET);
13468 					load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
13470 					g_assert (load_opcode != OP_LOADV_MEMBASE);
13472 					if (vreg_to_lvreg [sreg]) {
13473 						g_assert (vreg_to_lvreg [sreg] != -1);
13475 						/* The variable is already loaded to an lvreg */
13476 						if (G_UNLIKELY (cfg->verbose_level > 2))
13477 							printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
13478 						sregs [srcindex] = vreg_to_lvreg [sreg];
13479 						//mono_inst_set_src_registers (ins, sregs);
13483 					/* Try to fuse the load into the instruction */
13484 					if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
13485 						ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
13486 						sregs [0] = var->inst_basereg;
13487 						//mono_inst_set_src_registers (ins, sregs);
13488 						ins->inst_offset = var->inst_offset;
13489 					} else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
13490 						ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
13491 						sregs [1] = var->inst_basereg;
13492 						//mono_inst_set_src_registers (ins, sregs);
13493 						ins->inst_offset = var->inst_offset;
13495 						if (MONO_IS_REAL_MOVE (ins)) {
13496 							ins->opcode = OP_NOP;
13499 							//printf ("%d ", srcindex); mono_print_ins (ins);
13501 							sreg = alloc_dreg (cfg, stacktypes [regtype]);
13503 							if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
13504 								if (var->dreg == prev_dreg) {
13506 									 * sreg refers to the value loaded by the load
13507 									 * emitted below, but we need to use ins->dreg
13508 									 * since it refers to the store emitted earlier.
13512 								g_assert (sreg != -1);
13513 								vreg_to_lvreg [var->dreg] = sreg;
13514 								g_assert (lvregs_len < 1024);
13515 								lvregs [lvregs_len ++] = var->dreg;
13519 						sregs [srcindex] = sreg;
13520 						//mono_inst_set_src_registers (ins, sregs);
13522 #if SIZEOF_REGISTER != 8
13523 						if (regtype == 'l') {
/* 32-bit longs: load the two halves (MS first so use_ins ends up correct). */
13524 							NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
13525 							mono_bblock_insert_before_ins (bb, ins, load_ins);
13526 							NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
13527 							mono_bblock_insert_before_ins (bb, ins, load_ins);
13528 							use_ins = load_ins;
13533 #if SIZEOF_REGISTER == 4
13534 							g_assert (load_opcode != OP_LOADI8_MEMBASE);
13536 							NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
13537 							mono_bblock_insert_before_ins (bb, ins, load_ins);
13538 							use_ins = load_ins;
13542 					if (var->dreg < orig_next_vreg) {
13543 						live_range_end [var->dreg] = use_ins;
13544 						live_range_end_bb [var->dreg] = bb;
13547 					if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13550 						MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13551 						tmp->inst_c1 = var->dreg;
13552 						mono_bblock_insert_after_ins (bb, ins, tmp);
13556 			mono_inst_set_src_registers (ins, sregs);
/* Deferred from the DREG section: now that the sregs are processed it is
 * safe to record the lvreg holding the destination variable's value. */
13558 			if (dest_has_lvreg) {
13559 				g_assert (ins->dreg != -1);
13560 				vreg_to_lvreg [prev_dreg] = ins->dreg;
13561 				g_assert (lvregs_len < 1024);
13562 				lvregs [lvregs_len ++] = prev_dreg;
13563 				dest_has_lvreg = FALSE;
/* Undo the dreg/sreg2 swap performed for store opcodes above. */
13567 				tmp_reg = ins->dreg;
13568 				ins->dreg = ins->sreg2;
13569 				ins->sreg2 = tmp_reg;
13572 			if (MONO_IS_CALL (ins)) {
13573 				/* Clear vreg_to_lvreg array */
13574 				for (i = 0; i < lvregs_len; i++)
13575 					vreg_to_lvreg [lvregs [i]] = 0;
13577 			} else if (ins->opcode == OP_NOP) {
13579 				MONO_INST_NULLIFY_SREGS (ins);
13582 			if (cfg->verbose_level > 2)
13583 				mono_print_ins_index (1, ins);
13586 		/* Extend the live range based on the liveness info */
13587 		if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
13588 			for (i = 0; i < cfg->num_varinfo; i ++) {
13589 				MonoMethodVar *vi = MONO_VARINFO (cfg, i);
13591 				if (vreg_is_volatile (cfg, vi->vreg))
13592 					/* The liveness info is incomplete */
13595 				if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
13596 					/* Live from at least the first ins of this bb */
13597 					live_range_start [vi->vreg] = bb->code;
13598 					live_range_start_bb [vi->vreg] = bb;
13601 				if (mono_bitset_test_fast (bb->live_out_set, i)) {
13602 					/* Live at least until the last ins of this bb */
13603 					live_range_end [vi->vreg] = bb->last_ins;
13604 					live_range_end_bb [vi->vreg] = bb;
13610 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
13612 	 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
13613 	 * by storing the current native offset into MonoMethodVar->live_range_start/end.
13615 	if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
13616 		for (i = 0; i < cfg->num_varinfo; ++i) {
13617 			int vreg = MONO_VARINFO (cfg, i)->vreg;
13620 			if (live_range_start [vreg]) {
13621 				MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
13623 				ins->inst_c1 = vreg;
13624 				mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
13626 			if (live_range_end [vreg]) {
13627 				MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
13629 				ins->inst_c1 = vreg;
13630 				if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
13631 					mono_add_ins_to_end (live_range_end_bb [vreg], ins);
13633 					mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
13639 	if (cfg->gsharedvt_locals_var_ins) {
13640 		/* Nullify if unused */
13641 		cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
13642 		cfg->gsharedvt_locals_var_ins->inst_imm = 0;
13645 	g_free (live_range_start);
13646 	g_free (live_range_end);
13647 	g_free (live_range_start_bb);
13648 	g_free (live_range_end_bb);
13653 * - use 'iadd' instead of 'int_add'
13654 * - handling ovf opcodes: decompose in method_to_ir.
13655 * - unify iregs/fregs
13656 * -> partly done, the missing parts are:
13657 * - a more complete unification would involve unifying the hregs as well, so
13658 * code wouldn't need if (fp) all over the place. but that would mean the hregs
13659 * would no longer map to the machine hregs, so the code generators would need to
13660 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
13661 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
13662 * fp/non-fp branches speeds it up by about 15%.
13663 * - use sext/zext opcodes instead of shifts
13665 * - get rid of TEMPLOADs if possible and use vregs instead
13666 * - clean up usage of OP_P/OP_ opcodes
13667 * - cleanup usage of DUMMY_USE
13668 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
13670 * - set the stack type and allocate a dreg in the EMIT_NEW macros
13671 * - get rid of all the <foo>2 stuff when the new JIT is ready.
13672 * - make sure handle_stack_args () is called before the branch is emitted
13673 * - when the new IR is done, get rid of all unused stuff
13674 * - COMPARE/BEQ as separate instructions or unify them ?
13675 * - keeping them separate allows specialized compare instructions like
13676 * compare_imm, compare_membase
13677 * - most back ends unify fp compare+branch, fp compare+ceq
13678 * - integrate mono_save_args into inline_method
13679 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
13680 * - handle long shift opts on 32 bit platforms somehow: they require
13681 * 3 sregs (2 for arg1 and 1 for arg2)
13682 * - make byref a 'normal' type.
13683 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
13684 * variable if needed.
13685 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
13686 * like inline_method.
13687 * - remove inlining restrictions
13688 * - fix LNEG and enable cfold of INEG
13689 * - generalize x86 optimizations like ldelema as a peephole optimization
13690 * - add store_mem_imm for amd64
13691 * - optimize the loading of the interruption flag in the managed->native wrappers
13692 * - avoid special handling of OP_NOP in passes
13693 * - move code inserting instructions into one function/macro.
13694 * - try a coalescing phase after liveness analysis
13695 * - add float -> vreg conversion + local optimizations on !x86
13696 * - figure out how to handle decomposed branches during optimizations, ie.
13697 * compare+branch, op_jump_table+op_br etc.
13698 * - promote RuntimeXHandles to vregs
13699 * - vtype cleanups:
13700 * - add a NEW_VARLOADA_VREG macro
13701 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
13702 * accessing vtype fields.
13703 * - get rid of I8CONST on 64 bit platforms
13704 * - dealing with the increase in code size due to branches created during opcode
13706 * - use extended basic blocks
13707 * - all parts of the JIT
13708 * - handle_global_vregs () && local regalloc
13709 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
13710 * - sources of increase in code size:
13713 * - isinst and castclass
13714 * - lvregs not allocated to global registers even if used multiple times
13715 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
13717 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
13718 * - add all micro optimizations from the old JIT
13719 * - put tree optimizations into the deadce pass
13720 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
13721 * specific function.
13722 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
13723 * fcompare + branchCC.
13724 * - create a helper function for allocating a stack slot, taking into account
13725 * MONO_CFG_HAS_SPILLUP.
13727 * - merge the ia64 switch changes.
13728 * - optimize mono_regstate2_alloc_int/float.
13729 * - fix the pessimistic handling of variables accessed in exception handler blocks.
13730 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
13731 * parts of the tree could be separated by other instructions, killing the tree
13732 * arguments, or stores killing loads etc. Also, should we fold loads into other
13733 * instructions if the result of the load is used multiple times ?
13734 * - make the REM_IMM optimization in mini-x86.c arch-independent.
13735 * - LAST MERGE: 108395.
13736 * - when returning vtypes in registers, generate IR and append it to the end of the
13737 * last bb instead of doing it in the epilog.
13738 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
13746 - When to decompose opcodes:
13747 - earlier: this makes some optimizations hard to implement, since the low level IR
13748 no longer contains the necessary information. But it is easier to do.
13749 - later: harder to implement, enables more optimizations.
13750 - Branches inside bblocks:
13751 - created when decomposing complex opcodes.
13752 - branches to another bblock: harmless, but not tracked by the branch
13753 optimizations, so need to branch to a label at the start of the bblock.
13754 - branches to inside the same bblock: very problematic, trips up the local
13755 reg allocator. Can be fixed by splitting the current bblock, but that is a
13756 complex operation, since some local vregs can become global vregs etc.
13757 - Local/global vregs:
13758 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
13759 local register allocator.
13760 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
13761 structure, created by mono_create_var (). Assigned to hregs or the stack by
13762 the global register allocator.
13763 - When to do optimizations like alu->alu_imm:
13764 - earlier -> saves work later on since the IR will be smaller/simpler
13765 - later -> can work on more instructions
13766 - Handling of valuetypes:
13767 - When a vtype is pushed on the stack, a new temporary is created, an
13768 instruction computing its address (LDADDR) is emitted and pushed on
13769 the stack. Need to optimize cases when the vtype is used immediately as in
13770 argument passing, stloc etc.
13771 - Instead of the to_end stuff in the old JIT, simply call the function handling
13772 the values on the stack before emitting the last instruction of the bb.
13775 #endif /* DISABLE_JIT */