2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/assembly.h>
38 #include <mono/metadata/attrdefs.h>
39 #include <mono/metadata/loader.h>
40 #include <mono/metadata/tabledefs.h>
41 #include <mono/metadata/class.h>
42 #include <mono/metadata/object.h>
43 #include <mono/metadata/exception.h>
44 #include <mono/metadata/opcodes.h>
45 #include <mono/metadata/mono-endian.h>
46 #include <mono/metadata/tokentype.h>
47 #include <mono/metadata/tabledefs.h>
48 #include <mono/metadata/marshal.h>
49 #include <mono/metadata/debug-helpers.h>
50 #include <mono/metadata/mono-debug.h>
51 #include <mono/metadata/gc-internal.h>
52 #include <mono/metadata/security-manager.h>
53 #include <mono/metadata/threads-types.h>
54 #include <mono/metadata/security-core-clr.h>
55 #include <mono/metadata/monitor.h>
56 #include <mono/metadata/profiler-private.h>
57 #include <mono/metadata/profiler.h>
58 #include <mono/metadata/debug-mono-symfile.h>
59 #include <mono/utils/mono-compiler.h>
60 #include <mono/utils/mono-memory-model.h>
61 #include <mono/metadata/mono-basic-block.h>
68 #include "jit-icalls.h"
70 #include "debugger-agent.h"
/* Relative cost of a branch, used by the inlining heuristics. */
72 #define BRANCH_COST 10
/* Maximum IL size (in bytes) of a method considered for inlining. */
73 #define INLINE_LENGTH_LIMIT 20
/*
 * INLINE_FAILURE:
 *   Abort inlining of the current callee: log MSG when verbose, then jump
 *   to the inline_failure label. Only triggers while compiling an inlined
 *   body (cfg->method != method) of a non-wrapper method.
 *   NOTE(review): the macro's closing lines are not visible in this chunk.
 */
74 #define INLINE_FAILURE(msg) do { \
75 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE)) { \
76 if (cfg->verbose_level >= 2) \
77 printf ("inline failed: %s\n", msg); \
78 goto inline_failure; \
/*
 * CHECK_CFG_EXCEPTION:
 *   Bail out of IR generation if an exception has already been recorded
 *   on the MonoCompile. NOTE(review): closing lines not visible here.
 */
81 #define CHECK_CFG_EXCEPTION do {\
82 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
/*
 * METHOD_ACCESS_FAILURE:
 *   Record a method-access compile failure naming both the caller and the
 *   inaccessible callee, free the temporary name strings, and jump to
 *   exception_exit.
 */
85 #define METHOD_ACCESS_FAILURE do { \
86 char *method_fname = mono_method_full_name (method, TRUE); \
87 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
88 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS); \
89 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
90 g_free (method_fname); \
91 g_free (cil_method_fname); \
92 goto exception_exit; \
/*
 * FIELD_ACCESS_FAILURE:
 *   Same as METHOD_ACCESS_FAILURE but for an inaccessible field.
 */
94 #define FIELD_ACCESS_FAILURE do { \
95 char *method_fname = mono_method_full_name (method, TRUE); \
96 char *field_fname = mono_field_full_name (field); \
97 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS); \
98 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
99 g_free (method_fname); \
100 g_free (field_fname); \
101 goto exception_exit; \
/*
 * GENERIC_SHARING_FAILURE:
 *   When compiling with generic sharing, record that sharing failed for
 *   OPCODE and jump to exception_exit (optionally logging when verbose).
 *   NOTE(review): the macro's closing lines are not visible in this chunk.
 */
103 #define GENERIC_SHARING_FAILURE(opcode) do { \
104 if (cfg->generic_sharing_context) { \
105 if (cfg->verbose_level > 2) \
106 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
107 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
108 goto exception_exit; \
/*
 * GSHAREDVT_FAILURE:
 *   Like GENERIC_SHARING_FAILURE but specific to gsharedvt compilation;
 *   also stores a detailed message (including file/line) on the cfg.
 */
111 #define GSHAREDVT_FAILURE(opcode) do { \
112 if (cfg->gsharedvt) { \
113 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __FILE__, __LINE__); \
114 if (cfg->verbose_level >= 2) \
115 printf ("%s\n", cfg->exception_message); \
116 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
117 goto exception_exit; \
/* Record an out-of-memory compile failure and jump to exception_exit. */
120 #define OUT_OF_MEMORY_FAILURE do { \
121 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
122 goto exception_exit; \
/* Mark the method as not AOT-compilable, logging the location when verbose. */
124 #define DISABLE_AOT(cfg) do { \
125 if ((cfg)->verbose_level >= 2) \
126 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
127 (cfg)->disable_aot = TRUE; \
130 /* Determine whether 'ins' represents a load of the 'this' argument */
131 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for opcode-mapping helpers defined later in this file. */
133 static int ldind_to_load_membase (int opcode);
134 static int stind_to_store_membase (int opcode);
136 int mono_op_to_op_imm (int opcode);
137 int mono_op_to_op_imm_noemul (int opcode);
139 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
141 /* helper methods signatures */
/* Cached icall signatures for the JIT helper trampolines; populated once
 * by mono_create_helper_signatures () below. */
142 static MonoMethodSignature *helper_sig_class_init_trampoline = NULL;
143 static MonoMethodSignature *helper_sig_domain_get = NULL;
144 static MonoMethodSignature *helper_sig_generic_class_init_trampoline = NULL;
145 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm = NULL;
146 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline = NULL;
147 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline = NULL;
148 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm = NULL;
151 * Instruction metadata
/* Expand each opcode in mini-ops.h into its dest/src1/src2(/src3) register
 * kind characters; the tables below are built by re-including mini-ops.h
 * with different MINI_OP/MINI_OP3 definitions. */
159 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
160 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
166 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
171 /* keep in sync with the enum in mini.h */
174 #include "mini-ops.h"
/* Second expansion: compute the number of source registers per opcode. */
179 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
180 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
182 * This should contain the index of the last sreg + 1. This is not the same
183 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
185 const gint8 ins_sreg_counts[] = {
186 #include "mini-ops.h"
/* Initialize a MonoMethodVar's liveness range to "unused". */
191 #define MONO_INIT_VARINFO(vi,id) do { \
192 (vi)->range.first_use.pos.bid = 0xffff; \
/*
 * mono_inst_set_src_registers:
 *
 *   Copy the first three entries of REGS into the source registers of INS.
 */
198 mono_inst_set_src_registers (MonoInst *ins, int *regs)
200 ins->sreg1 = regs [0];
201 ins->sreg2 = regs [1];
202 ins->sreg3 = regs [2];
/* Public wrappers around the vreg allocators: allocate a fresh virtual
 * register of the given kind (integer, long, float, pointer-sized, or
 * destination register matching STACK_TYPE). */
206 mono_alloc_ireg (MonoCompile *cfg)
208 return alloc_ireg (cfg);
212 mono_alloc_lreg (MonoCompile *cfg)
214 return alloc_lreg (cfg);
218 mono_alloc_freg (MonoCompile *cfg)
220 return alloc_freg (cfg);
224 mono_alloc_preg (MonoCompile *cfg)
226 return alloc_preg (cfg);
230 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
232 return alloc_dreg (cfg, stack_type);
236 * mono_alloc_ireg_ref:
238 * Allocate an IREG, and mark it as holding a GC ref.
241 mono_alloc_ireg_ref (MonoCompile *cfg)
243 return alloc_ireg_ref (cfg);
247 * mono_alloc_ireg_mp:
249 * Allocate an IREG, and mark it as holding a managed pointer.
252 mono_alloc_ireg_mp (MonoCompile *cfg)
254 return alloc_ireg_mp (cfg);
258 * mono_alloc_ireg_copy:
260 * Allocate an IREG with the same GC type as VREG.
263 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
/* Propagate the GC-tracking kind (ref / managed pointer / plain int)
 * from VREG to the newly allocated register. */
265 if (vreg_is_ref (cfg, vreg))
266 return alloc_ireg_ref (cfg);
267 else if (vreg_is_mp (cfg, vreg))
268 return alloc_ireg_mp (cfg);
270 return alloc_ireg (cfg);
/*
 * mono_type_to_regmove:
 *
 *   Return the move opcode appropriate for values of TYPE (hedged: the
 *   return values themselves are on lines not visible in this chunk).
 *   Enums are unwrapped to their base type; generic instances fall back
 *   to the container class; type variables require a sharing context.
 */
274 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
279 type = mini_replace_type (type);
281 switch (type->type) {
284 case MONO_TYPE_BOOLEAN:
296 case MONO_TYPE_FNPTR:
298 case MONO_TYPE_CLASS:
299 case MONO_TYPE_STRING:
300 case MONO_TYPE_OBJECT:
301 case MONO_TYPE_SZARRAY:
302 case MONO_TYPE_ARRAY:
306 #if SIZEOF_REGISTER == 8
315 case MONO_TYPE_VALUETYPE:
/* Enums behave like their underlying primitive type. */
316 if (type->data.klass->enumtype) {
317 type = mono_class_enum_basetype (type->data.klass);
320 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
323 case MONO_TYPE_TYPEDBYREF:
325 case MONO_TYPE_GENERICINST:
326 type = &type->data.generic_class->container_class->byval_arg;
330 g_assert (cfg->generic_sharing_context);
331 if (mini_type_var_is_vt (cfg, type))
336 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb:
 *
 *   Debug helper: print MSG, the basic block's in/out edges (with dfn
 *   numbers), and every instruction in the block.
 */
342 mono_print_bb (MonoBasicBlock *bb, const char *msg)
347 printf ("\n%s %d: [IN: ", msg, bb->block_num);
348 for (i = 0; i < bb->in_count; ++i)
349 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
351 for (i = 0; i < bb->out_count; ++i)
352 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
354 for (tree = bb->code; tree; tree = tree->next)
355 mono_print_ins_index (-1, tree);
/*
 * mono_create_helper_signatures:
 *
 *   Populate the cached helper_sig_* trampoline signatures declared above.
 *   Must run before any code that emits calls through those signatures.
 */
359 mono_create_helper_signatures (void)
361 helper_sig_domain_get = mono_create_icall_signature ("ptr");
362 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
363 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
364 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
365 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
366 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
367 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
371 * When using gsharedvt, some instantiations might be verifiable, and some might not be. i.e.
372 * foo<T> (int i) { ldarg.0; box T; }
/*
 * UNVERIFIED:
 *   Handle unverifiable IL: under gsharedvt, fall back to compiling the
 *   concrete instantiation; otherwise (lines not fully visible here) the
 *   method is flagged unverifiable. Honors break_on_unverified for debugging.
 */
374 #define UNVERIFIED do { \
375 if (cfg->gsharedvt) { \
376 if (cfg->verbose_level > 2) \
377 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
378 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
379 goto exception_exit; \
381 if (mini_get_debug_options ()->break_on_unverified) \
/* Jump to load_error (optionally breaking into the debugger first). */
387 #define LOAD_ERROR do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto load_error; } while (0)
/* Same as LOAD_ERROR but records the failing class on the cfg. */
389 #define TYPE_LOAD_ERROR(klass) do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else { cfg->exception_ptr = klass; goto load_error; } } while (0)
/*
 * GET_BBLOCK:
 *   Look up (or lazily create and register) the basic block starting at
 *   IL offset IP; rejects out-of-range offsets as unverifiable.
 */
391 #define GET_BBLOCK(cfg,tblock,ip) do { \
392 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
394 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
395 NEW_BBLOCK (cfg, (tblock)); \
396 (tblock)->cil_code = (ip); \
397 ADD_BBLOCK (cfg, (tblock)); \
401 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/*
 * EMIT_NEW_X86_LEA:
 *   Emit an x86/amd64 LEA computing sr1 + (sr2 << shift) + imm into a
 *   fresh managed-pointer register.
 */
402 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
403 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
404 (dest)->dreg = alloc_ireg_mp ((cfg)); \
405 (dest)->sreg1 = (sr1); \
406 (dest)->sreg2 = (sr2); \
407 (dest)->inst_imm = (imm); \
408 (dest)->backend.shift_amount = (shift); \
409 MONO_ADD_INS ((cfg)->cbb, (dest)); \
/*
 * ADD_WIDEN_OP (64-bit targets only):
 *   When mixing a native-pointer operand with an I4 operand, sign-extend
 *   the I4 side to pointer width before the binary op. No-op on 32-bit.
 */
413 #if SIZEOF_REGISTER == 8
414 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
415 /* FIXME: Need to add many more cases */ \
416 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
418 int dr = alloc_preg (cfg); \
419 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
420 (ins)->sreg2 = widen->dreg; \
424 #define ADD_WIDEN_OP(ins, arg1, arg2)
/*
 * ADD_BINOP:
 *   Pop two values from the eval stack (sp), type-check them via
 *   type_from_op (), widen if needed, allocate a dreg, emit the
 *   instruction and push the (possibly decomposed) result back.
 */
427 #define ADD_BINOP(op) do { \
428 MONO_INST_NEW (cfg, ins, (op)); \
430 ins->sreg1 = sp [0]->dreg; \
431 ins->sreg2 = sp [1]->dreg; \
432 type_from_op (ins, sp [0], sp [1]); \
434 /* Have to insert a widening op */ \
435 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
436 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
437 MONO_ADD_INS ((cfg)->cbb, (ins)); \
438 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
/* Unary counterpart of ADD_BINOP: one operand popped, result pushed. */
441 #define ADD_UNOP(op) do { \
442 MONO_INST_NEW (cfg, ins, (op)); \
444 ins->sreg1 = sp [0]->dreg; \
445 type_from_op (ins, sp [0], NULL); \
447 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
448 MONO_ADD_INS ((cfg)->cbb, (ins)); \
449 *sp++ = mono_decompose_opcode (cfg, ins); \
/*
 * ADD_BINCOND:
 *   Emit a compare + conditional branch pair for a two-way IL branch:
 *   links the current bblock to both the taken target and the
 *   fall-through block (NEXT_BLOCK when known, otherwise the block at
 *   ip), spilling any live stack values first.
 */
452 #define ADD_BINCOND(next_block) do { \
455 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
456 cmp->sreg1 = sp [0]->dreg; \
457 cmp->sreg2 = sp [1]->dreg; \
458 type_from_op (cmp, sp [0], sp [1]); \
460 type_from_op (ins, sp [0], sp [1]); \
461 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
462 GET_BBLOCK (cfg, tblock, target); \
463 link_bblock (cfg, bblock, tblock); \
464 ins->inst_true_bb = tblock; \
465 if ((next_block)) { \
466 link_bblock (cfg, bblock, (next_block)); \
467 ins->inst_false_bb = (next_block); \
468 start_new_bblock = 1; \
470 GET_BBLOCK (cfg, tblock, ip); \
471 link_bblock (cfg, bblock, tblock); \
472 ins->inst_false_bb = tblock; \
473 start_new_bblock = 2; \
475 if (sp != stack_start) { \
476 handle_stack_args (cfg, stack_start, sp - stack_start); \
477 CHECK_UNVERIFIABLE (cfg); \
479 MONO_ADD_INS (bblock, cmp); \
480 MONO_ADD_INS (bblock, ins); \
484 * link_bblock: Links two basic blocks
486 * links two basic blocks in the control flow graph, the 'from'
487 * argument is the starting block and the 'to' argument is the block
488 * that control flow reaches after 'from'.
491 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
493 MonoBasicBlock **newa;
/* Verbose edge tracing. NOTE(review): "IL%04x" here vs "IL_%04x" below
 * looks like an inconsistency in the original debug output format. */
497 if (from->cil_code) {
499 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
501 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
504 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
506 printf ("edge from entry to exit\n");
/* Skip if the forward edge already exists. */
511 for (i = 0; i < from->out_count; ++i) {
512 if (to == from->out_bb [i]) {
/* Grow from->out_bb by one (mempool arrays are copy-on-grow). */
518 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
519 for (i = 0; i < from->out_count; ++i) {
520 newa [i] = from->out_bb [i];
/* Mirror the same dedup-and-grow logic for the backward (in) edge. */
528 for (i = 0; i < to->in_count; ++i) {
529 if (from == to->in_bb [i]) {
535 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
536 for (i = 0; i < to->in_count; ++i) {
537 newa [i] = to->in_bb [i];
/* Public wrapper around the static link_bblock (). */
546 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
548 link_bblock (cfg, from, to);
552 * mono_find_block_region:
554 * We mark each basic block with a region ID. We use that to avoid BB
555 * optimizations when blocks are in different regions.
558 * A region token that encodes where this region is, and information
559 * about the clause owner for this block.
561 * The region encodes the try/catch/filter clause that owns this block
562 * as well as the type. -1 is a special value that represents a block
563 * that is in none of try/catch/filter.
566 mono_find_block_region (MonoCompile *cfg, int offset)
568 MonoMethodHeader *header = cfg->header;
569 MonoExceptionClause *clause;
/* Scan every exception clause; the region token packs the 1-based clause
 * index in the high bits with the region kind and clause flags. */
572 for (i = 0; i < header->num_clauses; ++i) {
573 clause = &header->clauses [i];
/* Filter blocks: offset lies between filter start and handler start. */
574 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
575 (offset < (clause->handler_offset)))
576 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
578 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
579 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
580 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
581 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
582 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
584 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Otherwise the offset may be inside the clause's try range. */
587 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
588 return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block:
 *
 *   Collect the exception clauses of kind TYPE whose protected range
 *   contains IP but not TARGET — i.e. the handlers that a branch from
 *   IP to TARGET leaves. Returns them as a GList.
 */
595 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
597 MonoMethodHeader *header = cfg->header;
598 MonoExceptionClause *clause;
602 for (i = 0; i < header->num_clauses; ++i) {
603 clause = &header->clauses [i];
604 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
605 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
606 if (clause->flags == type)
607 res = g_list_append (res, clause);
/*
 * mono_create_spvar_for_region:
 *
 *   Return (creating and caching on first use) the stack-pointer save
 *   variable associated with exception REGION. The variable is marked
 *   volatile so the register allocator leaves it on the stack.
 */
614 mono_create_spvar_for_region (MonoCompile *cfg, int region)
618 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
622 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
623 /* prevent it from being register allocated */
624 var->flags |= MONO_INST_VOLATILE;
626 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up the exception-object variable for an IL offset, or NULL. */
630 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
632 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Like mono_find_exvar_for_offset but creates and caches the variable
 * (an object-typed, volatile local) when it does not exist yet. */
636 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
640 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
644 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
645 /* prevent it from being register allocated */
646 var->flags |= MONO_INST_VOLATILE;
648 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
654 * Returns the type used in the eval stack when @type is loaded.
655 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
658 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
662 type = mini_replace_type (type);
663 inst->klass = klass = mono_class_from_mono_type (type);
/* Byref parameters are managed pointers on the eval stack. */
665 inst->type = STACK_MP;
670 switch (type->type) {
672 inst->type = STACK_INV;
676 case MONO_TYPE_BOOLEAN:
682 inst->type = STACK_I4;
687 case MONO_TYPE_FNPTR:
688 inst->type = STACK_PTR;
690 case MONO_TYPE_CLASS:
691 case MONO_TYPE_STRING:
692 case MONO_TYPE_OBJECT:
693 case MONO_TYPE_SZARRAY:
694 case MONO_TYPE_ARRAY:
695 inst->type = STACK_OBJ;
699 inst->type = STACK_I8;
703 inst->type = STACK_R8;
705 case MONO_TYPE_VALUETYPE:
/* Enums load as their underlying primitive type. */
706 if (type->data.klass->enumtype) {
707 type = mono_class_enum_basetype (type->data.klass);
711 inst->type = STACK_VTYPE;
714 case MONO_TYPE_TYPEDBYREF:
715 inst->klass = mono_defaults.typed_reference_class;
716 inst->type = STACK_VTYPE;
718 case MONO_TYPE_GENERICINST:
719 type = &type->data.generic_class->container_class->byval_arg;
/* Shared type variables: gsharedvt value types stay VTYPE, reference
 * type parameters are object references. */
723 g_assert (cfg->generic_sharing_context);
724 if (mini_is_gsharedvt_type (cfg, type)) {
725 g_assert (cfg->gsharedvt);
726 inst->type = STACK_VTYPE;
728 inst->type = STACK_OBJ;
732 g_error ("unknown type 0x%02x in eval stack type", type->type);
737 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result stack type for numeric binops, indexed [src1->type][src2->type];
 * STACK_INV marks an illegal operand combination. */
740 bin_num_table [STACK_MAX] [STACK_MAX] = {
741 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
742 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
743 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
744 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
745 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
746 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
747 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
748 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Result type of unary negation per operand stack type. */
753 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
756 /* reduce the size of this table */
/* Result stack type for integer-only binops (and/or/xor etc.). */
758 bin_int_table [STACK_MAX] [STACK_MAX] = {
759 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
760 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
761 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
762 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
763 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
764 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
765 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
766 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparison legality table: 0 = invalid, non-zero = allowed (the exact
 * non-zero codes distinguish ECMA-verifiable vs merely-accepted pairs). */
770 bin_comp_table [STACK_MAX] [STACK_MAX] = {
771 /* Inv i L p F & O vt */
773 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
774 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
775 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
776 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
777 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
778 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
779 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
782 /* reduce the size of this table */
/* Result stack type for shift ops; the shift amount must be I4/PTR. */
784 shift_table [STACK_MAX] [STACK_MAX] = {
785 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
786 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
787 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
788 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
789 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
790 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
791 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
792 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
796 * Tables to map from the non-specific opcode to the matching
797 * type-specific opcode.
798 /* Each *_op_map entry is an opcode DELTA added to the generic CIL opcode
 * to select the I/L/P/F-specific IR opcode, indexed by stack type. */
799 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
801 binops_op_map [STACK_MAX] = {
802 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
805 /* handles from CEE_NEG to CEE_CONV_U8 */
807 unops_op_map [STACK_MAX] = {
808 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
811 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
813 ovfops_op_map [STACK_MAX] = {
814 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
817 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
819 ovf2ops_op_map [STACK_MAX] = {
820 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
823 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
825 ovf3ops_op_map [STACK_MAX] = {
826 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
829 /* handles from CEE_BEQ to CEE_BLT_UN */
831 beqops_op_map [STACK_MAX] = {
832 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
835 /* handles from CEE_CEQ to CEE_CLT_UN */
837 ceqops_op_map [STACK_MAX] = {
838 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
842 * Sets ins->type (the type on the eval stack) according to the
843 * type of the opcode and the arguments to it.
844 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
846 * FIXME: this function sets ins->type unconditionally in some cases, but
847 * it should set it to invalid for some types (a conv.x on an object)
850 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
852 switch (ins->opcode) {
/* Numeric binops: look up the result type and specialize the opcode. */
859 /* FIXME: check unverifiable args for STACK_MP */
860 ins->type = bin_num_table [src1->type] [src2->type];
861 ins->opcode += binops_op_map [ins->type];
/* Integer-only binops (and/or/xor/...). */
868 ins->type = bin_int_table [src1->type] [src2->type];
869 ins->opcode += binops_op_map [ins->type];
/* Shifts: result type comes from the shifted operand. */
874 ins->type = shift_table [src1->type] [src2->type];
875 ins->opcode += binops_op_map [ins->type];
/* Compares: pick L/F/I variant based on the operand width. */
880 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
881 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
882 ins->opcode = OP_LCOMPARE;
883 else if (src1->type == STACK_R8)
884 ins->opcode = OP_FCOMPARE;
886 ins->opcode = OP_ICOMPARE;
888 case OP_ICOMPARE_IMM:
889 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
890 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
891 ins->opcode = OP_LCOMPARE_IMM;
/* Conditional branches (beq family). */
903 ins->opcode += beqops_op_map [src1->type];
906 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
907 ins->opcode += ceqops_op_map [src1->type];
/* cgt/clt family: only bit 0 of the comparison table marks verifiable pairs. */
913 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
914 ins->opcode += ceqops_op_map [src1->type];
/* Unary ops. */
918 ins->type = neg_table [src1->type];
919 ins->opcode += unops_op_map [ins->type];
/* not: only integer-ish operands are valid. */
922 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
923 ins->type = src1->type;
925 ins->type = STACK_INV;
926 ins->opcode += unops_op_map [ins->type];
/* Narrowing conversions to I1/U1/I2/U2/I4/U4 produce an I4. */
932 ins->type = STACK_I4;
933 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: unsigned → floating point. */
936 ins->type = STACK_R8;
937 switch (src1->type) {
940 ins->opcode = OP_ICONV_TO_R_UN;
943 ins->opcode = OP_LCONV_TO_R_UN;
947 case CEE_CONV_OVF_I1:
948 case CEE_CONV_OVF_U1:
949 case CEE_CONV_OVF_I2:
950 case CEE_CONV_OVF_U2:
951 case CEE_CONV_OVF_I4:
952 case CEE_CONV_OVF_U4:
953 ins->type = STACK_I4;
954 ins->opcode += ovf3ops_op_map [src1->type];
956 case CEE_CONV_OVF_I_UN:
957 case CEE_CONV_OVF_U_UN:
958 ins->type = STACK_PTR;
959 ins->opcode += ovf2ops_op_map [src1->type];
961 case CEE_CONV_OVF_I1_UN:
962 case CEE_CONV_OVF_I2_UN:
963 case CEE_CONV_OVF_I4_UN:
964 case CEE_CONV_OVF_U1_UN:
965 case CEE_CONV_OVF_U2_UN:
966 case CEE_CONV_OVF_U4_UN:
967 ins->type = STACK_I4;
968 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: result is pointer-sized; the source opcode depends on both the
 * operand type and the native pointer width. */
971 ins->type = STACK_PTR;
972 switch (src1->type) {
974 ins->opcode = OP_ICONV_TO_U;
978 #if SIZEOF_VOID_P == 8
979 ins->opcode = OP_LCONV_TO_U;
981 ins->opcode = OP_MOVE;
985 ins->opcode = OP_LCONV_TO_U;
988 ins->opcode = OP_FCONV_TO_U;
/* Conversions to I8/U8. */
994 ins->type = STACK_I8;
995 ins->opcode += unops_op_map [src1->type];
997 case CEE_CONV_OVF_I8:
998 case CEE_CONV_OVF_U8:
999 ins->type = STACK_I8;
1000 ins->opcode += ovf3ops_op_map [src1->type];
1002 case CEE_CONV_OVF_U8_UN:
1003 case CEE_CONV_OVF_I8_UN:
1004 ins->type = STACK_I8;
1005 ins->opcode += ovf2ops_op_map [src1->type];
/* Conversions to floating point. */
1009 ins->type = STACK_R8;
1010 ins->opcode += unops_op_map [src1->type];
1013 ins->type = STACK_R8;
/* conv.u4 family via the overflow-capable map. */
1017 ins->type = STACK_I4;
1018 ins->opcode += ovfops_op_map [src1->type];
1021 case CEE_CONV_OVF_I:
1022 case CEE_CONV_OVF_U:
1023 ins->type = STACK_PTR;
1024 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic: floats are not allowed. */
1027 case CEE_ADD_OVF_UN:
1029 case CEE_MUL_OVF_UN:
1031 case CEE_SUB_OVF_UN:
1032 ins->type = bin_num_table [src1->type] [src2->type];
1033 ins->opcode += ovfops_op_map [src1->type];
1034 if (ins->type == STACK_R8)
1035 ins->type = STACK_INV;
/* Membase loads: result type determined directly by the load width. */
1037 case OP_LOAD_MEMBASE:
1038 ins->type = STACK_PTR;
1040 case OP_LOADI1_MEMBASE:
1041 case OP_LOADU1_MEMBASE:
1042 case OP_LOADI2_MEMBASE:
1043 case OP_LOADU2_MEMBASE:
1044 case OP_LOADI4_MEMBASE:
1045 case OP_LOADU4_MEMBASE:
1046 ins->type = STACK_PTR;
1048 case OP_LOADI8_MEMBASE:
1049 ins->type = STACK_I8;
1051 case OP_LOADR4_MEMBASE:
1052 case OP_LOADR8_MEMBASE:
1053 ins->type = STACK_R8;
1056 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers carry object_class as a conservative klass. */
1060 if (ins->type == STACK_MP)
1061 ins->klass = mono_defaults.object_class;
/* Map from MonoTypeEnum-ish indices to eval stack types (fragmentary here). */
1066 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1072 param_table [STACK_MAX] [STACK_MAX] = {
/*
 * check_values_to_signature:
 *
 *   Verify that the eval-stack values in ARGS are compatible with SIG's
 *   parameter types (byref-ness, reference kinds, float widths).
 *   NOTE(review): several lines of this function are not visible in this
 *   chunk; the return convention cannot be confirmed from here.
 */
1077 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1081 switch (args->type) {
1091 for (i = 0; i < sig->param_count; ++i) {
1092 switch (args [i].type) {
1096 if (!sig->params [i]->byref)
1100 if (sig->params [i]->byref)
1102 switch (sig->params [i]->type) {
1103 case MONO_TYPE_CLASS:
1104 case MONO_TYPE_STRING:
1105 case MONO_TYPE_OBJECT:
1106 case MONO_TYPE_SZARRAY:
1107 case MONO_TYPE_ARRAY:
1114 if (sig->params [i]->byref)
1116 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1125 /*if (!param_table [args [i].type] [sig->params [i]->type])
1133 * When we need a pointer to the current domain many times in a method, we
1134 * call mono_domain_get() once and we store the result in a local variable.
1135 * This function returns the variable that represents the MonoDomain*.
1137 inline static MonoInst *
1138 mono_get_domainvar (MonoCompile *cfg)
/* Lazily create the cached domain variable. */
1140 if (!cfg->domainvar)
1141 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1142 return cfg->domainvar;
1146 * The got_var contains the address of the Global Offset Table when AOT
/* Lazily create the GOT variable (AOT compilation only, and only on
 * architectures that define MONO_ARCH_NEED_GOT_VAR). */
1150 mono_get_got_var (MonoCompile *cfg)
1152 #ifdef MONO_ARCH_NEED_GOT_VAR
1153 if (!cfg->compile_aot)
1155 if (!cfg->got_var) {
1156 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1158 return cfg->got_var;
/* Lazily create the rgctx/vtable variable used by shared generic code;
 * it is forced onto the stack so the trampolines can find it. */
1165 mono_get_vtable_var (MonoCompile *cfg)
1167 g_assert (cfg->generic_sharing_context);
1169 if (!cfg->rgctx_var) {
1170 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1171 /* force the var to be stack allocated */
1172 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1175 return cfg->rgctx_var;
/* Map an eval-stack type back to a MonoType (using ins->klass for the
 * managed-pointer and value-type cases). */
1179 type_from_stack_type (MonoInst *ins) {
1180 switch (ins->type) {
1181 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1182 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1183 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1184 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1186 return &ins->klass->this_arg;
1187 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1188 case STACK_VTYPE: return &ins->klass->byval_arg;
1190 g_error ("stack type %d to monotype not handled\n", ins->type);
/* Inverse direction: compute the eval-stack type for a MonoType after
 * unwrapping enums via mono_type_get_underlying_type (). */
1195 static G_GNUC_UNUSED int
1196 type_to_stack_type (MonoType *t)
1198 t = mono_type_get_underlying_type (t);
1202 case MONO_TYPE_BOOLEAN:
1205 case MONO_TYPE_CHAR:
1212 case MONO_TYPE_FNPTR:
1214 case MONO_TYPE_CLASS:
1215 case MONO_TYPE_STRING:
1216 case MONO_TYPE_OBJECT:
1217 case MONO_TYPE_SZARRAY:
1218 case MONO_TYPE_ARRAY:
1226 case MONO_TYPE_VALUETYPE:
1227 case MONO_TYPE_TYPEDBYREF:
1229 case MONO_TYPE_GENERICINST:
1230 if (mono_type_generic_inst_is_valuetype (t))
1236 g_assert_not_reached ();
/*
 * array_access_to_klass:
 *
 *   Return the element class implied by a CIL ldelem/stelem-style OPCODE
 *   (e.g. ldelem.i1 -> sbyte_class, ldelem.ref -> object_class).
 */
1243 array_access_to_klass (int opcode)
1247 return mono_defaults.byte_class;
1249 return mono_defaults.uint16_class;
1252 return mono_defaults.int_class;
1255 return mono_defaults.sbyte_class;
1258 return mono_defaults.int16_class;
1261 return mono_defaults.int32_class;
1263 return mono_defaults.uint32_class;
1266 return mono_defaults.int64_class;
1269 return mono_defaults.single_class;
1272 return mono_defaults.double_class;
1273 case CEE_LDELEM_REF:
1274 case CEE_STELEM_REF:
1275 return mono_defaults.object_class;
1277 g_assert_not_reached ();
1283 * We try to share variables when possible
/*
 * mono_compile_get_interface_var:
 *
 *   Return a local variable to hold the stack value INS at stack SLOT
 *   across basic-block boundaries, reusing a previously created variable
 *   of the same slot and stack type when one exists (cached in
 *   cfg->intvars, keyed by type+slot).
 */
1286 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1291 /* inlining can result in deeper stacks */
1292 if (slot >= cfg->header->max_stack)
1293 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1295 pos = ins->type - 1 + slot * STACK_MAX;
1297 switch (ins->type) {
1304 if ((vnum = cfg->intvars [pos]))
1305 return cfg->varinfo [vnum];
1306 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1307 cfg->intvars [pos] = res->inst_c0;
1310 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 *
 *   During AOT compilation, remember the (image, token) pair under KEY in
 *   cfg->token_info_hash so the AOT compiler can later resolve KEY back
 *   to a metadata token.
 */
1316 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1319 * Don't use this if a generic_context is set, since that means AOT can't
1320 * look up the method using just the image+token.
1321 * table == 0 means this is a reference made from a wrapper.
1323 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1324 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1325 jump_info_token->image = image;
1326 jump_info_token->token = token;
1327 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1332 * This function is called to handle items that are left on the evaluation stack
1333 * at basic block boundaries. What happens is that we save the values to local variables
1334 * and we reload them later when first entering the target basic block (with the
1335 * handle_loaded_temps () function).
1336 * A single join point will use the same variables (stored in the array bb->out_stack or
1337 * bb->in_stack, if the basic block is before or after the join point).
1339 * This function needs to be called _before_ emitting the last instruction of
1340 * the bb (i.e. before emitting a branch).
1341 * If the stack merge fails at a join point, cfg->unverifiable is set.
1344 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1347 MonoBasicBlock *bb = cfg->cbb;
1348 MonoBasicBlock *outb;
1349 MonoInst *inst, **locals;
1354 if (cfg->verbose_level > 3)
1355 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
1356 if (!bb->out_scount) {
1357 bb->out_scount = count;
1358 //printf ("bblock %d has out:", bb->block_num);
1360 for (i = 0; i < bb->out_count; ++i) {
1361 outb = bb->out_bb [i];
1362 /* exception handlers are linked, but they should not be considered for stack args */
1363 if (outb->flags & BB_EXCEPTION_HANDLER)
1365 //printf (" %d", outb->block_num);
1366 if (outb->in_stack) {
1368 bb->out_stack = outb->in_stack;
1374 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1375 for (i = 0; i < count; ++i) {
1377 * try to reuse temps already allocated for this purpouse, if they occupy the same
1378 * stack slot and if they are of the same type.
1379 * This won't cause conflicts since if 'local' is used to
1380 * store one of the values in the in_stack of a bblock, then
1381 * the same variable will be used for the same outgoing stack
1383 * This doesn't work when inlining methods, since the bblocks
1384 * in the inlined methods do not inherit their in_stack from
1385 * the bblock they are inlined to. See bug #58863 for an
1388 if (cfg->inlined_method)
1389 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1391 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
1396 for (i = 0; i < bb->out_count; ++i) {
1397 outb = bb->out_bb [i];
1398 /* exception handlers are linked, but they should not be considered for stack args */
1399 if (outb->flags & BB_EXCEPTION_HANDLER)
1401 if (outb->in_scount) {
1402 if (outb->in_scount != bb->out_scount) {
1403 cfg->unverifiable = TRUE;
1406 continue; /* check they are the same locals */
1408 outb->in_scount = count;
1409 outb->in_stack = bb->out_stack;
1412 locals = bb->out_stack;
1414 for (i = 0; i < count; ++i) {
1415 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1416 inst->cil_code = sp [i]->cil_code;
1417 sp [i] = locals [i];
1418 if (cfg->verbose_level > 3)
1419 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1423 * It is possible that the out bblocks already have in_stack assigned, and
1424 * the in_stacks differ. In this case, we will store to all the different
1431 /* Find a bblock which has a different in_stack */
1433 while (bindex < bb->out_count) {
1434 outb = bb->out_bb [bindex];
1435 /* exception handlers are linked, but they should not be considered for stack args */
1436 if (outb->flags & BB_EXCEPTION_HANDLER) {
1440 if (outb->in_stack != locals) {
1441 for (i = 0; i < count; ++i) {
1442 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1443 inst->cil_code = sp [i]->cil_code;
1444 sp [i] = locals [i];
1445 if (cfg->verbose_level > 3)
1446 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1448 locals = outb->in_stack;
/*
 * mini_emit_load_intf_reg_vtable:
 *
 *   Emit IR loading interface_offsets [klass->interface_id] into INTF_REG.
 * AOT: the (adjusted) interface id is a patch-time constant, so compute the
 * address with a register add. JIT: the id is known now, so a single load
 * at a fixed negative offset from the vtable suffices.
 */
1457 /* Emit code which loads interface_offsets [klass->interface_id]
1458 * The array is stored in memory before vtable.
1461 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1463 if (cfg->compile_aot) {
1464 int ioffset_reg = alloc_preg (cfg);
1465 int iid_reg = alloc_preg (cfg);
1467 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1468 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1469 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
1472 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_interface_bitmap_check:
 *
 *   Emit IR that sets INTF_BIT_REG nonzero iff the interface bitmap found at
 * BASE_REG+OFFSET has the bit for KLASS's interface id set.
 * Three strategies: compressed bitmap (icall to mono_class_interface_match),
 * AOT (id is a patch constant, so the byte index and mask are computed in
 * registers), and JIT (id known now, byte index and mask are immediates).
 */
1477 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1479 int ibitmap_reg = alloc_preg (cfg);
1480 #ifdef COMPRESSED_INTERFACE_BITMAP
1482 MonoInst *res, *ins;
1483 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1484 MONO_ADD_INS (cfg->cbb, ins);
1486 if (cfg->compile_aot)
1487 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1489 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1490 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1491 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1493 int ibitmap_byte_reg = alloc_preg (cfg);
1495 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1497 if (cfg->compile_aot) {
1498 int iid_reg = alloc_preg (cfg);
1499 int shifted_iid_reg = alloc_preg (cfg);
1500 int ibitmap_byte_address_reg = alloc_preg (cfg);
1501 int masked_iid_reg = alloc_preg (cfg);
1502 int iid_one_bit_reg = alloc_preg (cfg);
1503 int iid_bit_reg = alloc_preg (cfg);
1504 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
/* byte index = iid >> 3; bit mask = 1 << (iid & 7) — all done in registers. */
1505 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1506 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1507 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1508 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1509 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1510 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1511 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT path: interface_id is a compile-time constant, so fold index and mask. */
1513 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1514 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
/* Thin wrapper: bitmap lives at MonoClass.interface_bitmap. */
1520 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1521 * stored in "klass_reg" implements the interface "klass".
1524 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1526 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
/* Thin wrapper: bitmap lives at MonoVTable.interface_bitmap. */
1530 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1531 * stored in "vtable_reg" implements the interface "klass".
1534 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1536 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
/*
 * mini_emit_max_iid_check:
 *
 *   Compare KLASS's interface id against MAX_IID_REG. On out-of-range:
 * branch to FALSE_TARGET when one is given, otherwise (presumably, based on
 * the two emit lines below) throw InvalidCastException.
 */
1540 * Emit code which checks whenever the interface id of @klass is smaller than
1541 * than the value given by max_iid_reg.
1544 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1545 MonoBasicBlock *false_target)
1547 if (cfg->compile_aot) {
/* AOT: the interface id is only known at patch time, so load it via AOTCONST. */
1548 int iid_reg = alloc_preg (cfg);
1549 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1550 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1553 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1555 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1557 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1560 /* Same as above, but obtains max_iid from a vtable */
1562 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1563 MonoBasicBlock *false_target)
1565 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is stored as an unsigned 16-bit field in the vtable. */
1567 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1568 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1571 /* Same as above, but obtains max_iid from a klass */
1573 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1574 MonoBasicBlock *false_target)
1576 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is stored as an unsigned 16-bit field in MonoClass. */
1578 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1579 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 *
 *   Emit an isinst-style subclass test for the class in KLASS_REG against
 * KLASS: look up supertypes[idepth-1] and compare it with the target class,
 * branching to TRUE_TARGET on match. The target class comes from KLASS_INS
 * (runtime value), an AOT class constant, or an immediate, in that order.
 * The idepth guard is only emitted when idepth can exceed the supertable
 * that every class is guaranteed to have (MONO_DEFAULT_SUPERTABLE_SIZE).
 */
1583 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1585 int idepth_reg = alloc_preg (cfg);
1586 int stypes_reg = alloc_preg (cfg);
1587 int stype = alloc_preg (cfg);
1589 mono_class_setup_supertypes (klass);
1591 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1592 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1593 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1594 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1596 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1597 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1599 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1600 } else if (cfg->compile_aot) {
1601 int const_reg = alloc_preg (cfg);
1602 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1603 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1605 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1607 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper for the common case with no runtime class instruction. */
1611 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1613 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 *
 *   Emit an interface membership test using the vtable in VTABLE_REG:
 * range-check the interface id, load the interface offset/bit, then either
 * branch to TRUE_TARGET on success or throw InvalidCastException on failure
 * (the two emit lines below presumably select on whether targets are given).
 */
1617 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1619 int intf_reg = alloc_preg (cfg);
1621 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1622 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1623 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1625 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1627 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Same interface test as mini_emit_iface_cast, but driven by a MonoClass*
 * register instead of a vtable register. */
1631 * Variant of the above that takes a register to the class, not the vtable.
1634 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1636 int intf_bit_reg = alloc_preg (cfg);
1638 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1639 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1640 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1642 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1644 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 *
 *   Emit an exact class equality check: compare KLASS_REG against KLASS
 * (from KLASS_INST's register, an AOT class constant, or an immediate) and
 * throw InvalidCastException on mismatch.
 */
1648 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1651 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1652 } else if (cfg->compile_aot) {
1653 int const_reg = alloc_preg (cfg);
1654 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1655 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1657 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1659 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact class check with no runtime class instruction. */
1663 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1665 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 *
 *   Compare KLASS_REG against KLASS (AOT constant or immediate) and branch
 * to TARGET with the caller-supplied conditional branch opcode BRANCH_OP.
 */
1669 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1671 if (cfg->compile_aot) {
1672 int const_reg = alloc_preg (cfg);
1673 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1674 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1676 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1678 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/* Forward declaration: mini_emit_castclass_inst below recurses through it
 * for arrays of arrays. */
1682 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 *
 *   Emit a castclass check for the class in KLASS_REG against KLASS,
 * throwing InvalidCastException on failure. The visible array branch
 * checks rank, then the element (cast) class — with special cases for
 * object/enum element types, interface elements, and nested arrays — and
 * finally, for SZARRAYs, verifies the object is a vector (bounds == NULL).
 * The non-array path does the supertypes[idepth-1] exact-class check.
 */
1685 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1688 int rank_reg = alloc_preg (cfg);
1689 int eclass_reg = alloc_preg (cfg);
1691 g_assert (!klass_inst);
1692 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1693 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1694 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1695 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1696 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
1697 if (klass->cast_class == mono_defaults.object_class) {
1698 int parent_reg = alloc_preg (cfg);
1699 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1700 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1701 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1702 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1703 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1704 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1705 } else if (klass->cast_class == mono_defaults.enum_class) {
1706 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1707 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1708 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1710 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1711 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1714 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1715 /* Check that the object is a vector too */
1716 int bounds_reg = alloc_preg (cfg);
1717 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1718 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1719 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: walk the supertypes table, as in mini_emit_isninst_cast_inst,
 * but failing with an exception rather than a branch. */
1722 int idepth_reg = alloc_preg (cfg);
1723 int stypes_reg = alloc_preg (cfg);
1724 int stype = alloc_preg (cfg);
1726 mono_class_setup_supertypes (klass);
1728 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1729 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1730 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1731 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1733 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1734 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1735 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass with no runtime class instruction. */
1740 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1742 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 *
 *   Emit inline IR that fills SIZE bytes at DESTREG+OFFSET with VAL
 * (asserted to be 0). Small aligned sizes use a single store-immediate;
 * larger sizes load VAL into a register and emit a sequence of stores,
 * using the widest width the alignment (and platform) allows, falling
 * back to 4/2/1-byte stores for the remainder.
 */
1746 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1750 g_assert (val == 0);
1755 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1758 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1761 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1764 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1766 #if SIZEOF_REGISTER == 8
1768 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
1774 val_reg = alloc_preg (cfg);
1776 if (SIZEOF_REGISTER == 8)
1777 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1779 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned destination: fall back to byte stores until aligned. */
1782 /* This could be optimized further if neccesary */
1784 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1791 #if !NO_UNALIGNED_ACCESS
1792 if (SIZEOF_REGISTER == 8) {
1794 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1799 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1807 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1812 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1817 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 *
 *   Emit inline IR copying SIZE bytes from SRCREG+SOFFSET to DESTREG+DOFFSET.
 * Copies in the widest chunks ALIGN (and the platform's unaligned-access
 * capability) allows: 8-byte loads/stores on 64-bit, then 4-, 2-, and
 * 1-byte pieces for the remainder. Unaligned input degrades to byte copies.
 */
1824 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1831 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1832 g_assert (size < 10000);
1835 /* This could be optimized further if neccesary */
1837 cur_reg = alloc_preg (cfg);
1838 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1839 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1846 #if !NO_UNALIGNED_ACCESS
1847 if (SIZEOF_REGISTER == 8) {
1849 cur_reg = alloc_preg (cfg);
1850 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1851 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1860 cur_reg = alloc_preg (cfg);
1861 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1862 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1868 cur_reg = alloc_preg (cfg);
1869 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1870 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1876 cur_reg = alloc_preg (cfg);
1877 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1878 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * emit_tls_set:
 *
 *   Emit IR storing SREG1 into the thread-local slot TLS_KEY. Under AOT the
 * slot offset is a patch-time constant loaded into a register
 * (OP_TLS_SET_REG); otherwise the resolved offset is baked into the
 * instruction (OP_TLS_SET).
 */
1886 emit_tls_set (MonoCompile *cfg, int sreg1, int tls_key)
1890 if (cfg->compile_aot) {
1891 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1892 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1894 ins->sreg2 = c->dreg;
1895 MONO_ADD_INS (cfg->cbb, ins);
1897 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1899 ins->inst_offset = mini_get_tls_offset (tls_key);
1900 MONO_ADD_INS (cfg->cbb, ins);
/*
 * emit_push_lmf:
 *
 *   Emit IR linking this frame's LMF (Last Managed Frame) onto the
 * per-thread LMF stack. Fast path: when the LMF itself lives in TLS,
 * save the current TLS LMF into lmf->previous_lmf and store the new one
 * via emit_tls_set. Slow path: obtain lmf_addr (TLS intrinsic, jit_tls,
 * or the mono_get_lmf_addr icall), cache it in cfg->lmf_addr_var, save
 * *lmf_addr into lmf->previous_lmf and write the new lmf to *lmf_addr.
 */
1907 * Emit IR to push the current LMF onto the LMF stack.
1910 emit_push_lmf (MonoCompile *cfg)
1913 * Emit IR to push the LMF:
1914 * lmf_addr = <lmf_addr from tls>
1915 * lmf->lmf_addr = lmf_addr
1916 * lmf->prev_lmf = *lmf_addr
1919 int lmf_reg, prev_lmf_reg;
1920 MonoInst *ins, *lmf_ins;
1925 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
1926 /* Load current lmf */
1927 lmf_ins = mono_get_lmf_intrinsic (cfg);
1929 MONO_ADD_INS (cfg->cbb, lmf_ins);
1930 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1931 lmf_reg = ins->dreg;
1932 /* Save previous_lmf */
1933 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
/* Publish the new LMF as the thread's current one. */
1935 emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
1938 * Store lmf_addr in a variable, so it can be allocated to a global register.
1940 if (!cfg->lmf_addr_var)
1941 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1944 ins = mono_get_jit_tls_intrinsic (cfg);
1946 int jit_tls_dreg = ins->dreg;
1948 MONO_ADD_INS (cfg->cbb, ins);
1949 lmf_reg = alloc_preg (cfg);
/* lmf_addr = &jit_tls->lmf */
1950 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, G_STRUCT_OFFSET (MonoJitTlsData, lmf));
1952 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
1955 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
1957 MONO_ADD_INS (cfg->cbb, lmf_ins);
1959 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
1961 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
1963 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1964 lmf_reg = ins->dreg;
1966 prev_lmf_reg = alloc_preg (cfg);
1967 /* Save previous_lmf */
1968 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
1969 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* *lmf_addr = lmf — the new frame becomes the head of the LMF stack. */
1971 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
/*
 * emit_pop_lmf:
 *
 *   Emit IR unlinking this frame's LMF from the per-thread LMF stack —
 * the inverse of emit_push_lmf. Fast path: restore lmf->previous_lmf
 * straight into the TLS slot. Slow path: *(lmf->lmf_addr) = lmf->prev_lmf.
 */
1978 * Emit IR to pop the current LMF from the LMF stack.
1981 emit_pop_lmf (MonoCompile *cfg)
1983 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
1989 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1990 lmf_reg = ins->dreg;
1992 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
1993 /* Load previous_lmf */
1994 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
1997 emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
2000 * Emit IR to pop the LMF:
2001 * *(lmf->lmf_addr) = lmf->prev_lmf
2003 /* This could be called before emit_push_lmf () */
2004 if (!cfg->lmf_addr_var)
2005 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2006 lmf_addr_reg = cfg->lmf_addr_var->dreg;
2008 prev_lmf_reg = alloc_preg (cfg);
2009 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
2010 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * ret_type_to_call_opcode:
 *
 *   Map a return TYPE to the call opcode variant: plain CALL for int/ref,
 * LCALL for 64-bit, FCALL for float/double, VCALL for value types and
 * typedbyref, VOIDCALL for void — each with _REG (calli) and _MEMBASE
 * (virtual) forms selected by the CALLI/VIRT flags. Generic sharing is
 * resolved first via mini_get_basic_type_from_generic.
 */
2015 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
2018 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2021 type = mini_get_basic_type_from_generic (gsctx, type);
2022 type = mini_replace_type (type);
2023 switch (type->type) {
2024 case MONO_TYPE_VOID:
2025 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2028 case MONO_TYPE_BOOLEAN:
2031 case MONO_TYPE_CHAR:
2034 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2038 case MONO_TYPE_FNPTR:
2039 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2040 case MONO_TYPE_CLASS:
2041 case MONO_TYPE_STRING:
2042 case MONO_TYPE_OBJECT:
2043 case MONO_TYPE_SZARRAY:
2044 case MONO_TYPE_ARRAY:
2045 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2048 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
2051 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2052 case MONO_TYPE_VALUETYPE:
/* Enums re-dispatch on their underlying integral type. */
2053 if (type->data.klass->enumtype) {
2054 type = mono_class_enum_basetype (type->data.klass);
2057 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2058 case MONO_TYPE_TYPEDBYREF:
2059 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2060 case MONO_TYPE_GENERICINST:
/* Generic instances re-dispatch on the open container class's type. */
2061 type = &type->data.generic_class->container_class->byval_arg;
2064 case MONO_TYPE_MVAR:
2066 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2068 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
/*
 * target_type_is_incompatible:
 *
 *   Verify that the evaluation-stack entry ARG (classified as STACK_I4,
 * STACK_OBJ, STACK_VTYPE, ...) can legally be stored into TARGET. Returns
 * nonzero (incompatible) via the fallthrough paths after each failed check.
 */
2074 * target_type_is_incompatible:
2075 * @cfg: MonoCompile context
2077 * Check that the item @arg on the evaluation stack can be stored
2078 * in the target type (can be a local, or field, etc).
2079 * The cfg arg can be used to check if we need verification or just
2082 * Returns: non-0 value if arg can't be stored on a target.
2085 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2087 MonoType *simple_type;
2090 target = mini_replace_type (target);
2091 if (target->byref) {
2092 /* FIXME: check that the pointed to types match */
2093 if (arg->type == STACK_MP)
2094 return arg->klass != mono_class_from_mono_type (target);
2095 if (arg->type == STACK_PTR)
2100 simple_type = mono_type_get_underlying_type (target);
2101 switch (simple_type->type) {
2102 case MONO_TYPE_VOID:
2106 case MONO_TYPE_BOOLEAN:
2109 case MONO_TYPE_CHAR:
2112 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2116 /* STACK_MP is needed when setting pinned locals */
2117 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2122 case MONO_TYPE_FNPTR:
2124 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2125 * in native int. (#688008).
2127 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2130 case MONO_TYPE_CLASS:
2131 case MONO_TYPE_STRING:
2132 case MONO_TYPE_OBJECT:
2133 case MONO_TYPE_SZARRAY:
2134 case MONO_TYPE_ARRAY:
2135 if (arg->type != STACK_OBJ)
2137 /* FIXME: check type compatibility */
2141 if (arg->type != STACK_I8)
2146 if (arg->type != STACK_R8)
2149 case MONO_TYPE_VALUETYPE:
/* Value types additionally require the exact class to match. */
2150 if (arg->type != STACK_VTYPE)
2152 klass = mono_class_from_mono_type (simple_type);
2153 if (klass != arg->klass)
2156 case MONO_TYPE_TYPEDBYREF:
2157 if (arg->type != STACK_VTYPE)
2159 klass = mono_class_from_mono_type (simple_type);
2160 if (klass != arg->klass)
2163 case MONO_TYPE_GENERICINST:
2164 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2165 if (arg->type != STACK_VTYPE)
2167 klass = mono_class_from_mono_type (simple_type);
2168 if (klass != arg->klass)
2172 if (arg->type != STACK_OBJ)
2174 /* FIXME: check type compatibility */
2178 case MONO_TYPE_MVAR:
/* Type variables only reach here under generic sharing; vtype-constrained
 * variables behave as vtypes, otherwise as references. */
2179 g_assert (cfg->generic_sharing_context);
2180 if (mini_type_var_is_vt (cfg, simple_type)) {
2181 if (arg->type != STACK_VTYPE)
2184 if (arg->type != STACK_OBJ)
2189 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
/*
 * check_call_signature:
 *
 *   Validate that each entry in ARGS has a stack type compatible with the
 * corresponding parameter of SIG (and that the 'this' argument, when
 * present, is object/managed-pointer/native-pointer). Nonzero return
 * means the arguments cannot be passed as-is.
 */
2195 * Prepare arguments for passing to a function call.
2196 * Return a non-zero value if the arguments can't be passed to the given
2198 * The type checks are not yet complete and some conversions may need
2199 * casts on 32 or 64 bit architectures.
2201 * FIXME: implement this using target_type_is_incompatible ()
2204 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2206 MonoType *simple_type;
2210 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2214 for (i = 0; i < sig->param_count; ++i) {
2215 if (sig->params [i]->byref) {
2216 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2220 simple_type = sig->params [i];
2221 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
2223 switch (simple_type->type) {
2224 case MONO_TYPE_VOID:
2229 case MONO_TYPE_BOOLEAN:
2232 case MONO_TYPE_CHAR:
2235 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2241 case MONO_TYPE_FNPTR:
2242 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2245 case MONO_TYPE_CLASS:
2246 case MONO_TYPE_STRING:
2247 case MONO_TYPE_OBJECT:
2248 case MONO_TYPE_SZARRAY:
2249 case MONO_TYPE_ARRAY:
2250 if (args [i]->type != STACK_OBJ)
2255 if (args [i]->type != STACK_I8)
2260 if (args [i]->type != STACK_R8)
2263 case MONO_TYPE_VALUETYPE:
/* Enums are re-checked as their underlying integral type. */
2264 if (simple_type->data.klass->enumtype) {
2265 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2268 if (args [i]->type != STACK_VTYPE)
2271 case MONO_TYPE_TYPEDBYREF:
2272 if (args [i]->type != STACK_VTYPE)
2275 case MONO_TYPE_GENERICINST:
2276 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2279 case MONO_TYPE_MVAR:
2281 if (args [i]->type != STACK_VTYPE)
2285 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 *
 *   Map a *_MEMBASE (virtual) call opcode to its direct-call counterpart.
 * Asserts on any opcode that is not a known MEMBASE call.
 */
2293 callvirt_to_call (int opcode)
2296 case OP_CALL_MEMBASE:
2298 case OP_VOIDCALL_MEMBASE:
2300 case OP_FCALL_MEMBASE:
2302 case OP_VCALL_MEMBASE:
2304 case OP_LCALL_MEMBASE:
2307 g_assert_not_reached ();
/*
 * emit_imt_argument:
 *
 *   Materialize the hidden IMT argument for an interface call — either the
 * caller-supplied IMT_ARG register, an AOT method constant, or the raw
 * METHOD pointer — and attach it to CALL. LLVM keeps it in
 * call->imt_arg_reg; otherwise it is pinned to MONO_ARCH_IMT_REG, or
 * handed to the arch back end when no dedicated register exists.
 */
2313 #ifdef MONO_ARCH_HAVE_IMT
2314 /* Either METHOD or IMT_ARG needs to be set */
2316 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2320 if (COMPILE_LLVM (cfg)) {
2321 method_reg = alloc_preg (cfg);
2324 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2325 } else if (cfg->compile_aot) {
2326 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2329 MONO_INST_NEW (cfg, ins, OP_PCONST);
2330 ins->inst_p0 = method;
2331 ins->dreg = method_reg;
2332 MONO_ADD_INS (cfg->cbb, ins);
2336 call->imt_arg_reg = method_reg;
2338 #ifdef MONO_ARCH_IMT_REG
2339 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2341 /* Need this to keep the IMT arg alive */
2342 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
/* Non-LLVM path: same three ways of producing the value, then pin it to
 * the architecture's IMT register (or defer to the arch back end). */
2347 #ifdef MONO_ARCH_IMT_REG
2348 method_reg = alloc_preg (cfg);
2351 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2352 } else if (cfg->compile_aot) {
2353 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2356 MONO_INST_NEW (cfg, ins, OP_PCONST);
2357 ins->inst_p0 = method;
2358 ins->dreg = method_reg;
2359 MONO_ADD_INS (cfg->cbb, ins);
2362 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2364 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/*
 * mono_patch_info_new:
 *
 *   Allocate a MonoJumpInfo patch record from MP and fill in its target;
 * the visible fragment stores data.target (ip/type assignments fall in the
 * missing lines of this extraction).
 */
2369 static MonoJumpInfo *
2370 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2372 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2376 ji->data.target = target;
/* Returns KLASS's generic-context usage, but only when this compilation is
 * generic-shared (otherwise the missing else-branch presumably returns 0). */
2382 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2384 if (cfg->generic_sharing_context)
2385 return mono_class_check_context_used (klass);
/* Method-level counterpart of mini_class_check_context_used: only consults
 * context usage when compiling with generic sharing. */
2391 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2393 if (cfg->generic_sharing_context)
2394 return mono_method_check_context_used (method);
/*
 * check_method_sharing:
 *
 *   Decide whether calls to CMETHOD need an extra hidden argument:
 * a vtable (static/valuetype methods of shared generic classes) or an
 * mrgctx (methods with their own generic method context). Results are
 * reported through the optional OUT_PASS_VTABLE / OUT_PASS_MRGCTX flags.
 */
2400 * check_method_sharing:
2402 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
2405 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2407 gboolean pass_vtable = FALSE;
2408 gboolean pass_mrgctx = FALSE;
2410 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2411 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2412 gboolean sharable = FALSE;
2414 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2417 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2418 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
2419 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2421 sharable = sharing_enabled && context_sharable;
2425 * Pass vtable iff target method might
2426 * be shared, which means that sharing
2427 * is enabled for its class and its
2428 * context is sharable (and it's not a
2431 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* A method instantiation of its own means an mrgctx, never a vtable. */
2435 if (mini_method_get_context (cmethod) &&
2436 mini_method_get_context (cmethod)->method_inst) {
2437 g_assert (!pass_vtable);
2439 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2442 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2443 MonoGenericContext *context = mini_method_get_context (cmethod);
2444 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2446 if (sharing_enabled && context_sharable)
2448 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, mono_method_signature (cmethod)))
2453 if (out_pass_vtable)
2454 *out_pass_vtable = pass_vtable;
2455 if (out_pass_mrgctx)
2456 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 *
 *   Create and initialize a MonoCallInst for a call with signature SIG and
 * arguments ARGS: pick the call opcode, set up return-value handling
 * (vret var / temp + OP_OUTARG_VTRETADDR for value-type returns), perform
 * soft-float r8->r4 argument conversion, and run the arch/LLVM specific
 * outgoing-argument lowering.
 * NOTE(review): this excerpt is elided (some original lines are missing);
 * comments describe only what is visible here.
 */
2459 inline static MonoCallInst *
2460 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2461 MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
2465 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Tail calls use the dedicated OP_TAILCALL opcode; otherwise the opcode is
 * derived from the return type (the guarding if/else lines appear elided). */
2470 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2472 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2475 call->signature = sig;
2476 call->rgctx_reg = rgctx;
2477 sig_ret = mini_replace_type (sig->ret);
2479 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
/* Value-type return on the tail-call path reuses cfg->vret_addr; the two
 * identical-looking conditions below are presumably separated by an elided
 * 'if (tail)' guard — TODO confirm against the full source. */
2482 if (mini_type_is_vtype (cfg, sig_ret)) {
2483 call->vret_var = cfg->vret_addr;
2484 //g_assert_not_reached ();
/* Non-tail value-type return: allocate a temp and pass its address out. */
2486 } else if (mini_type_is_vtype (cfg, sig_ret)) {
2487 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2490 temp->backend.is_pinvoke = sig->pinvoke;
2493 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2494 * address of return value to increase optimization opportunities.
2495 * Before vtype decomposition, the dreg of the call ins itself represents the
2496 * fact the call modifies the return value. After decomposition, the call will
2497 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2498 * will be transformed into an LDADDR.
2500 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2501 loada->dreg = alloc_preg (cfg);
2502 loada->inst_p0 = temp;
2503 /* We reference the call too since call->dreg could change during optimization */
2504 loada->inst_p1 = call;
2505 MONO_ADD_INS (cfg->cbb, loada);
2507 call->inst.dreg = temp->dreg;
2509 call->vret_var = loada;
2510 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2511 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2513 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2514 if (COMPILE_SOFT_FLOAT (cfg)) {
2516 * If the call has a float argument, we would need to do an r8->r4 conversion using
2517 * an icall, but that cannot be done during the call sequence since it would clobber
2518 * the call registers + the stack. So we do it before emitting the call.
2520 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2522 MonoInst *in = call->args [i];
/* Implicit 'this' occupies slot 0; explicit params follow. */
2524 if (i >= sig->hasthis)
2525 t = sig->params [i - sig->hasthis];
2527 t = &mono_defaults.int_class->byval_arg;
2528 t = mono_type_get_underlying_type (t);
2530 if (!t->byref && t->type == MONO_TYPE_R4) {
2531 MonoInst *iargs [1];
2535 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2537 /* The result will be in an int vreg */
2538 call->args [i] = conv;
2544 call->need_unbox_trampoline = unbox_trampoline;
/* LLVM and the native backend lower outgoing args differently; the two
 * mono_arch_emit_call lines below are presumably the two arms of an elided
 * #ifdef/#else around the LLVM path — TODO confirm. */
2547 if (COMPILE_LLVM (cfg))
2548 mono_llvm_emit_call (cfg, call);
2550 mono_arch_emit_call (cfg, call);
2552 mono_arch_emit_call (cfg, call);
2555 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2556 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *
 *   Attach the runtime-generic-context argument to CALL. On architectures
 * with a dedicated rgctx register (MONO_ARCH_RGCTX_REG) the value is bound
 * to that register; otherwise only the arg reg is recorded on the call.
 */
2562 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2564 #ifdef MONO_ARCH_RGCTX_REG
2565 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2566 cfg->uses_rgctx_reg = TRUE;
2567 call->rgctx_reg = TRUE;
2569 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 *
 *   Emit an indirect call through ADDR with signature SIG. Optionally passes
 * an IMT argument and an rgctx argument. For managed-to-native pinvoke
 * wrappers (when cfg->check_pinvoke_callconv is set) it also records the
 * stack pointer before the call and verifies it afterwards, throwing
 * ExecutionEngineException on a callee-induced stack imbalance.
 * NOTE(review): excerpt is elided; some guard lines are not visible here.
 */
2576 inline static MonoInst*
2577 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2582 gboolean check_sp = FALSE;
/* Only check the SP for the pinvoke subtype of managed-to-native wrappers. */
2584 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2585 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2587 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* Copy the rgctx value into a fresh reg so it survives until the call. */
2592 rgctx_reg = mono_alloc_preg (cfg);
2593 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2597 if (!cfg->stack_inbalance_var)
2598 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* Snapshot SP before the call. */
2600 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2601 ins->dreg = cfg->stack_inbalance_var->dreg;
2602 MONO_ADD_INS (cfg->cbb, ins);
2605 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2607 call->inst.sreg1 = addr->dreg;
2610 emit_imt_argument (cfg, call, NULL, imt_arg);
2612 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
/* Snapshot SP after the call and compare with the pre-call value. */
2617 sp_reg = mono_alloc_preg (cfg);
2619 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2621 MONO_ADD_INS (cfg->cbb, ins);
2623 /* Restore the stack so we don't crash when throwing the exception */
2624 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2625 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2626 MONO_ADD_INS (cfg->cbb, ins);
2628 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2629 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2633 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2635 return (MonoInst*)call;
2639 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2642 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2644 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *
 *   Emit a call to METHOD with arguments ARGS. Handles: remoting proxies
 * (possibly through an rgctx-resolved invoke-with-check wrapper), string
 * ctors (signature rewritten to return string), delegate Invoke fast path
 * (call through delegate->invoke_impl), devirtualization of non-virtual or
 * sealed methods, and full virtual/interface dispatch through the vtable or
 * IMT slot. Returns the call instruction as a MonoInst*.
 * NOTE(review): excerpt is elided; some braces/guards are not visible here.
 */
2647 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2648 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2650 #ifndef DISABLE_REMOTING
2651 gboolean might_be_remote = FALSE;
/* A non-NULL 'this' marks the call as an instance (virtual-style) call. */
2653 gboolean virtual = this != NULL;
2654 gboolean enable_for_aot = TRUE;
2658 gboolean need_unbox_trampoline;
2661 sig = mono_method_signature (method);
2664 rgctx_reg = mono_alloc_preg (cfg);
2665 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2668 if (method->string_ctor) {
2669 /* Create the real signature */
2670 /* FIXME: Cache these */
2671 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2672 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2677 context_used = mini_method_check_context_used (cfg, method);
2679 #ifndef DISABLE_REMOTING
/* Call might hit a transparent proxy: non-virtual instance call on a
 * MarshalByRef class (or object) where 'this' can't be proven local. */
2680 might_be_remote = this && sig->hasthis &&
2681 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2682 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
2684 if (might_be_remote && context_used) {
2687 g_assert (cfg->generic_sharing_context);
/* Shared code: resolve the invoke-with-check wrapper through the rgctx
 * and call it indirectly. */
2689 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2691 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2695 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2697 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2699 #ifndef DISABLE_REMOTING
2700 if (might_be_remote)
2701 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2704 call->method = method;
2705 call->inst.flags |= MONO_INST_HAS_METHOD;
2706 call->inst.inst_left = this;
2707 call->tail_call = tail;
2710 int vtable_reg, slot_reg, this_reg;
2713 this_reg = this->dreg;
/* Fast path for MulticastDelegate.Invoke: call delegate->invoke_impl. */
2715 if (ARCH_HAVE_DELEGATE_TRAMPOLINES && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2716 MonoInst *dummy_use;
2718 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2720 /* Make a call to delegate->invoke_impl */
2721 call->inst.inst_basereg = this_reg;
2722 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2723 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2725 /* We must emit a dummy use here because the delegate trampoline will
2726 replace the 'this' argument with the delegate target making this activation
2727 no longer a root for the delegate.
2728 This is an issue for delegates that target collectible code such as dynamic
2729 methods of GC'able assemblies.
2731 For a test case look into #667921.
2733 FIXME: a dummy use is not the best way to do it as the local register allocator
2734 will put it on a caller save register and spil it around the call.
2735 Ideally, we would either put it on a callee save register or only do the store part.
2737 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2739 return (MonoInst*)call;
/* Devirtualize: callable directly when non-virtual, or final and not a
 * remoting wrapper, and not a shared MarshalByRef call. */
2742 if ((!cfg->compile_aot || enable_for_aot) &&
2743 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2744 (MONO_METHOD_IS_FINAL (method) &&
2745 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2746 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2748 * the method is not virtual, we just need to ensure this is not null
2749 * and then we can call the method directly.
2751 #ifndef DISABLE_REMOTING
2752 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2754 * The check above ensures method is not gshared, this is needed since
2755 * gshared methods can't have wrappers.
2757 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
/* String ctors take no valid 'this', so skip the null check for them. */
2761 if (!method->string_ctor)
2762 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2764 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2765 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2767 * the method is virtual, but we can statically dispatch since either
2768 * it's class or the method itself are sealed.
2769 * But first we need to ensure it's not a null reference.
2771 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2773 call->inst.opcode = callvirt_to_call (call->inst.opcode);
/* True virtual dispatch: load the vtable (faulting null check) and pick
 * the IMT or vtable slot to call through. */
2775 vtable_reg = alloc_preg (cfg);
2776 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2777 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2779 #ifdef MONO_ARCH_HAVE_IMT
/* IMT slots live at negative offsets from the vtable pointer. */
2781 guint32 imt_slot = mono_method_get_imt_slot (method);
2782 emit_imt_argument (cfg, call, call->method, imt_arg);
2783 slot_reg = vtable_reg;
2784 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2787 if (slot_reg == -1) {
/* Non-IMT interface dispatch: load the interface vtable slot. */
2788 slot_reg = alloc_preg (cfg);
2789 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2790 offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2793 slot_reg = vtable_reg;
2794 offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2795 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2796 #ifdef MONO_ARCH_HAVE_IMT
2798 g_assert (mono_method_signature (method)->generic_param_count);
2799 emit_imt_argument (cfg, call, call->method, imt_arg);
2804 call->inst.sreg1 = slot_reg;
2805 call->inst.inst_offset = offset;
2806 call->virtual = TRUE;
2810 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2813 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2815 return (MonoInst*)call;
2819 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2821 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this, NULL, NULL);
/*
 * mono_emit_native_call:
 *
 *   Emit a direct call to the native function FUNC with signature SIG.
 * Builds the call via mono_emit_call_args () (no calli/virtual/tail/rgctx),
 * appends it to the current basic block and returns it.
 * NOTE(review): excerpt is elided — the rest of the parameter list and the
 * fptr assignment are not visible here.
 */
2825 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2832 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2835 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2837 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *
 *   Emit a call to the registered JIT icall identified by its address FUNC.
 * Looks up the icall's registration info and calls its wrapper with the
 * icall's own signature.
 */
2841 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2843 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2847 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2851 * mono_emit_abs_call:
2853 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2855 inline static MonoInst*
2856 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2857 MonoMethodSignature *sig, MonoInst **args)
/* Allocate a patch-info descriptor; it doubles as the "address" below. */
2859 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2863 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Lazily create the abs-patch table keyed by the patch info itself. */
2866 if (cfg->abs_patches == NULL)
2867 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2868 g_hash_table_insert (cfg->abs_patches, ji, ji);
2869 ins = mono_emit_native_call (cfg, ji, sig, args);
/* Mark the fptr as a patch so later phases resolve it, not call it. */
2870 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *
 *   For pinvoke (or LLVM) calls returning a sub-register-sized integer,
 * emit an explicit sign/zero extension of the result, since native code may
 * leave the upper bits of the return register uninitialized. Returns the
 * (possibly widened) result instruction.
 */
2875 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2877 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2878 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2882 * Native code might return non register sized integers
2883 * without initializing the upper bits.
/* Map the return type's load opcode to the matching widening conv. */
2885 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2886 case OP_LOADI1_MEMBASE:
2887 widen_op = OP_ICONV_TO_I1;
2889 case OP_LOADU1_MEMBASE:
2890 widen_op = OP_ICONV_TO_U1;
2892 case OP_LOADI2_MEMBASE:
2893 widen_op = OP_ICONV_TO_I2;
2895 case OP_LOADU2_MEMBASE:
2896 widen_op = OP_ICONV_TO_U2;
2902 if (widen_op != -1) {
2903 int dreg = alloc_preg (cfg);
2906 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
/* Preserve the eval-stack type of the original result. */
2907 widen->type = ins->type;
2917 get_memcpy_method (void)
2919 static MonoMethod *memcpy_method = NULL;
2920 if (!memcpy_method) {
2921 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2923 g_error ("Old corlib found. Install a new one");
2925 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 *   Compute a bitmap of the pointer-sized slots of KLASS (recursing into
 * value-type fields) that hold object references, OR-ing bits into
 * *WB_BITMAP. OFFSET is the slot offset of KLASS within the outer object;
 * static fields are skipped.
 */
2929 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
2931 MonoClassField *field;
2932 gpointer iter = NULL;
2934 while ((field = mono_class_get_fields (klass, &iter))) {
2937 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* Value types store fields without the MonoObject header. */
2939 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2940 if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
/* Reference fields must be pointer-aligned for the slot bitmap. */
2941 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2942 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2944 MonoClass *field_class = mono_class_from_mono_type (field->type);
/* Recurse into embedded value types that themselves hold references. */
2945 if (field_class->has_references)
2946 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *
 *   Emit a GC write barrier for the store of VALUE through PTR. Prefers, in
 * order: the arch-specific inline card-table barrier, an inline card-table
 * mark sequence, and finally a call to the GC's write-barrier method.
 * A dummy use keeps VALUE alive across the barrier.
 */
2952 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
2954 int card_table_shift_bits;
2955 gpointer card_table_mask;
2957 MonoInst *dummy_use;
2958 int nursery_shift_bits;
2959 size_t nursery_size;
2960 gboolean has_card_table_wb = FALSE;
/* No-op unless this compile needs write barriers. */
2962 if (!cfg->gen_write_barriers)
2965 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
2967 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
2969 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
2970 has_card_table_wb = TRUE;
/* Arch-inlined barrier: only for JIT (non-AOT, non-LLVM) with a card table. */
2973 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
2976 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2977 wbarrier->sreg1 = ptr->dreg;
2978 wbarrier->sreg2 = value->dreg;
2979 MONO_ADD_INS (cfg->cbb, wbarrier);
2980 } else if (card_table) {
/* Inline card marking: card = table[ptr >> shift (& mask)]; *card = 1. */
2981 int offset_reg = alloc_preg (cfg);
2982 int card_reg = alloc_preg (cfg);
2985 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
2986 if (card_table_mask)
2987 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
2989 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
2990 * IMM's larger than 32bits.
2992 if (cfg->compile_aot) {
2993 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
2995 MONO_INST_NEW (cfg, ins, OP_PCONST);
2996 ins->inst_p0 = card_table;
2997 ins->dreg = card_reg;
2998 MONO_ADD_INS (cfg->cbb, ins);
3001 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
3002 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* Fallback: call the GC-provided managed write barrier. */
3004 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3005 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
3008 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Emit an inline, write-barrier-aware copy of a value of type KLASS from
 * IARGS[1] to IARGS[0] (SIZE bytes, alignment ALIGN). Small copies are
 * unrolled pointer-by-pointer with a barrier on each reference slot; larger
 * copies fall back to the mono_gc_wbarrier_value_copy_bitmap icall with a
 * slot bitmap. Return value (elided here) presumably signals whether the
 * inline path was taken — TODO confirm against the full source.
 */
3012 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3014 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3015 unsigned need_wb = 0;
3020 /*types with references can't have alignment smaller than sizeof(void*) */
3021 if (align < SIZEOF_VOID_P)
3024 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3025 if (size > 32 * SIZEOF_VOID_P)
3028 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3030 /* We don't unroll more than 5 stores to avoid code bloat. */
3031 if (size > 5 * SIZEOF_VOID_P) {
3032 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
/* Round size up to a multiple of the pointer size for the icall. */
3033 size += (SIZEOF_VOID_P - 1);
3034 size &= ~(SIZEOF_VOID_P - 1);
3036 EMIT_NEW_ICONST (cfg, iargs [2], size);
3037 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3038 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3042 destreg = iargs [0]->dreg;
3043 srcreg = iargs [1]->dreg;
3046 dest_ptr_reg = alloc_preg (cfg);
3047 tmp_reg = alloc_preg (cfg);
/* Walk a running destination pointer so barriers see the slot address. */
3050 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
3052 while (size >= SIZEOF_VOID_P) {
3053 MonoInst *load_inst;
3054 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3055 load_inst->dreg = tmp_reg;
3056 load_inst->inst_basereg = srcreg;
3057 load_inst->inst_offset = offset;
3058 MONO_ADD_INS (cfg->cbb, load_inst);
3060 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
/* Barrier only for slots the bitmap marked as references (guard elided). */
3063 emit_write_barrier (cfg, iargs [0], load_inst);
3065 offset += SIZEOF_VOID_P;
3066 size -= SIZEOF_VOID_P;
3069 /*tmp += sizeof (void*)*/
3070 if (size >= SIZEOF_VOID_P) {
3071 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3072 MONO_ADD_INS (cfg->cbb, iargs [0]);
3076 /* Those cannot be references since size < sizeof (void*) */
3078 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3079 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3085 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3086 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3092 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3093 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3102 * Emit code to copy a valuetype of type @klass whose address is stored in
3103 * @src->dreg to memory whose address is stored at @dest->dreg.
/* Handles gsharedvt (size/memcpy resolved at runtime), write-barrier-aware
 * copies for reference-carrying types, inline memcpy for small sizes, and a
 * managed memcpy call otherwise. NATIVE selects native layout sizing. */
3106 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3108 MonoInst *iargs [4];
3109 int context_used, n;
3111 MonoMethod *memcpy_method;
3112 MonoInst *size_ins = NULL;
3113 MonoInst *memcpy_ins = NULL;
3117 * This check breaks with spilled vars... need to handle it during verification anyway.
3118 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* gsharedvt: size and memcpy helper are looked up from the rgctx. */
3121 if (mini_is_gsharedvt_klass (cfg, klass)) {
3123 context_used = mini_class_check_context_used (cfg, klass);
3124 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3125 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3129 n = mono_class_native_size (klass, &align);
3131 n = mono_class_value_size (klass, &align);
3133 /* if native is true there should be no references in the struct */
3134 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3135 /* Avoid barriers when storing to the stack */
3136 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3137 (dest->opcode == OP_LDADDR))) {
3143 context_used = mini_class_check_context_used (cfg, klass);
3145 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3146 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3148 } else if (context_used) {
3149 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3151 if (cfg->compile_aot) {
3152 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
3154 EMIT_NEW_PCONST (cfg, iargs [2], klass);
/* JIT path: make sure the GC descriptor exists before value_copy runs. */
3155 mono_class_compute_gc_descriptor (klass);
3160 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3162 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No references involved: plain memcpy, inlined when small and fixed-size. */
3167 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
3168 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3169 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3174 iargs [2] = size_ins;
3176 EMIT_NEW_ICONST (cfg, iargs [2], n);
3178 memcpy_method = get_memcpy_method ();
/* gsharedvt uses an indirect call through the rgctx-resolved memcpy. */
3180 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3182 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
3187 get_memset_method (void)
3189 static MonoMethod *memset_method = NULL;
3190 if (!memset_method) {
3191 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3193 g_error ("Old corlib found. Install a new one");
3195 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code to zero-initialize a value of type KLASS at the address in
 * DEST. gsharedvt types call a runtime-resolved bzero helper; small fixed
 * sizes are inlined via mini_emit_memset; everything else calls the managed
 * memset helper with value 0.
 */
3199 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3201 MonoInst *iargs [3];
3202 int n, context_used;
3204 MonoMethod *memset_method;
3205 MonoInst *size_ins = NULL;
3206 MonoInst *bzero_ins = NULL;
3207 static MonoMethod *bzero_method;
3209 /* FIXME: Optimize this for the case when dest is an LDADDR */
3211 mono_class_init (klass);
/* gsharedvt: size and bzero target come from the runtime generic context. */
3212 if (mini_is_gsharedvt_klass (cfg, klass)) {
3213 context_used = mini_class_check_context_used (cfg, klass);
3214 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3215 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3217 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3218 g_assert (bzero_method);
3220 iargs [1] = size_ins;
3221 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3225 n = mono_class_value_size (klass, &align);
/* Small fixed-size values: inline the memset. */
3227 if (n <= sizeof (gpointer) * 5) {
3228 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3231 memset_method = get_memset_method ();
3233 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3234 EMIT_NEW_ICONST (cfg, iargs [2], n);
3235 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR to load the runtime generic context for METHOD. Depending on
 * how the method is shared, the rgctx comes from: the hidden mrgctx
 * variable (method-inst sharing), the hidden vtable variable (static or
 * valuetype methods, with an extra load for inflated methods), or the
 * vtable of the 'this' argument.
 * NOTE(review): excerpt is elided; some branch structure is not visible.
 */
3240 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3242 MonoInst *this = NULL;
3244 g_assert (cfg->generic_sharing_context);
/* Load 'this' only when the method actually receives one and the context
 * isn't carried by an mrgctx. */
3246 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3247 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3248 !method->klass->valuetype)
3249 EMIT_NEW_ARGLOAD (cfg, this, 0);
3251 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3252 MonoInst *mrgctx_loc, *mrgctx_var;
3255 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
/* The mrgctx is stored in the shared vtable variable slot. */
3257 mrgctx_loc = mono_get_vtable_var (cfg);
3258 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3261 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3262 MonoInst *vtable_loc, *vtable_var;
3266 vtable_loc = mono_get_vtable_var (cfg);
3267 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3269 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
/* The variable actually holds an mrgctx; load its class_vtable field. */
3270 MonoInst *mrgctx_var = vtable_var;
3273 vtable_reg = alloc_preg (cfg);
3274 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3275 vtable_var->type = STACK_PTR;
/* Fallback: read the vtable out of the 'this' object. */
3283 vtable_reg = alloc_preg (cfg);
3284 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3289 static MonoJumpInfoRgctxEntry *
3290 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3292 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3293 res->method = method;
3294 res->in_mrgctx = in_mrgctx;
3295 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3296 res->data->type = patch_type;
3297 res->data->data.target = patch_data;
3298 res->info_type = info_type;
3303 static inline MonoInst*
3304 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3306 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR to load the RGCTX_TYPE property of KLASS through an rgctx
 * lazy-fetch call (MONO_PATCH_INFO_CLASS entry).
 */
3310 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3311 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3313 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3314 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3316 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 *
 *   Emit IR to load the RGCTX_TYPE property of signature SIG through an
 * rgctx lazy-fetch call (MONO_PATCH_INFO_SIGNATURE entry).
 */
3320 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3321 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3323 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3324 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3326 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *
 *   Emit IR to load the RGCTX_TYPE property of the (SIG, CMETHOD) gsharedvt
 * call pair through an rgctx lazy-fetch call
 * (MONO_PATCH_INFO_GSHAREDVT_CALL entry).
 */
3330 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3331 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3333 MonoJumpInfoGSharedVtCall *call_info;
3334 MonoJumpInfoRgctxEntry *entry;
3337 call_info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3338 call_info->sig = sig;
3339 call_info->method = cmethod;
3341 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3342 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3344 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 *
 *   Emit IR to load the gsharedvt method info INFO for CMETHOD through an
 * rgctx lazy-fetch call (MONO_PATCH_INFO_GSHAREDVT_METHOD entry,
 * MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO result).
 */
3349 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3350 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3352 MonoJumpInfoRgctxEntry *entry;
3355 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3356 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3358 return emit_rgctx_fetch (cfg, rgctx, entry);
3362 * emit_get_rgctx_method:
3364 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3365 * normal constants, else emit a load from the rgctx.
3368 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3369 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
/* No shared context: the value is a compile-time constant. */
3371 if (!context_used) {
3374 switch (rgctx_type) {
3375 case MONO_RGCTX_INFO_METHOD:
3376 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3378 case MONO_RGCTX_INFO_METHOD_RGCTX:
3379 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
/* Other info types are not expected on the constant path. */
3382 g_assert_not_reached ();
/* Shared context: go through the rgctx lazy-fetch trampoline. */
3385 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3386 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3388 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR to load the RGCTX_TYPE property of FIELD through an rgctx
 * lazy-fetch call (MONO_PATCH_INFO_FIELD entry).
 */
3393 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3394 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3396 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3397 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3399 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 *   Return the index of the entry (RGCTX_TYPE, DATA) in the per-method
 * gsharedvt info template, adding it if not present. Existing entries are
 * reused except for MONO_RGCTX_INFO_LOCAL_OFFSET, which always gets a new
 * slot. The entries array grows geometrically in the cfg mempool.
 */
3403 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3405 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3406 MonoRuntimeGenericContextInfoTemplate *template;
/* Reuse an existing slot if the same (type, data) pair is present. */
3411 for (i = 0; i < info->num_entries; ++i) {
3412 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3414 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* Grow the entries array (mempool allocation: old array is abandoned). */
3418 if (info->num_entries == info->count_entries) {
3419 MonoRuntimeGenericContextInfoTemplate *new_entries;
3420 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3422 new_entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3424 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3425 info->entries = new_entries;
3426 info->count_entries = new_count_entries;
3429 idx = info->num_entries;
3430 template = &info->entries [idx];
3431 template->info_type = rgctx_type;
3432 template->data = data;
3434 info->num_entries ++;
3440 * emit_get_gsharedvt_info:
3442 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3445 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
/* Reserve (or find) the slot for (rgctx_type, data) in the info template. */
3450 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3451 /* Load info->entries [idx] */
3452 dreg = alloc_preg (cfg);
3453 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
3459 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3461 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3465 * On return the caller must check @klass for load errors.
/* emit_generic_class_init: emit a call to the generic class-init trampoline
 * for KLASS, passing its vtable either via the rgctx (shared code) or as a
 * constant (plus MONO_ARCH_VTABLE_REG binding on the non-LLVM path). */
3468 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
3470 MonoInst *vtable_arg;
3474 context_used = mini_class_check_context_used (cfg, klass);
3477 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3478 klass, MONO_RGCTX_INFO_VTABLE);
3480 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3484 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* LLVM uses a separate trampoline signature. */
3487 if (COMPILE_LLVM (cfg))
3488 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3490 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3491 #ifdef MONO_ARCH_VTABLE_REG
3492 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3493 cfg->uses_vtable_reg = TRUE;
/*
 * emit_seq_point:
 *
 *   Emit a debugger sequence point at IL offset IP, but only when sequence
 * points are enabled and METHOD is the method being compiled (not an
 * inlinee). NONEMPTY_STACK flags the point as having values on the stack.
 */
3500 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3504 if (cfg->gen_seq_points && cfg->method == method) {
3505 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3507 ins->flags |= MONO_INST_NONEMPTY_STACK;
3508 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *
 *   When --debug=casts is active, emit code that records the source class
 * (from OBJ_REG's vtable) and the target KLASS into the thread's JIT TLS
 * data, so a failing cast can produce a detailed message. Optionally skips
 * null objects via a branch (NULL_CHECK); the possibly-new current bblock
 * is returned through OUT_BBLOCK.
 */
3513 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check, MonoBasicBlock **out_bblock)
3515 if (mini_get_debug_options ()->better_cast_details) {
3516 int to_klass_reg = alloc_preg (cfg);
3517 int vtable_reg = alloc_preg (cfg);
3518 int klass_reg = alloc_preg (cfg);
3519 MonoBasicBlock *is_null_bb = NULL;
/* Branch around the bookkeeping when the object is null. */
3523 NEW_BBLOCK (cfg, is_null_bb);
3525 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3526 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3529 tls_get = mono_get_jit_tls_intrinsic (cfg);
3531 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3535 MONO_ADD_INS (cfg->cbb, tls_get);
/* from-class = obj->vtable->klass */
3536 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3537 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3539 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3540 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3541 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3544 MONO_START_BB (cfg, is_null_bb);
3546 *out_bblock = cfg->cbb;
/*
 * reset_cast_details:
 *
 *   Clear the per-thread cast-failure bookkeeping written by
 * save_cast_details () once the cast is known to have succeeded.
 */
3552 reset_cast_details (MonoCompile *cfg)
3554 /* Reset the variables holding the cast details */
3555 if (mini_get_debug_options ()->better_cast_details) {
3556 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3558 MONO_ADD_INS (cfg->cbb, tls_get);
3559 /* It is enough to reset the from field */
3560 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3565 * On return the caller must check @array_class for load errors
/* mini_emit_check_array_type: emit a runtime check that OBJ's dynamic type
 * is exactly ARRAY_CLASS, comparing either klass pointers (domain-shared
 * code) or vtable pointers, and throwing ArrayTypeMismatchException on
 * mismatch. Cast details are saved/reset around the check for --debug=casts. */
3568 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3570 int vtable_reg = alloc_preg (cfg);
3573 context_used = mini_class_check_context_used (cfg, array_class);
3575 save_cast_details (cfg, array_class, obj->dreg, FALSE, NULL);
/* Faulting load doubles as the null check. */
3577 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Domain-shared code can't compare vtables (they're per-domain): compare
 * the class pointer instead. */
3579 if (cfg->opt & MONO_OPT_SHARED) {
3580 int class_reg = alloc_preg (cfg);
3581 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3582 if (cfg->compile_aot) {
3583 int klass_reg = alloc_preg (cfg);
3584 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3585 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3587 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3589 } else if (context_used) {
/* Shared generic code: resolve the expected vtable through the rgctx. */
3590 MonoInst *vtable_ins;
3592 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3593 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3595 if (cfg->compile_aot) {
3599 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3601 vt_reg = alloc_preg (cfg);
3602 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3603 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3606 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3608 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3612 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3614 reset_cast_details (cfg);
3618 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3619 * generic code is generated.
3622 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3624 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3627 MonoInst *rgctx, *addr;
3629 /* FIXME: What if the class is shared? We might not
3630 have to get the address of the method from the
3632 addr = emit_get_rgctx_method (cfg, context_used, method,
3633 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3635 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3637 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3639 gboolean pass_vtable, pass_mrgctx;
3640 MonoInst *rgctx_arg = NULL;
3642 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3643 g_assert (!pass_mrgctx);
3646 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3649 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3652 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
3657 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3661 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3662 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3663 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3664 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3666 obj_reg = sp [0]->dreg;
3667 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3668 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3670 /* FIXME: generics */
3671 g_assert (klass->rank == 0);
3674 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3675 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3677 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3678 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
3681 MonoInst *element_class;
3683 /* This assertion is from the unboxcast insn */
3684 g_assert (klass->rank == 0);
3686 element_class = emit_get_rgctx_klass (cfg, context_used,
3687 klass->element_class, MONO_RGCTX_INFO_KLASS);
3689 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3690 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3692 save_cast_details (cfg, klass->element_class, obj_reg, FALSE, NULL);
3693 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3694 reset_cast_details (cfg);
3697 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3698 MONO_ADD_INS (cfg->cbb, add);
3699 add->type = STACK_MP;
3706 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj, MonoBasicBlock **out_cbb)
3708 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3709 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3713 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
3719 args [1] = klass_inst;
3722 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
3724 NEW_BBLOCK (cfg, is_ref_bb);
3725 NEW_BBLOCK (cfg, is_nullable_bb);
3726 NEW_BBLOCK (cfg, end_bb);
3727 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3728 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3729 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3731 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3732 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3734 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3735 addr_reg = alloc_dreg (cfg, STACK_MP);
3739 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3740 MONO_ADD_INS (cfg->cbb, addr);
3742 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3745 MONO_START_BB (cfg, is_ref_bb);
3747 /* Save the ref to a temporary */
3748 dreg = alloc_ireg (cfg);
3749 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
3750 addr->dreg = addr_reg;
3751 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3752 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3755 MONO_START_BB (cfg, is_nullable_bb);
3758 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3759 MonoInst *unbox_call;
3760 MonoMethodSignature *unbox_sig;
3763 var = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
3765 unbox_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3766 unbox_sig->ret = &klass->byval_arg;
3767 unbox_sig->param_count = 1;
3768 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
3769 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3771 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3772 addr->dreg = addr_reg;
3775 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3778 MONO_START_BB (cfg, end_bb);
3781 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
3783 *out_cbb = cfg->cbb;
3789 * Returns NULL and set the cfg exception on error.
3792 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3794 MonoInst *iargs [2];
3800 MonoInst *iargs [2];
3802 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
3804 if (cfg->opt & MONO_OPT_SHARED)
3805 rgctx_info = MONO_RGCTX_INFO_KLASS;
3807 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3808 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3810 if (cfg->opt & MONO_OPT_SHARED) {
3811 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3813 alloc_ftn = mono_object_new;
3816 alloc_ftn = mono_object_new_specific;
3819 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED))
3820 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3822 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3825 if (cfg->opt & MONO_OPT_SHARED) {
3826 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3827 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3829 alloc_ftn = mono_object_new;
3830 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3831 /* This happens often in argument checking code, eg. throw new FooException... */
3832 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3833 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3834 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3836 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3837 MonoMethod *managed_alloc = NULL;
3841 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3842 cfg->exception_ptr = klass;
3846 #ifndef MONO_CROSS_COMPILE
3847 managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
3850 if (managed_alloc) {
3851 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3852 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3854 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
3856 guint32 lw = vtable->klass->instance_size;
3857 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3858 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3859 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3862 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3866 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3870 * Returns NULL and set the cfg exception on error.
3873 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoBasicBlock **out_cbb)
3875 MonoInst *alloc, *ins;
3877 *out_cbb = cfg->cbb;
3879 if (mono_class_is_nullable (klass)) {
3880 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3883 /* FIXME: What if the class is shared? We might not
3884 have to get the method address from the RGCTX. */
3885 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3886 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3887 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3889 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3891 gboolean pass_vtable, pass_mrgctx;
3892 MonoInst *rgctx_arg = NULL;
3894 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3895 g_assert (!pass_mrgctx);
3898 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3901 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3904 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
3908 if (mini_is_gsharedvt_klass (cfg, klass)) {
3909 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3910 MonoInst *res, *is_ref, *src_var, *addr;
3913 dreg = alloc_ireg (cfg);
3915 NEW_BBLOCK (cfg, is_ref_bb);
3916 NEW_BBLOCK (cfg, is_nullable_bb);
3917 NEW_BBLOCK (cfg, end_bb);
3918 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3919 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3920 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3922 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3923 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3926 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3929 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3930 ins->opcode = OP_STOREV_MEMBASE;
3932 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
3933 res->type = STACK_OBJ;
3935 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3938 MONO_START_BB (cfg, is_ref_bb);
3939 addr_reg = alloc_ireg (cfg);
3941 /* val is a vtype, so has to load the value manually */
3942 src_var = get_vreg_to_inst (cfg, val->dreg);
3944 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
3945 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
3946 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
3947 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3950 MONO_START_BB (cfg, is_nullable_bb);
3953 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
3954 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
3956 MonoMethodSignature *box_sig;
3959 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
3960 * construct that method at JIT time, so have to do things by hand.
3962 box_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3963 box_sig->ret = &mono_defaults.object_class->byval_arg;
3964 box_sig->param_count = 1;
3965 box_sig->params [0] = &klass->byval_arg;
3966 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
3967 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
3968 res->type = STACK_OBJ;
3972 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3974 MONO_START_BB (cfg, end_bb);
3976 *out_cbb = cfg->cbb;
3980 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3984 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3991 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
3994 MonoGenericContainer *container;
3995 MonoGenericInst *ginst;
3997 if (klass->generic_class) {
3998 container = klass->generic_class->container_class->generic_container;
3999 ginst = klass->generic_class->context.class_inst;
4000 } else if (klass->generic_container && context_used) {
4001 container = klass->generic_container;
4002 ginst = container->context.class_inst;
4007 for (i = 0; i < container->type_argc; ++i) {
4009 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
4011 type = ginst->type_argv [i];
4012 if (mini_type_is_reference (cfg, type))
4018 // FIXME: This doesn't work yet (class libs tests fail?)
4019 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
4022 emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args, MonoBasicBlock **out_bblock)
4024 MonoMethod *mono_castclass;
4027 mono_castclass = mono_marshal_get_castclass_with_cache ();
4029 save_cast_details (cfg, klass, args [0]->dreg, TRUE, out_bblock);
4030 res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
4031 reset_cast_details (cfg);
4037 * Returns NULL and set the cfg exception on error.
4040 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4042 MonoBasicBlock *is_null_bb;
4043 int obj_reg = src->dreg;
4044 int vtable_reg = alloc_preg (cfg);
4045 MonoInst *klass_inst = NULL;
4050 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4051 MonoInst *cache_ins;
4053 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4058 /* klass - it's the second element of the cache entry*/
4059 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4062 args [2] = cache_ins;
4064 return emit_castclass_with_cache (cfg, klass, args, NULL);
4067 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4070 NEW_BBLOCK (cfg, is_null_bb);
4072 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4073 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
4075 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4077 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4078 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4079 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
4081 int klass_reg = alloc_preg (cfg);
4083 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4085 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4086 /* the remoting code is broken, access the class for now */
4087 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4088 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4090 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4091 cfg->exception_ptr = klass;
4094 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4096 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4097 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4099 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
4101 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4102 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4106 MONO_START_BB (cfg, is_null_bb);
4108 reset_cast_details (cfg);
4114 * Returns NULL and set the cfg exception on error.
4117 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4120 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
4121 int obj_reg = src->dreg;
4122 int vtable_reg = alloc_preg (cfg);
4123 int res_reg = alloc_ireg_ref (cfg);
4124 MonoInst *klass_inst = NULL;
4129 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4130 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4131 MonoInst *cache_ins;
4133 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4138 /* klass - it's the second element of the cache entry*/
4139 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4142 args [2] = cache_ins;
4144 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
4147 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4150 NEW_BBLOCK (cfg, is_null_bb);
4151 NEW_BBLOCK (cfg, false_bb);
4152 NEW_BBLOCK (cfg, end_bb);
4154 /* Do the assignment at the beginning, so the other assignment can be if converted */
4155 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4156 ins->type = STACK_OBJ;
4159 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4160 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4162 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4164 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4165 g_assert (!context_used);
4166 /* the is_null_bb target simply copies the input register to the output */
4167 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4169 int klass_reg = alloc_preg (cfg);
4172 int rank_reg = alloc_preg (cfg);
4173 int eclass_reg = alloc_preg (cfg);
4175 g_assert (!context_used);
4176 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4177 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4178 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4179 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4180 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
4181 if (klass->cast_class == mono_defaults.object_class) {
4182 int parent_reg = alloc_preg (cfg);
4183 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
4184 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4185 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4186 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4187 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
4188 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4189 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4190 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4191 } else if (klass->cast_class == mono_defaults.enum_class) {
4192 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4193 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4194 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4195 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4197 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4198 /* Check that the object is a vector too */
4199 int bounds_reg = alloc_preg (cfg);
4200 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
4201 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4202 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4205 /* the is_null_bb target simply copies the input register to the output */
4206 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4208 } else if (mono_class_is_nullable (klass)) {
4209 g_assert (!context_used);
4210 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4211 /* the is_null_bb target simply copies the input register to the output */
4212 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
4214 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4215 g_assert (!context_used);
4216 /* the remoting code is broken, access the class for now */
4217 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4218 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4220 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4221 cfg->exception_ptr = klass;
4224 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4226 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4227 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4229 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4230 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
4232 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4233 /* the is_null_bb target simply copies the input register to the output */
4234 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
4239 MONO_START_BB (cfg, false_bb);
4241 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4242 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4244 MONO_START_BB (cfg, is_null_bb);
4246 MONO_START_BB (cfg, end_bb);
4252 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4254 /* This opcode takes as input an object reference and a class, and returns:
4255 0) if the object is an instance of the class,
4256 1) if the object is not instance of the class,
4257 2) if the object is a proxy whose type cannot be determined */
4260 #ifndef DISABLE_REMOTING
4261 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4263 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4265 int obj_reg = src->dreg;
4266 int dreg = alloc_ireg (cfg);
4268 #ifndef DISABLE_REMOTING
4269 int klass_reg = alloc_preg (cfg);
4272 NEW_BBLOCK (cfg, true_bb);
4273 NEW_BBLOCK (cfg, false_bb);
4274 NEW_BBLOCK (cfg, end_bb);
4275 #ifndef DISABLE_REMOTING
4276 NEW_BBLOCK (cfg, false2_bb);
4277 NEW_BBLOCK (cfg, no_proxy_bb);
4280 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4281 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
4283 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4284 #ifndef DISABLE_REMOTING
4285 NEW_BBLOCK (cfg, interface_fail_bb);
4288 tmp_reg = alloc_preg (cfg);
4289 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4290 #ifndef DISABLE_REMOTING
4291 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4292 MONO_START_BB (cfg, interface_fail_bb);
4293 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4295 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
4297 tmp_reg = alloc_preg (cfg);
4298 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4299 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4300 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4302 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
4305 #ifndef DISABLE_REMOTING
4306 tmp_reg = alloc_preg (cfg);
4307 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4308 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4310 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4311 tmp_reg = alloc_preg (cfg);
4312 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4313 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4315 tmp_reg = alloc_preg (cfg);
4316 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4317 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4318 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4320 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4321 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4323 MONO_START_BB (cfg, no_proxy_bb);
4325 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4327 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
4331 MONO_START_BB (cfg, false_bb);
4333 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4334 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4336 #ifndef DISABLE_REMOTING
4337 MONO_START_BB (cfg, false2_bb);
4339 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4340 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4343 MONO_START_BB (cfg, true_bb);
4345 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4347 MONO_START_BB (cfg, end_bb);
4350 MONO_INST_NEW (cfg, ins, OP_ICONST);
4352 ins->type = STACK_I4;
4358 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4360 /* This opcode takes as input an object reference and a class, and returns:
4361 0) if the object is an instance of the class,
4362 1) if the object is a proxy whose type cannot be determined
4363 an InvalidCastException exception is thrown otherwhise*/
4366 #ifndef DISABLE_REMOTING
4367 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4369 MonoBasicBlock *ok_result_bb;
4371 int obj_reg = src->dreg;
4372 int dreg = alloc_ireg (cfg);
4373 int tmp_reg = alloc_preg (cfg);
4375 #ifndef DISABLE_REMOTING
4376 int klass_reg = alloc_preg (cfg);
4377 NEW_BBLOCK (cfg, end_bb);
4380 NEW_BBLOCK (cfg, ok_result_bb);
4382 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4383 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
4385 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4387 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4388 #ifndef DISABLE_REMOTING
4389 NEW_BBLOCK (cfg, interface_fail_bb);
4391 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4392 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4393 MONO_START_BB (cfg, interface_fail_bb);
4394 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4396 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
4398 tmp_reg = alloc_preg (cfg);
4399 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4400 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4401 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
4403 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4404 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4406 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4407 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4408 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
4411 #ifndef DISABLE_REMOTING
4412 NEW_BBLOCK (cfg, no_proxy_bb);
4414 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
4415 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
4416 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4418 tmp_reg = alloc_preg (cfg);
4419 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4420 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4422 tmp_reg = alloc_preg (cfg);
4423 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4424 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4425 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4427 NEW_BBLOCK (cfg, fail_1_bb);
4429 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
4431 MONO_START_BB (cfg, fail_1_bb);
4433 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4434 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4436 MONO_START_BB (cfg, no_proxy_bb);
4438 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4440 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
4444 MONO_START_BB (cfg, ok_result_bb);
4446 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4448 #ifndef DISABLE_REMOTING
4449 MONO_START_BB (cfg, end_bb);
4453 MONO_INST_NEW (cfg, ins, OP_ICONST);
4455 ins->type = STACK_I4;
4461 * Returns NULL and set the cfg exception on error.
4463 static G_GNUC_UNUSED MonoInst*
4464 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
4468 gpointer *trampoline;
4469 MonoInst *obj, *method_ins, *tramp_ins;
4473 obj = handle_alloc (cfg, klass, FALSE, 0);
4477 /* Inline the contents of mono_delegate_ctor */
4479 /* Set target field */
4480 /* Optimize away setting of NULL target */
4481 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
4482 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
4483 if (cfg->gen_write_barriers) {
4484 dreg = alloc_preg (cfg);
4485 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target));
4486 emit_write_barrier (cfg, ptr, target);
4490 /* Set method field */
4491 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4492 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4493 if (cfg->gen_write_barriers) {
4494 dreg = alloc_preg (cfg);
4495 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method));
4496 emit_write_barrier (cfg, ptr, method_ins);
4499 * To avoid looking up the compiled code belonging to the target method
4500 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4501 * store it, and we fill it after the method has been compiled.
4503 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4504 MonoInst *code_slot_ins;
4507 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
4509 domain = mono_domain_get ();
4510 mono_domain_lock (domain);
4511 if (!domain_jit_info (domain)->method_code_hash)
4512 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4513 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4515 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
4516 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4518 mono_domain_unlock (domain);
4520 if (cfg->compile_aot)
4521 EMIT_NEW_AOTCONST (cfg, code_slot_ins, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
4523 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
4525 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
4528 /* Set invoke_impl field */
4529 if (cfg->compile_aot) {
4530 MonoClassMethodPair *del_tramp;
4532 del_tramp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoClassMethodPair));
4533 del_tramp->klass = klass;
4534 del_tramp->method = context_used ? NULL : method;
4535 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
4537 trampoline = mono_create_delegate_trampoline_with_method (cfg->domain, klass, context_used ? NULL : method);
4538 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4540 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
4542 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 * Emit a native call to the (vararg) mono_array_new_va () icall wrapper to
 * allocate a multi-dimensional array of the given RANK, taking the dimension
 * arguments from SP. Marks the cfg as containing varargs and disables LLVM,
 * since LLVM cannot compile the vararg calling convention used here.
 * NOTE: lines are elided in this listing; the full body is not visible.
 */
4548 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4550 MonoJitICallInfo *info;
4552 /* Need to register the icall so it gets an icall wrapper */
4553 info = mono_get_array_new_va_icall (rank);
4555 cfg->flags |= MONO_CFG_HAS_VARARGS;
4557 /* mono_array_new_va () needs a vararg calling convention */
4558 cfg->disable_llvm = TRUE;
4560 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4561 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 * Materialize the GOT address into cfg->got_var, exactly once per method:
 * an OP_LOAD_GOTADDR is prepended to the entry basic block, and a dummy use
 * is appended to the exit block so the variable stays live for the whole
 * method even though real uses may only be generated by the back ends.
 * No-op if there is no got_var or it was already allocated.
 */
4565 mono_emit_load_got_addr (MonoCompile *cfg)
4567 MonoInst *getaddr, *dummy_use;
4569 if (!cfg->got_var || cfg->got_var_allocated)
4572 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
4573 getaddr->cil_code = cfg->header->code;
4574 getaddr->dreg = cfg->got_var->dreg;
4576 /* Add it to the start of the first bblock */
4577 if (cfg->bb_entry->code) {
4578 getaddr->next = cfg->bb_entry->code;
4579 cfg->bb_entry->code = getaddr;
4582 MONO_ADD_INS (cfg->bb_entry, getaddr);
4584 cfg->got_var_allocated = TRUE;
4587 * Add a dummy use to keep the got_var alive, since real uses might
4588 * only be generated by the back ends.
4589 * Add it to end_bblock, so the variable's lifetime covers the whole
4591 * It would be better to make the usage of the got var explicit in all
4592 * cases when the backend needs it (i.e. calls, throw etc.), so this
4593 * wouldn't be needed.
4595 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
4596 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Lazily-initialized inline size limit, overridable via MONO_INLINELIMIT.
 * NOTE(review): read/written without locking — presumably only racy in a
 * benign way (both racers compute the same value); confirm. */
4599 static int inline_limit;
4600 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 * Decide whether METHOD is eligible for inlining into the method being
 * compiled in CFG. Returns non-zero when inlining is allowed (elided
 * return statements are not visible in this listing). Checks, in order:
 * generic sharing, inline depth, special LMF-op cases, header summary
 * (size/locals), NoInlining/Synchronized/MarshalByRef, the size limit,
 * class cctor constraints, CAS declarative security, and soft-float R4
 * restrictions.
 */
4603 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
4605 MonoMethodHeaderSummary header;
4607 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4608 MonoMethodSignature *sig = mono_method_signature (method);
4612 if (cfg->generic_sharing_context)
/* hard cap on recursive inlining depth */
4615 if (cfg->inline_depth > 10)
4618 #ifdef MONO_ARCH_HAVE_LMF_OPS
4619 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
4620 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
4621 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
4626 if (!mono_method_get_header_summary (method, &header))
4629 /*runtime, icall and pinvoke are checked by summary call*/
4630 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
4631 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
4632 (mono_class_is_marshalbyref (method->klass)) ||
4636 /* also consider num_locals? */
4637 /* Do the size check early to avoid creating vtables */
4638 if (!inline_limit_inited) {
4639 if (g_getenv ("MONO_INLINELIMIT"))
4640 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"))
4642 inline_limit = INLINE_LENGTH_LIMIT;
4643 inline_limit_inited = TRUE;
/* AggressiveInlining overrides the size limit */
4645 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
4649 * if we can initialize the class of the method right away, we do,
4650 * otherwise we don't allow inlining if the class needs initialization,
4651 * since it would mean inserting a call to mono_runtime_class_init()
4652 * inside the inlined code
4654 if (!(cfg->opt & MONO_OPT_SHARED)) {
4655 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
4656 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
4657 vtable = mono_class_vtable (cfg->domain, method->klass);
4660 if (!cfg->compile_aot)
4661 mono_runtime_class_init (vtable);
4662 } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
4663 if (cfg->run_cctors && method->klass->has_cctor) {
4664 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
4665 if (!method->klass->runtime_info)
4666 /* No vtable created yet */
4668 vtable = mono_class_vtable (cfg->domain, method->klass);
4671 /* This makes so that inline cannot trigger */
4672 /* .cctors: too many apps depend on them */
4673 /* running with a specific order... */
4674 if (! vtable->initialized)
4676 mono_runtime_class_init (vtable);
4678 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
4679 if (!method->klass->runtime_info)
4680 /* No vtable created yet */
4682 vtable = mono_class_vtable (cfg->domain, method->klass);
4685 if (!vtable->initialized)
4690 * If we're compiling for shared code
4691 * the cctor will need to be run at aot method load time, for example,
4692 * or at the end of the compilation of the inlining method.
4694 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
4699 * CAS - do not inline methods with declarative security
4700 * Note: this has to be before any possible return TRUE;
4702 if (mono_security_method_has_declsec (method))
4705 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* On soft-float targets, R4 returns/params cannot be inlined. */
4706 if (mono_arch_is_soft_float ()) {
4708 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4710 for (i = 0; i < sig->param_count; ++i)
4711 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 * Decide whether a static-field access in METHOD requires emitting a call
 * to run KLASS's class constructor first. JIT (non-AOT) compilation can
 * skip it once the vtable is initialized; BeforeFieldInit classes and
 * instance methods of the class itself get relaxed treatment.
 * NOTE: return statements are elided in this listing.
 */
4720 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
4722 if (!cfg->compile_aot) {
4724 if (vtable->initialized)
4728 if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
4729 if (cfg->method == method)
4733 if (!mono_class_needs_cctor_run (klass, method))
4736 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
4737 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 * Emit IR computing the address of ARR [INDEX] for a one-dimensional array
 * of element type KLASS; emits a bounds check when BCHECK is set.
 * Uses an x86/amd64 LEA fast path for power-of-two element sizes, and an
 * rgctx lookup for the element size under gsharedvt.
 */
4744 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4748 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
4751 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
4754 mono_class_init (klass);
4755 size = mono_class_array_element_size (klass);
4758 mult_reg = alloc_preg (cfg);
4759 array_reg = arr->dreg;
4760 index_reg = index->dreg;
4762 #if SIZEOF_REGISTER == 8
4763 /* The array reg is 64 bits but the index reg is only 32 */
4764 if (COMPILE_LLVM (cfg)) {
4766 index2_reg = index_reg;
4768 index2_reg = alloc_preg (cfg);
4769 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
4772 if (index->type == STACK_I8) {
4773 index2_reg = alloc_preg (cfg);
4774 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4776 index2_reg = index_reg;
4781 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
4783 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Fast path: fold the scale into a single LEA for sizes 1/2/4/8. */
4784 if (size == 1 || size == 2 || size == 4 || size == 8) {
4785 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4787 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
4788 ins->klass = mono_class_get_element_class (klass);
4789 ins->type = STACK_MP;
4795 add_reg = alloc_ireg_mp (cfg);
4798 MonoInst *rgctx_ins;
/* gsharedvt: the element size is only known at runtime via the rgctx. */
4801 g_assert (cfg->generic_sharing_context);
4802 context_used = mini_class_check_context_used (cfg, klass);
4803 g_assert (context_used);
4804 rgctx_ins = emit_get_gsharedvt_info (cfg, &klass->byval_arg, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
4805 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
4807 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
4809 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4810 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4811 ins->klass = mono_class_get_element_class (klass);
4812 ins->type = STACK_MP;
4813 MONO_ADD_INS (cfg->cbb, ins);
4818 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 * Emit IR computing the address of a 2-dimensional array element
 * ARR [INDEX1, INDEX2] with per-dimension lower-bound adjustment and
 * range checks against the MonoArrayBounds pairs. Only built on targets
 * with native multiply (depends on OP_LMUL/OP_PMUL).
 */
4820 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4822 int bounds_reg = alloc_preg (cfg);
4823 int add_reg = alloc_ireg_mp (cfg);
4824 int mult_reg = alloc_preg (cfg);
4825 int mult2_reg = alloc_preg (cfg);
4826 int low1_reg = alloc_preg (cfg);
4827 int low2_reg = alloc_preg (cfg);
4828 int high1_reg = alloc_preg (cfg);
4829 int high2_reg = alloc_preg (cfg);
4830 int realidx1_reg = alloc_preg (cfg);
4831 int realidx2_reg = alloc_preg (cfg);
4832 int sum_reg = alloc_preg (cfg);
4833 int index1, index2, tmpreg;
4837 mono_class_init (klass);
4838 size = mono_class_array_element_size (klass);
4840 index1 = index_ins1->dreg;
4841 index2 = index_ins2->dreg;
4843 #if SIZEOF_REGISTER == 8
4844 /* The array reg is 64 bits but the index reg is only 32 */
4845 if (COMPILE_LLVM (cfg)) {
4848 tmpreg = alloc_preg (cfg);
4849 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
4851 tmpreg = alloc_preg (cfg);
4852 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
4856 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
4860 /* range checking */
4861 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4862 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* Check dimension 0: realidx1 = index1 - lower_bound; must be < length. */
4864 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4865 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4866 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4867 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4868 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4869 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4870 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Check dimension 1: bounds [1] lives sizeof (MonoArrayBounds) past bounds [0]. */
4872 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4873 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4874 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4875 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4876 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
4877 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4878 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* address = arr + (realidx1 * length2 + realidx2) * size + vector offset */
4880 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4881 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4882 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4883 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4884 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4886 ins->type = STACK_MP;
4888 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 * Dispatch element-address computation for a call to an array Address/Get/Set
 * accessor CMETHOD: rank 1 and (with intrinsics enabled) rank 2 get inline
 * IR; higher ranks fall back to a marshalled array-address helper method.
 * IS_SET excludes the trailing value parameter when computing the rank.
 */
4895 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4899 MonoMethod *addr_method;
4902 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4905 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
4907 #ifndef MONO_ARCH_EMULATE_MUL_DIV
4908 /* emit_ldelema_2 depends on OP_LMUL */
4909 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
4910 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
4914 element_size = mono_class_array_element_size (cmethod->klass->element_class);
4915 addr_method = mono_marshal_get_array_address (rank, element_size);
4916 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every break instruction / Debugger.Break (). */
4921 static MonoBreakPolicy
4922 always_insert_breakpoint (MonoMethod *method)
4924 return MONO_BREAK_POLICY_ALWAYS;
/* Current policy callback; replaced via mono_set_break_policy (). */
4927 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4930 * mono_set_break_policy:
4931 * policy_callback: the new callback function
4933 * Allow embedders to decide whether to actually obey breakpoint instructions
4934 * (both break IL instructions and Debugger.Break () method calls), for example
4935 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4936 * untrusted or semi-trusted code.
4938 * @policy_callback will be called every time a break point instruction needs to
4939 * be inserted with the method argument being the method that calls Debugger.Break()
4940 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4941 * if it wants the breakpoint to not be effective in the given method.
4942 * #MONO_BREAK_POLICY_ALWAYS is the default.
4945 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
4947 if (policy_callback)
4948 break_policy_func = policy_callback;
/* NULL resets to the default always-break policy */
4950 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint: (sic — the misspelled name is the established
 * internal identifier; do not rename without updating all callers)
 * Consult the installed break policy for METHOD. Return values for the
 * ALWAYS/NEVER/ON_DBG/default cases are elided in this listing.
 */
4954 should_insert_brekpoint (MonoMethod *method) {
4955 switch (break_policy_func (method)) {
4956 case MONO_BREAK_POLICY_ALWAYS:
4958 case MONO_BREAK_POLICY_NEVER:
4960 case MONO_BREAK_POLICY_ON_DBG:
4961 g_warning ("mdb no longer supported");
4964 g_warning ("Incorrect value returned from break policy callback");
4969 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 * Inline the Array Get/SetGenericValueImpl icalls as a direct element
 * load/store through the element address. args [0] = array, args [1] = index,
 * args [2] = address of the value; IS_SET selects store vs. load direction.
 * A write barrier is emitted when storing a reference-typed element.
 */
4971 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4973 MonoInst *addr, *store, *load;
4974 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4976 /* the bounds check is already done by the callers */
4977 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
4979 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4980 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
4981 if (mini_type_is_reference (cfg, fsig->params [2]))
4982 emit_write_barrier (cfg, addr, load);
4984 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4985 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* Whether KLASS is treated as a reference type under the current sharing mode. */
4992 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4994 return mini_type_is_reference (cfg, &klass->byval_arg);
/*
 * emit_array_store:
 * Emit IR for storing sp [2] into sp [0] [sp [1]] for element type KLASS.
 * Reference-element stores (except a known-null constant) go through the
 * virtual stelemref helper, which performs the covariance type check.
 * Otherwise: gsharedvt elements use a computed address with OP_STOREV_MEMBASE,
 * constant indexes fold the offset into the store, and the general case
 * computes the address then stores, with a write barrier for reference types.
 * SAFETY_CHECKS controls bounds/type checking.
 */
4998 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
5000 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
5001 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
5002 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
5003 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
5004 MonoInst *iargs [3];
5007 mono_class_setup_vtable (obj_array);
5008 g_assert (helper->slot);
5010 if (sp [0]->type != STACK_OBJ)
5012 if (sp [2]->type != STACK_OBJ)
5019 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
5023 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
5026 // FIXME-VT: OP_ICONST optimization
5027 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
5028 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5029 ins->opcode = OP_STOREV_MEMBASE;
5030 } else if (sp [1]->opcode == OP_ICONST) {
/* Constant index: fold element offset into the store instruction. */
5031 int array_reg = sp [0]->dreg;
5032 int index_reg = sp [1]->dreg;
5033 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
5036 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5037 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
5039 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5040 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5041 if (generic_class_is_reference_type (cfg, klass))
5042 emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 * Inline the Array.UnsafeStore/UnsafeLoad intrinsics: a store (via
 * emit_array_store with safety checks disabled) or a direct element load,
 * with the element type taken from the store parameter or the return type.
 */
5049 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5054 eklass = mono_class_from_mono_type (fsig->params [2]);
5056 eklass = mono_class_from_mono_type (fsig->ret);
5059 return emit_array_store (cfg, eklass, args, FALSE);
5061 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5062 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
/*
 * is_unsafe_mov_compatible:
 * Whether values of PARAM_KLASS may be reinterpreted as RETURN_KLASS by
 * Array.UnsafeMov: both must be valuetypes without GC references, both
 * struct or both primitive/enum (the JIT handles these differently),
 * neither floating point, and both the same size.
 */
5068 is_unsafe_mov_compatible (MonoClass *param_klass, MonoClass *return_klass)
5072 //Only allow for valuetypes
5073 if (!param_klass->valuetype || !return_klass->valuetype)
5077 if (param_klass->has_references || return_klass->has_references)
5080 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5081 if ((MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5082 (!MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)))
5085 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5086 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8)
5089 //And have the same size
5090 if (mono_class_value_size (param_klass, &align) != mono_class_value_size (return_klass, &align))
/*
 * emit_array_unsafe_mov:
 * Inline Array.UnsafeMov as a bitwise reinterpret when the parameter and
 * return types (or their element types, for rank-1 arrays of them) are
 * layout-compatible per is_unsafe_mov_compatible (). The emitted-IR lines
 * for the success cases are elided in this listing.
 */
5096 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5098 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5099 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
5101 //Valuetypes that are semantically equivalent
5102 if (is_unsafe_mov_compatible (param_klass, return_klass))
5105 //Arrays of valuetypes that are semantically equivalent
5106 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (param_klass->element_class, return_klass->element_class))
/*
 * mini_emit_inst_for_ctor:
 * Intrinsic expansion hook for constructor calls: tries SIMD intrinsics
 * first (when MONO_OPT_SIMD is enabled on SIMD-capable targets), then
 * native-type intrinsics. Returns the replacement instruction or NULL.
 */
5113 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5115 #ifdef MONO_ARCH_SIMD_INTRINSICS
5116 MonoInst *ins = NULL;
5118 if (cfg->opt & MONO_OPT_SIMD) {
5119 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5125 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/* Emit an OP_MEMORY_BARRIER of the given KIND into the current bblock. */
5129 emit_memory_barrier (MonoCompile *cfg, int kind)
5131 MonoInst *ins = NULL;
5132 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5133 MONO_ADD_INS (cfg->cbb, ins);
5134 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 * LLVM-backend intrinsic expansion for System.Math calls: Sin/Cos/Sqrt/Abs
 * become single R8 opcodes, and (with MONO_OPT_CMOV) Min/Max over
 * I4/U4/I8/U8 become cmov-style min/max opcodes. The opcode-assignment
 * lines for several cases are elided in this listing.
 */
5140 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5142 MonoInst *ins = NULL;
5145 /* The LLVM backend supports these intrinsics */
5146 if (cmethod->klass == mono_defaults.math_class) {
5147 if (strcmp (cmethod->name, "Sin") == 0) {
5149 } else if (strcmp (cmethod->name, "Cos") == 0) {
5151 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
5153 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
5158 MONO_INST_NEW (cfg, ins, opcode);
5159 ins->type = STACK_R8;
5160 ins->dreg = mono_alloc_freg (cfg);
5161 ins->sreg1 = args [0]->dreg;
5162 MONO_ADD_INS (cfg->cbb, ins);
5166 if (cfg->opt & MONO_OPT_CMOV) {
5167 if (strcmp (cmethod->name, "Min") == 0) {
5168 if (fsig->params [0]->type == MONO_TYPE_I4)
5170 if (fsig->params [0]->type == MONO_TYPE_U4)
5171 opcode = OP_IMIN_UN;
5172 else if (fsig->params [0]->type == MONO_TYPE_I8)
5174 else if (fsig->params [0]->type == MONO_TYPE_U8)
5175 opcode = OP_LMIN_UN;
5176 } else if (strcmp (cmethod->name, "Max") == 0) {
5177 if (fsig->params [0]->type == MONO_TYPE_I4)
5179 if (fsig->params [0]->type == MONO_TYPE_U4)
5180 opcode = OP_IMAX_UN;
5181 else if (fsig->params [0]->type == MONO_TYPE_I8)
5183 else if (fsig->params [0]->type == MONO_TYPE_U8)
5184 opcode = OP_LMAX_UN;
5189 MONO_INST_NEW (cfg, ins, opcode);
5190 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5191 ins->dreg = mono_alloc_ireg (cfg);
5192 ins->sreg1 = args [0]->dreg;
5193 ins->sreg2 = args [1]->dreg;
5194 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 * Intrinsic expansions that are valid even under generic sharing:
 * the Array.UnsafeStore/UnsafeLoad/UnsafeMov internal helpers.
 * Returns the replacement instruction, or falls through (return for the
 * non-intrinsic case is elided in this listing).
 */
5202 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5204 if (cmethod->klass == mono_defaults.array_class) {
5205 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5206 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5207 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5208 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5209 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5210 return emit_array_unsafe_mov (cfg, fsig, args);
5217 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5219 MonoInst *ins = NULL;
5221 static MonoClass *runtime_helpers_class = NULL;
5222 if (! runtime_helpers_class)
5223 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
5224 "System.Runtime.CompilerServices", "RuntimeHelpers");
5226 if (cmethod->klass == mono_defaults.string_class) {
5227 if (strcmp (cmethod->name, "get_Chars") == 0) {
5228 int dreg = alloc_ireg (cfg);
5229 int index_reg = alloc_preg (cfg);
5230 int mult_reg = alloc_preg (cfg);
5231 int add_reg = alloc_preg (cfg);
5233 #if SIZEOF_REGISTER == 8
5234 /* The array reg is 64 bits but the index reg is only 32 */
5235 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5237 index_reg = args [1]->dreg;
5239 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5241 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5242 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
5243 add_reg = ins->dreg;
5244 /* Avoid a warning */
5246 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5249 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5250 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5251 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5252 add_reg, G_STRUCT_OFFSET (MonoString, chars));
5254 type_from_op (ins, NULL, NULL);
5256 } else if (strcmp (cmethod->name, "get_Length") == 0) {
5257 int dreg = alloc_ireg (cfg);
5258 /* Decompose later to allow more optimizations */
5259 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5260 ins->type = STACK_I4;
5261 ins->flags |= MONO_INST_FAULT;
5262 cfg->cbb->has_array_access = TRUE;
5263 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5266 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
5267 int mult_reg = alloc_preg (cfg);
5268 int add_reg = alloc_preg (cfg);
5270 /* The corlib functions check for oob already. */
5271 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
5272 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5273 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
5274 return cfg->cbb->last_ins;
5277 } else if (cmethod->klass == mono_defaults.object_class) {
5279 if (strcmp (cmethod->name, "GetType") == 0) {
5280 int dreg = alloc_ireg_ref (cfg);
5281 int vt_reg = alloc_preg (cfg);
5282 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
5283 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
5284 type_from_op (ins, NULL, NULL);
5287 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
5288 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
5289 int dreg = alloc_ireg (cfg);
5290 int t1 = alloc_ireg (cfg);
5292 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5293 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5294 ins->type = STACK_I4;
5298 } else if (strcmp (cmethod->name, ".ctor") == 0) {
5299 MONO_INST_NEW (cfg, ins, OP_NOP);
5300 MONO_ADD_INS (cfg->cbb, ins);
5304 } else if (cmethod->klass == mono_defaults.array_class) {
5305 if (!cfg->gsharedvt && strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
5306 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
5308 #ifndef MONO_BIG_ARRAYS
5310 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5313 if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5314 int dreg = alloc_ireg (cfg);
5315 int bounds_reg = alloc_ireg_mp (cfg);
5316 MonoBasicBlock *end_bb, *szarray_bb;
5317 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5319 NEW_BBLOCK (cfg, end_bb);
5320 NEW_BBLOCK (cfg, szarray_bb);
5322 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5323 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
5324 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5325 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5326 /* Non-szarray case */
5328 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5329 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
5331 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5332 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5333 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5334 MONO_START_BB (cfg, szarray_bb);
5337 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5338 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
5340 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5341 MONO_START_BB (cfg, end_bb);
5343 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5344 ins->type = STACK_I4;
5350 if (cmethod->name [0] != 'g')
5353 if (strcmp (cmethod->name, "get_Rank") == 0) {
5354 int dreg = alloc_ireg (cfg);
5355 int vtable_reg = alloc_preg (cfg);
5356 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5357 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
5358 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5359 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
5360 type_from_op (ins, NULL, NULL);
5363 } else if (strcmp (cmethod->name, "get_Length") == 0) {
5364 int dreg = alloc_ireg (cfg);
5366 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5367 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
5368 type_from_op (ins, NULL, NULL);
5373 } else if (cmethod->klass == runtime_helpers_class) {
5375 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
5376 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
5380 } else if (cmethod->klass == mono_defaults.thread_class) {
5381 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
5382 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5383 MONO_ADD_INS (cfg->cbb, ins);
5385 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
5386 return emit_memory_barrier (cfg, FullBarrier);
5388 } else if (cmethod->klass == mono_defaults.monitor_class) {
5390 /* FIXME this should be integrated to the check below once we support the trampoline version */
5391 #if defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
5392 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) {
5393 MonoMethod *fast_method = NULL;
5395 /* Avoid infinite recursion */
5396 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN && !strcmp (cfg->method->name, "FastMonitorEnterV4"))
5399 fast_method = mono_monitor_get_fast_path (cmethod);
5403 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
5407 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
5408 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
5411 if (COMPILE_LLVM (cfg)) {
5413 * Pass the argument normally, the LLVM backend will handle the
5414 * calling convention problems.
5416 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5418 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
5419 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5420 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5421 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5424 return (MonoInst*)call;
5425 } else if (strcmp (cmethod->name, "Exit") == 0) {
5428 if (COMPILE_LLVM (cfg)) {
5429 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5431 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
5432 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5433 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5434 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5437 return (MonoInst*)call;
5439 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
5441 MonoMethod *fast_method = NULL;
5443 /* Avoid infinite recursion */
5444 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
5445 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
5446 strcmp (cfg->method->name, "FastMonitorExit") == 0))
5449 if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) ||
5450 strcmp (cmethod->name, "Exit") == 0)
5451 fast_method = mono_monitor_get_fast_path (cmethod);
5455 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
5458 } else if (cmethod->klass->image == mono_defaults.corlib &&
5459 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5460 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
5463 #if SIZEOF_REGISTER == 8
5464 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
5465 /* 64 bit reads are already atomic */
5466 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
5467 ins->dreg = mono_alloc_preg (cfg);
5468 ins->inst_basereg = args [0]->dreg;
5469 ins->inst_offset = 0;
5470 MONO_ADD_INS (cfg->cbb, ins);
5474 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
5475 if (strcmp (cmethod->name, "Increment") == 0) {
5476 MonoInst *ins_iconst;
5479 if (fsig->params [0]->type == MONO_TYPE_I4) {
5480 opcode = OP_ATOMIC_ADD_NEW_I4;
5481 cfg->has_atomic_add_new_i4 = TRUE;
5483 #if SIZEOF_REGISTER == 8
5484 else if (fsig->params [0]->type == MONO_TYPE_I8)
5485 opcode = OP_ATOMIC_ADD_NEW_I8;
5488 if (!mono_arch_opcode_supported (opcode))
5490 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5491 ins_iconst->inst_c0 = 1;
5492 ins_iconst->dreg = mono_alloc_ireg (cfg);
5493 MONO_ADD_INS (cfg->cbb, ins_iconst);
5495 MONO_INST_NEW (cfg, ins, opcode);
5496 ins->dreg = mono_alloc_ireg (cfg);
5497 ins->inst_basereg = args [0]->dreg;
5498 ins->inst_offset = 0;
5499 ins->sreg2 = ins_iconst->dreg;
5500 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5501 MONO_ADD_INS (cfg->cbb, ins);
5503 } else if (strcmp (cmethod->name, "Decrement") == 0) {
5504 MonoInst *ins_iconst;
5507 if (fsig->params [0]->type == MONO_TYPE_I4) {
5508 opcode = OP_ATOMIC_ADD_NEW_I4;
5509 cfg->has_atomic_add_new_i4 = TRUE;
5511 #if SIZEOF_REGISTER == 8
5512 else if (fsig->params [0]->type == MONO_TYPE_I8)
5513 opcode = OP_ATOMIC_ADD_NEW_I8;
5516 if (!mono_arch_opcode_supported (opcode))
5518 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5519 ins_iconst->inst_c0 = -1;
5520 ins_iconst->dreg = mono_alloc_ireg (cfg);
5521 MONO_ADD_INS (cfg->cbb, ins_iconst);
5523 MONO_INST_NEW (cfg, ins, opcode);
5524 ins->dreg = mono_alloc_ireg (cfg);
5525 ins->inst_basereg = args [0]->dreg;
5526 ins->inst_offset = 0;
5527 ins->sreg2 = ins_iconst->dreg;
5528 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5529 MONO_ADD_INS (cfg->cbb, ins);
5531 } else if (strcmp (cmethod->name, "Add") == 0) {
5534 if (fsig->params [0]->type == MONO_TYPE_I4) {
5535 opcode = OP_ATOMIC_ADD_NEW_I4;
5536 cfg->has_atomic_add_new_i4 = TRUE;
5538 #if SIZEOF_REGISTER == 8
5539 else if (fsig->params [0]->type == MONO_TYPE_I8)
5540 opcode = OP_ATOMIC_ADD_NEW_I8;
5543 if (!mono_arch_opcode_supported (opcode))
5545 MONO_INST_NEW (cfg, ins, opcode);
5546 ins->dreg = mono_alloc_ireg (cfg);
5547 ins->inst_basereg = args [0]->dreg;
5548 ins->inst_offset = 0;
5549 ins->sreg2 = args [1]->dreg;
5550 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
5551 MONO_ADD_INS (cfg->cbb, ins);
5554 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
5556 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
5557 if (strcmp (cmethod->name, "Exchange") == 0) {
5559 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
5561 if (fsig->params [0]->type == MONO_TYPE_I4) {
5562 opcode = OP_ATOMIC_EXCHANGE_I4;
5563 cfg->has_atomic_exchange_i4 = TRUE;
5565 #if SIZEOF_REGISTER == 8
5566 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
5567 (fsig->params [0]->type == MONO_TYPE_I))
5568 opcode = OP_ATOMIC_EXCHANGE_I8;
5570 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I)) {
5571 opcode = OP_ATOMIC_EXCHANGE_I4;
5572 cfg->has_atomic_exchange_i4 = TRUE;
5578 if (!mono_arch_opcode_supported (opcode))
5581 MONO_INST_NEW (cfg, ins, opcode);
5582 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
5583 ins->inst_basereg = args [0]->dreg;
5584 ins->inst_offset = 0;
5585 ins->sreg2 = args [1]->dreg;
5586 MONO_ADD_INS (cfg->cbb, ins);
5588 switch (fsig->params [0]->type) {
5590 ins->type = STACK_I4;
5594 ins->type = STACK_I8;
5596 case MONO_TYPE_OBJECT:
5597 ins->type = STACK_OBJ;
5600 g_assert_not_reached ();
5603 if (cfg->gen_write_barriers && is_ref)
5604 emit_write_barrier (cfg, args [0], args [1]);
5606 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
5608 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
5609 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
5611 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
5612 if (fsig->params [1]->type == MONO_TYPE_I4)
5614 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
5615 size = sizeof (gpointer);
5616 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
5619 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
5621 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
5622 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5623 ins->sreg1 = args [0]->dreg;
5624 ins->sreg2 = args [1]->dreg;
5625 ins->sreg3 = args [2]->dreg;
5626 ins->type = STACK_I4;
5627 MONO_ADD_INS (cfg->cbb, ins);
5628 cfg->has_atomic_cas_i4 = TRUE;
5629 } else if (size == 8) {
5630 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I8))
5632 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
5633 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5634 ins->sreg1 = args [0]->dreg;
5635 ins->sreg2 = args [1]->dreg;
5636 ins->sreg3 = args [2]->dreg;
5637 ins->type = STACK_I8;
5638 MONO_ADD_INS (cfg->cbb, ins);
5640 /* g_assert_not_reached (); */
5642 if (cfg->gen_write_barriers && is_ref)
5643 emit_write_barrier (cfg, args [0], args [1]);
5645 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
5647 if (strcmp (cmethod->name, "MemoryBarrier") == 0)
5648 ins = emit_memory_barrier (cfg, FullBarrier);
5652 } else if (cmethod->klass->image == mono_defaults.corlib) {
5653 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
5654 && strcmp (cmethod->klass->name, "Debugger") == 0) {
5655 if (should_insert_brekpoint (cfg->method)) {
5656 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
5658 MONO_INST_NEW (cfg, ins, OP_NOP);
5659 MONO_ADD_INS (cfg->cbb, ins);
5663 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
5664 && strcmp (cmethod->klass->name, "Environment") == 0) {
5666 EMIT_NEW_ICONST (cfg, ins, 1);
5668 EMIT_NEW_ICONST (cfg, ins, 0);
5672 } else if (cmethod->klass == mono_defaults.math_class) {
5674 * There is general branches code for Min/Max, but it does not work for
5676 * http://everything2.com/?node_id=1051618
5678 } else if ((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") || !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) && !strcmp (cmethod->klass->name, "Selector") && !strcmp (cmethod->name, "GetHandle") && cfg->compile_aot && (args [0]->opcode == OP_GOT_ENTRY || args[0]->opcode == OP_AOTCONST)) {
5679 #ifdef MONO_ARCH_HAVE_OBJC_GET_SELECTOR
5681 MonoJumpInfoToken *ji;
5684 cfg->disable_llvm = TRUE;
5686 if (args [0]->opcode == OP_GOT_ENTRY) {
5687 pi = args [0]->inst_p1;
5688 g_assert (pi->opcode == OP_PATCH_INFO);
5689 g_assert ((int)pi->inst_p1 == MONO_PATCH_INFO_LDSTR);
5692 g_assert ((int)args [0]->inst_p1 == MONO_PATCH_INFO_LDSTR);
5693 ji = args [0]->inst_p0;
5696 NULLIFY_INS (args [0]);
5699 s = mono_ldstr (cfg->domain, ji->image, mono_metadata_token_index (ji->token));
5700 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
5701 ins->dreg = mono_alloc_ireg (cfg);
5703 ins->inst_p0 = mono_string_to_utf8 (s);
5704 MONO_ADD_INS (cfg->cbb, ins);
5709 #ifdef MONO_ARCH_SIMD_INTRINSICS
5710 if (cfg->opt & MONO_OPT_SIMD) {
5711 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5717 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
5721 if (COMPILE_LLVM (cfg)) {
5722 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
5727 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
5731 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *   Redirect selected runtime-internal calls to specialized implementations.
 *   The visible case handles String.InternalAllocateStr: when allocation
 *   profiling is off and MONO_OPT_SHARED is unset, the call is replaced by a
 *   call to the GC's managed string allocator (vtable + length as arguments).
 *   NOTE(review): interior lines are elided in this excerpt; code kept verbatim.
 */
5734 inline static MonoInst*
5735 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
5736 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
5738 if (method->klass == mono_defaults.string_class) {
5739 /* managed string allocation support */
5740 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
5741 MonoInst *iargs [2];
5742 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
5743 MonoMethod *managed_alloc = NULL;
5745 g_assert (vtable); /* Should not fail since it is System.String */
/* Cross compilers can't call mono_gc_get_managed_allocator (native code). */
5746 #ifndef MONO_CROSS_COMPILE
5747 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE);
5751 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
5752 iargs [1] = args [0];
5753 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *   Before inlining, copy the call arguments on the evaluation stack (SP)
 *   into freshly created local variables, and point cfg->args at them so the
 *   inlined body's argument loads resolve to these locals.
 */
5760 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
5762 MonoInst *store, *temp;
/* slot 0 is the implicit 'this' when sig->hasthis; its type comes from the stack */
5765 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
5766 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
5769 * FIXME: We should use *args++ = sp [0], but that would mean the arg
5770 * would be different than the MonoInst's used to represent arguments, and
5771 * the ldelema implementation can't deal with that.
5772 * Solution: When ldelema is used on an inline argument, create a var for
5773 * it, emit ldelema on that var, and emit the saving code below in
5774 * inline_method () if needed.
5776 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
5777 cfg->args [i] = temp;
5778 /* This uses cfg->args [i] which is set by the preceding line */
5779 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
5780 store->cil_code = sp [0]->cil_code;
5785 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
5786 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
5788 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *   Debugging aid: only allow inlining of callees whose full name starts with
 *   the prefix given in MONO_INLINE_CALLED_METHOD_NAME_LIMIT. The env var is
 *   read once and cached in a function-local static.
 */
5790 check_inline_called_method_name_limit (MonoMethod *called_method)
5793 static const char *limit = NULL;
5795 if (limit == NULL) {
5796 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
5798 if (limit_string != NULL)
5799 limit = limit_string;
/* empty limit means "no restriction" (handled in elided code) */
5804 if (limit [0] != '\0') {
5805 char *called_method_name = mono_method_full_name (called_method, TRUE);
5807 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
5808 g_free (called_method_name);
5810 //return (strncmp_result <= 0);
5811 return (strncmp_result == 0);
5818 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *   Debugging aid: only allow inlining when the *caller*'s full name starts
 *   with the prefix in MONO_INLINE_CALLER_METHOD_NAME_LIMIT. Mirrors
 *   check_inline_called_method_name_limit () but filters on the caller side.
 */
5820 check_inline_caller_method_name_limit (MonoMethod *caller_method)
5823 static const char *limit = NULL;
5825 if (limit == NULL) {
5826 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
5827 if (limit_string != NULL) {
5828 limit = limit_string;
/* empty limit means "no restriction" (handled in elided code) */
5834 if (limit [0] != '\0') {
5835 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
5837 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
5838 g_free (caller_method_name);
5840 //return (strncmp_result <= 0);
5841 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *   Emit IR that stores the zero/default value of type RTYPE into vreg DREG:
 *   NULL for pointers/references, 0 for integer types, 0.0 for R4/R8 (loaded
 *   from a shared static constant), and VZERO for value types (including
 *   generic instances and VT-constrained type variables).
 */
5849 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
/* shared storage for the 0.0 constant referenced by OP_R8CONST below */
5851 static double r8_0 = 0.0;
5855 rtype = mini_replace_type (rtype);
5859 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
5860 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
5861 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5862 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
5863 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
5864 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
5865 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5866 ins->type = STACK_R8;
5867 ins->inst_p0 = (void*)&r8_0;
5869 MONO_ADD_INS (cfg->cbb, ins);
5870 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
5871 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
5872 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
5873 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
5874 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* fallback: treat remaining types as pointer-sized and store NULL */
5876 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *   Like emit_init_rvar (), but emits OP_DUMMY_* initializations — placeholder
 *   definitions that keep the IR/SSA valid without generating real stores.
 *   Falls back to emit_init_rvar () for types with no dummy opcode.
 */
5881 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
5885 rtype = mini_replace_type (rtype);
5889 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
5890 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
5891 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
5892 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
5893 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
5894 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
5895 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
5896 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
5897 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
5898 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
5899 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
5900 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
/* no dummy opcode for this type: emit a real zero init instead */
5902 emit_init_rvar (cfg, dreg, rtype);
5906 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
/*
 * emit_init_local:
 *   Initialize IL local LOCAL of type TYPE. Under soft-float a temporary
 *   vreg is initialized and then stored to the local; otherwise the local's
 *   dreg is initialized directly (real init or dummy init, per INIT).
 */
5908 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
5910 MonoInst *var = cfg->locals [local];
5911 if (COMPILE_SOFT_FLOAT (cfg)) {
5913 int reg = alloc_dreg (cfg, var->type);
5914 emit_init_rvar (cfg, reg, type);
5915 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
5918 emit_init_rvar (cfg, var->dreg, type);
5920 emit_dummy_init_rvar (cfg, var->dreg, type);
/*
 * inline_method:
 *   Attempt to inline CMETHOD at the current IL location. Saves the relevant
 *   cfg->* compilation state, compiles the callee's IL into fresh start/end
 *   bblocks via mono_method_to_ir (), restores the state, and accepts the
 *   result when the callee's cost is small enough (or INLINE_ALWAYS is set).
 *   On acceptance the begin/end bblocks are merged away where possible and
 *   the return value (if any) is loaded from RVAR; on rejection the newly
 *   created bblocks are discarded and any pending exception state is cleared.
 *   NOTE(review): interior lines are elided in this excerpt; code kept verbatim.
 */
5925 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
5926 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_always)
5928 MonoInst *ins, *rvar = NULL;
5929 MonoMethodHeader *cheader;
5930 MonoBasicBlock *ebblock, *sbblock;
/* saved compilation state, restored after the recursive mono_method_to_ir () */
5932 MonoMethod *prev_inlined_method;
5933 MonoInst **prev_locals, **prev_args;
5934 MonoType **prev_arg_types;
5935 guint prev_real_offset;
5936 GHashTable *prev_cbb_hash;
5937 MonoBasicBlock **prev_cil_offset_to_bb;
5938 MonoBasicBlock *prev_cbb;
5939 unsigned char* prev_cil_start;
5940 guint32 prev_cil_offset_to_bb_len;
5941 MonoMethod *prev_current_method;
5942 MonoGenericContext *prev_generic_context;
5943 gboolean ret_var_set, prev_ret_var_set, virtual = FALSE;
5945 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* optional env-var-based filters for debugging inliner issues */
5947 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
5948 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
5951 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
5952 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
5956 if (cfg->verbose_level > 2)
5957 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
5959 if (!cmethod->inline_info) {
5960 cfg->stat_inlineable_methods++;
5961 cmethod->inline_info = 1;
5964 /* allocate local variables */
5965 cheader = mono_method_get_header (cmethod);
5967 if (cheader == NULL || mono_loader_get_last_error ()) {
5968 MonoLoaderError *error = mono_loader_get_last_error ();
5971 mono_metadata_free_mh (cheader);
5972 if (inline_always && error)
5973 mono_cfg_set_exception (cfg, error->exception_type);
5975 mono_loader_clear_error ();
5979 /*Must verify before creating locals as it can cause the JIT to assert.*/
5980 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
5981 mono_metadata_free_mh (cheader);
5985 /* allocate space to store the return value */
5986 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
5987 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
5990 prev_locals = cfg->locals;
5991 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
5992 for (i = 0; i < cheader->num_locals; ++i)
5993 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
5995 /* allocate start and end blocks */
5996 /* This is needed so if the inline is aborted, we can clean up */
5997 NEW_BBLOCK (cfg, sbblock);
5998 sbblock->real_offset = real_offset;
6000 NEW_BBLOCK (cfg, ebblock);
6001 ebblock->block_num = cfg->num_bblocks++;
6002 ebblock->real_offset = real_offset;
/* save cfg state that mono_method_to_ir () overwrites while compiling the callee */
6004 prev_args = cfg->args;
6005 prev_arg_types = cfg->arg_types;
6006 prev_inlined_method = cfg->inlined_method;
6007 cfg->inlined_method = cmethod;
6008 cfg->ret_var_set = FALSE;
6009 cfg->inline_depth ++;
6010 prev_real_offset = cfg->real_offset;
6011 prev_cbb_hash = cfg->cbb_hash;
6012 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
6013 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
6014 prev_cil_start = cfg->cil_start;
6015 prev_cbb = cfg->cbb;
6016 prev_current_method = cfg->current_method;
6017 prev_generic_context = cfg->generic_context;
6018 prev_ret_var_set = cfg->ret_var_set;
/* non-static callvirt needs a 'this' null check in the inlined body */
6020 if (*ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
6023 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, virtual);
6025 ret_var_set = cfg->ret_var_set;
/* restore the saved cfg state */
6027 cfg->inlined_method = prev_inlined_method;
6028 cfg->real_offset = prev_real_offset;
6029 cfg->cbb_hash = prev_cbb_hash;
6030 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
6031 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
6032 cfg->cil_start = prev_cil_start;
6033 cfg->locals = prev_locals;
6034 cfg->args = prev_args;
6035 cfg->arg_types = prev_arg_types;
6036 cfg->current_method = prev_current_method;
6037 cfg->generic_context = prev_generic_context;
6038 cfg->ret_var_set = prev_ret_var_set;
6039 cfg->inline_depth --;
/* accept the inline if the callee was cheap enough (cost < 60) or forced */
6041 if ((costs >= 0 && costs < 60) || inline_always) {
6042 if (cfg->verbose_level > 2)
6043 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6045 cfg->stat_inlined_methods++;
6047 /* always add some code to avoid block split failures */
6048 MONO_INST_NEW (cfg, ins, OP_NOP);
6049 MONO_ADD_INS (prev_cbb, ins);
6051 prev_cbb->next_bb = sbblock;
6052 link_bblock (cfg, prev_cbb, sbblock);
6055 * Get rid of the begin and end bblocks if possible to aid local
6058 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
6060 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
6061 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
6063 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
6064 MonoBasicBlock *prev = ebblock->in_bb [0];
6065 mono_merge_basic_blocks (cfg, prev, ebblock);
6067 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
6068 mono_merge_basic_blocks (cfg, prev_cbb, prev);
6069 cfg->cbb = prev_cbb;
6073 * It's possible that the rvar is set in some prev bblock, but not in others.
6079 for (i = 0; i < ebblock->in_count; ++i) {
6080 bb = ebblock->in_bb [i];
6082 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
6085 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6095 * If the inlined method contains only a throw, then the ret var is not
6096 * set, so set it to a dummy value.
6099 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6101 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
6104 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* inline rejected: report, clear exception state and discard new bblocks */
6107 if (cfg->verbose_level > 2)
6108 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
6109 cfg->exception_type = MONO_EXCEPTION_NONE;
6110 mono_loader_clear_error ();
6112 /* This gets rid of the newly added bblocks */
6113 cfg->cbb = prev_cbb;
6115 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6120 * Some of these comments may well be out-of-date.
6121 * Design decisions: we do a single pass over the IL code (and we do bblock
6122 * splitting/merging in the few cases when it's required: a back jump to an IL
6123 * address that was not already seen as bblock starting point).
6124 * Code is validated as we go (full verification is still better left to metadata/verify.c).
6125 * Complex operations are decomposed in simpler ones right away. We need to let the
6126 * arch-specific code peek and poke inside this process somehow (except when the
6127 * optimizations can take advantage of the full semantic info of coarse opcodes).
6128 * All the opcodes of the form opcode.s are 'normalized' to opcode.
6129 * MonoInst->opcode initially is the IL opcode or some simplification of that
6130 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
6131 * opcode with value bigger than OP_LAST.
6132 * At this point the IR can be handed over to an interpreter, a dumb code generator
6133 * or to the optimizing code generator that will translate it to SSA form.
6135 * Profiling directed optimizations.
6136 * We may compile by default with few or no optimizations and instrument the code
6137 * or the user may indicate what methods to optimize the most either in a config file
6138 * or through repeated runs where the compiler applies offline the optimizations to
6139 * each method and then decides if it was worth it.
6142 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
6143 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
6144 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
6145 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
6146 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
6147 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
6148 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
6149 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; LOAD_ERROR;}
6151 /* offset from br.s -> br like opcodes */
6152 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *   Return whether the IL address IP still belongs to basic block BB,
 *   i.e. no *other* bblock starts at that offset.
 */
6155 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
6157 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
6159 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *   First pass over the IL stream [START, END): decode each opcode's operand
 *   size and create a basic block (GET_BBLOCK) at every branch target and at
 *   the instruction following a branch/switch. Also marks the bblock that
 *   contains a CEE_THROW as out-of-line (cold).
 */
6163 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
6165 unsigned char *ip = start;
6166 unsigned char *target;
6169 MonoBasicBlock *bblock;
6170 const MonoOpcode *opcode;
6173 cli_addr = ip - start;
6174 i = mono_opcode_value ((const guint8 **)&ip, end);
6177 opcode = &mono_opcodes [i];
/* advance IP by the operand size; branches additionally create bblocks */
6178 switch (opcode->argument) {
6179 case MonoInlineNone:
6182 case MonoInlineString:
6183 case MonoInlineType:
6184 case MonoInlineField:
6185 case MonoInlineMethod:
6188 case MonoShortInlineR:
6195 case MonoShortInlineVar:
6196 case MonoShortInlineI:
6199 case MonoShortInlineBrTarget:
/* 1-byte signed displacement, relative to the following instruction */
6200 target = start + cli_addr + 2 + (signed char)ip [1];
6201 GET_BBLOCK (cfg, bblock, target);
6204 GET_BBLOCK (cfg, bblock, ip);
6206 case MonoInlineBrTarget:
/* 4-byte signed displacement, relative to the following instruction */
6207 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
6208 GET_BBLOCK (cfg, bblock, target);
6211 GET_BBLOCK (cfg, bblock, ip);
6213 case MonoInlineSwitch: {
6214 guint32 n = read32 (ip + 1);
/* switch targets are relative to the end of the whole switch instruction */
6217 cli_addr += 5 + 4 * n;
6218 target = start + cli_addr;
6219 GET_BBLOCK (cfg, bblock, target);
6221 for (j = 0; j < n; ++j) {
6222 target = start + cli_addr + (gint32)read32 (ip);
6223 GET_BBLOCK (cfg, bblock, target);
6233 g_assert_not_reached ();
6236 if (i == CEE_THROW) {
6237 unsigned char *bb_start = ip - 1;
6239 /* Find the start of the bblock containing the throw */
6241 while ((bb_start >= start) && !bblock) {
6242 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* a throwing bblock is cold; move it out of the hot code path */
6246 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *   Resolve TOKEN to a MonoMethod in the context of method M. For wrapper
 *   methods the token indexes the wrapper's data blob (then inflated with
 *   CONTEXT); otherwise it is looked up in M's image. Open constructed
 *   types are permitted (contrast mini_get_method ()).
 */
6256 static inline MonoMethod *
6257 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6261 if (m->wrapper_type != MONO_WRAPPER_NONE) {
6262 method = mono_method_get_wrapper_data (m, token);
6264 method = mono_class_inflate_generic_method (method, context);
6266 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *   Like mini_get_method_allow_open (), but when no generic sharing context
 *   is active, methods on open constructed types are rejected.
 */
6272 static inline MonoMethod *
6273 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6275 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
6277 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *   Resolve TOKEN to a MonoClass in the context of METHOD (wrapper data blob
 *   for wrappers, metadata lookup otherwise), inflate with CONTEXT, and
 *   initialize the class before returning it.
 */
6283 static inline MonoClass*
6284 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
6288 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6289 klass = mono_method_get_wrapper_data (method, token);
6291 klass = mono_class_inflate_generic_class (klass, context);
6293 klass = mono_class_get_full (method->klass->image, token, context);
6296 mono_class_init (klass);
/*
 * mini_get_signature:
 *   Resolve TOKEN to a MonoMethodSignature: wrapper methods store the
 *   signature in their data blob (inflated with CONTEXT if given), other
 *   methods parse it from their image's metadata.
 */
6300 static inline MonoMethodSignature*
6301 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
6303 MonoMethodSignature *fsig;
6305 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6308 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6310 fsig = mono_inflate_generic_signature (fsig, context, &error);
6312 g_assert (mono_error_ok (&error));
6315 fsig = mono_metadata_parse_signature (method->klass->image, token);
6321 * Returns TRUE if the JIT should abort inlining because "callee"
6322 * is influenced by security attributes.
/*
 * check_linkdemand:
 *   Evaluate CAS link demands for a CALLER -> CALLEE call. ECMA link demand
 *   failures emit code that throws a SecurityException at the call site;
 *   other failures record MONO_EXCEPTION_SECURITY_LINKDEMAND on the cfg
 *   (without overwriting an already-recorded exception).
 */
6325 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
6329 if ((cfg->method != caller) && mono_security_method_has_declsec (callee)) {
6333 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
6334 if (result == MONO_JIT_SECURITY_OK)
6337 if (result == MONO_JIT_LINKDEMAND_ECMA) {
6338 /* Generate code to throw a SecurityException before the actual call/link */
6339 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6342 NEW_ICONST (cfg, args [0], 4);
6343 NEW_METHODCONST (cfg, args [1], caller);
6344 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
6345 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
6346 /* don't hide previous results */
6347 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
6348 cfg->exception_data = result;
/*
 * throw_exception:
 *   Return (lazily looking up and caching) the managed
 *   SecurityManager.ThrowException(exception) helper method.
 */
6356 throw_exception (void)
6358 static MonoMethod *method = NULL;
6361 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6362 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *   Emit a call to SecurityManager.ThrowException () that throws the
 *   pre-constructed exception object EX at runtime.
 */
6369 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
6371 MonoMethod *thrower = throw_exception ();
6374 EMIT_NEW_PCONST (cfg, args [0], ex);
6375 mono_emit_method_call (cfg, thrower, args, NULL);
6379 * Return the original method is a wrapper is specified. We can only access
6380 * the custom attributes from the original method.
/* get_original_method: unwrap METHOD so CoreCLR security checks can read its
 * custom attributes; native-to-managed wrappers are returned unchanged. */
6383 get_original_method (MonoMethod *method)
6385 if (method->wrapper_type == MONO_WRAPPER_NONE)
6388 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
6389 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
6392 /* in other cases we need to find the original method */
6393 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *   CoreCLR security check: if CALLER may not access FIELD, emit code that
 *   throws the returned security exception at this point in the method.
 */
6397 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
6398 MonoBasicBlock *bblock, unsigned char *ip)
6400 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6401 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
6403 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *   CoreCLR security check: if CALLER may not call CALLEE, emit code that
 *   throws the returned security exception at this point in the method.
 */
6407 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
6408 MonoBasicBlock *bblock, unsigned char *ip)
6410 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6411 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
6413 emit_throw_exception (cfg, ex);
6417 * Check that the IL instructions at ip are the array initialization
6418 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *   Recognize the dup/ldtoken/call RuntimeHelpers.InitializeArray IL pattern
 *   following a newarr, and resolve the RVA-backed field so the array can be
 *   filled directly from metadata. Returns the raw data pointer (or the RVA
 *   as an integer for AOT, to be resolved at load time); *out_size and
 *   *out_field_token receive the element size and the field token.
 */
6421 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
6424 * newarr[System.Int32]
6426 * ldtoken field valuetype ...
6427 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* 0x4 after ldtoken is the Field token type byte; CEE_CALL must follow */
6429 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
6430 guint32 token = read32 (ip + 7);
6431 guint32 field_token = read32 (ip + 2);
6432 guint32 field_index = field_token & 0xffffff;
6434 const char *data_ptr;
6436 MonoMethod *cmethod;
6437 MonoClass *dummy_class;
6438 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
6444 *out_field_token = field_token;
6446 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* only the real RuntimeHelpers.InitializeArray from corlib qualifies */
6449 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
6451 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
6452 case MONO_TYPE_BOOLEAN:
6456 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
6457 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
6458 case MONO_TYPE_CHAR:
/* the static field's blob must be large enough for len elements */
6475 if (size > mono_type_size (field->type, &dummy_align))
6478 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
6479 if (!method->klass->image->dynamic) {
6480 field_index = read32 (ip + 2) & 0xffffff;
6481 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
6482 data_ptr = mono_image_rva_map (method->klass->image, rva);
6483 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
6484 /* for aot code we do the lookup on load */
6485 if (aot && data_ptr)
6486 return GUINT_TO_POINTER (rva);
6488 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
6490 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *   Record an InvalidProgramException on the cfg, with a message naming the
 *   offending method and disassembling the IL instruction at IP (or noting
 *   an empty method body). The header is queued for later freeing.
 */
6498 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
6500 char *method_fname = mono_method_full_name (method, TRUE);
6502 MonoMethodHeader *header = mono_method_get_header (method);
6504 if (header->code_size == 0)
6505 method_code = g_strdup ("method body is empty.");
6507 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
6508 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
6509 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
6510 g_free (method_fname);
6511 g_free (method_code);
6512 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *   Record a pre-built exception object on the cfg; the pointer is
 *   registered as a GC root since it lives in unmanaged cfg state.
 */
6516 set_exception_object (MonoCompile *cfg, MonoException *exception)
6518 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
6519 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
6520 cfg->exception_ptr = exception;
/*
 * emit_stloc_ir:
 *   Emit a store of the top-of-stack value into IL local N. When the value
 *   was just produced by an ICONST/I8CONST that is still the last instruction
 *   in the bblock, retarget its dreg to the local instead of emitting a move.
 */
6524 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
6527 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
6528 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
6529 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
6530 /* Optimize reg-reg moves away */
6532 * Can't optimize other opcodes, since sp[0] might point to
6533 * the last ins of a decomposed opcode.
6535 sp [0]->dreg = (cfg)->locals [n]->dreg;
6537 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
6542 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *   Peephole for ldloca: when the address is immediately consumed by an
 *   initobj in the same bblock, replace the pair with a direct zero-init of
 *   the local and return the IP past the consumed instructions (the elided
 *   code returns NULL when the pattern does not match).
 */
6545 static inline unsigned char *
6546 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
6556 local = read16 (ip + 2);
6560 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
6561 /* From the INITOBJ case */
6562 token = read32 (ip + 2);
6563 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
6564 CHECK_TYPELOAD (klass);
6565 type = mini_replace_type (&klass->byval_arg);
6566 emit_init_local (cfg, local, type, TRUE);
/*
 * is_exception_class:
 *   Walk up the parent chain and return whether CLASS derives from
 *   System.Exception.
 */
6574 is_exception_class (MonoClass *class)
6577 if (class == mono_defaults.exception_class)
6579 class = class->parent;
6585 * is_jit_optimizer_disabled:
6587 * Determine whether M's assembly has a DebuggableAttribute with the
6588 * IsJITOptimizerDisabled flag set.
6591 is_jit_optimizer_disabled (MonoMethod *m)
6593 MonoAssembly *ass = m->klass->image->assembly;
6594 MonoCustomAttrInfo* attrs;
6595 static MonoClass *klass;
6597 gboolean val = FALSE;
/* result is cached per-assembly; the barrier orders the value before the flag */
6600 if (ass->jit_optimizer_disabled_inited)
6601 return ass->jit_optimizer_disabled;
6604 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
/* attribute class missing: treat as "optimizer enabled" and cache that */
6607 ass->jit_optimizer_disabled = FALSE;
6608 mono_memory_barrier ();
6609 ass->jit_optimizer_disabled_inited = TRUE;
6613 attrs = mono_custom_attrs_from_assembly (ass);
6615 for (i = 0; i < attrs->num_attrs; ++i) {
6616 MonoCustomAttrEntry *attr = &attrs->attrs [i];
6619 MonoMethodSignature *sig;
6621 if (!attr->ctor || attr->ctor->klass != klass)
6623 /* Decode the attribute. See reflection.c */
6624 len = attr->data_size;
6625 p = (const char*)attr->data;
/* 0x0001 is the custom attribute blob prolog (ECMA-335 II.23.3) */
6626 g_assert (read16 (p) == 0x0001);
6629 // FIXME: Support named parameters
6630 sig = mono_method_signature (attr->ctor);
6631 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
6633 /* Two boolean arguments */
6637 mono_custom_attrs_free (attrs);
6640 ass->jit_optimizer_disabled = val;
6641 mono_memory_barrier ();
6642 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *   Decide whether a tail. call from METHOD to CMETHOD can actually be
 *   compiled as a tail call: the architecture/signature must allow it and
 *   none of the arguments may point into the caller's soon-to-be-gone stack
 *   frame (byref/pointer params, valuetype 'this'). Pinvokes, LMF-saving
 *   callers, most wrappers and non-CEE_CALL opcodes are also excluded.
 */
6648 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
6650 gboolean supported_tail_call;
6653 #ifdef MONO_ARCH_HAVE_OP_TAIL_CALL
6654 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
6656 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6659 for (i = 0; i < fsig->param_count; ++i) {
6660 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
6661 /* These can point to the current method's stack */
6662 supported_tail_call = FALSE;
6664 if (fsig->hasthis && cmethod->klass->valuetype)
6665 /* this might point to the current method's stack */
6666 supported_tail_call = FALSE;
6667 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
6668 supported_tail_call = FALSE;
6669 if (cfg->method->save_lmf)
6670 supported_tail_call = FALSE;
6671 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
6672 supported_tail_call = FALSE;
6673 if (call_opcode != CEE_CALL)
6674 supported_tail_call = FALSE;
6676 /* Debugging support */
6678 if (supported_tail_call) {
6679 if (!mono_debug_count ())
6680 supported_tail_call = FALSE;
6684 return supported_tail_call;
6687 /* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
6688 * it to the thread local value based on the tls_offset field. Every other kind of access to
6689 * the field causes an assert.
/* is_magic_tls_access: TRUE iff FIELD is corlib's ThreadLocal`1.tlsdata. */
6692 is_magic_tls_access (MonoClassField *field)
6694 if (strcmp (field->name, "tlsdata"))
6696 if (strcmp (field->parent->name, "ThreadLocal`1"))
6698 return field->parent->image == mono_defaults.corlib;
6701 /* emits the code needed to access a managed tls var (like ThreadStatic)
6702 * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
6703 * pointer for the current thread.
6704 * Returns the MonoInst* representing the address of the tls var.
/*
 * emit_managed_static_data_access:
 *
 *   The encoded offset packs a chunk index in its top byte (biased by 1) and
 * a byte offset within that chunk in its low 24 bits; this emits IR computing
 * thread->static_data [(offset >> 24) - 1] + (offset & 0xffffff).
 */
6707 emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
6710 int static_data_reg, array_reg, dreg;
6711 int offset2_reg, idx_reg;
6712 // inlined access to the tls data
6713 // idx = (offset >> 24) - 1;
6714 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
/* static_data_reg = thread->static_data (array of chunk pointers) */
6715 static_data_reg = alloc_ireg (cfg);
6716 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
/* idx_reg = (offset >> 24) - 1 */
6717 idx_reg = alloc_ireg (cfg);
6718 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
6719 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
/* scale the index by the pointer size (<< 3 on 64 bit, << 2 on 32 bit) */
6720 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
6721 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
/* array_reg = static_data [idx], the base of the selected chunk */
6722 array_reg = alloc_ireg (cfg);
6723 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
/* offset2_reg = offset & 0xffffff, the byte offset inside the chunk */
6724 offset2_reg = alloc_ireg (cfg);
6725 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
/* addr = chunk base + intra-chunk offset; 'addr' is the function's result */
6726 dreg = alloc_ireg (cfg);
6727 EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
6732 * redirect access to the tlsdata field to the tls var given by the tls_offset field.
6733 * this address is cached per-method in cached_tls_addr.
/*
 * create_magic_tls_access:
 *
 *   Emit IR producing the address of the thread-local slot backing
 * TLS_FIELD (ThreadLocal`1.tlsdata, see is_magic_tls_access ()).
 * THREAD_LOCAL is the ThreadLocal`1 instance; *CACHED_TLS_ADDR caches the
 * computed address in a method-local temp so repeated accesses reuse it.
 */
6736 create_magic_tls_access (MonoCompile *cfg, MonoClassField *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
6738 MonoInst *load, *addr, *temp, *store, *thread_ins;
6739 MonoClassField *offset_field;
/* Cache hit: reload the previously computed address from its temp */
6741 if (*cached_tls_addr) {
6742 EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
/* Get the current MonoInternalThread, preferring the arch TLS intrinsic */
6745 thread_ins = mono_get_thread_intrinsic (cfg);
6746 offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");
/* load = thread_local->tls_offset (the encoded static-data offset) */
6748 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
6750 MONO_ADD_INS (cfg->cbb, thread_ins);
/* Intrinsic unavailable: fall back to an icall fetching the thread object */
6752 MonoMethod *thread_method;
6753 thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
6754 thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
/* Compute the slot address and give it managed-pointer stack type info */
6756 addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
6757 addr->klass = mono_class_from_mono_type (tls_field->type);
6758 addr->type = STACK_MP;
/* Store the address into a fresh local and remember it for later accesses */
6759 *cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
6760 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);
/* Reload from the temp so the returned ins is the cached value */
6762 EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
6767 * mono_method_to_ir:
6769 * Translate the method's CIL (.NET IL) byte code into the JIT's linear IR.
6772 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
6773 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
6774 guint inline_offset, gboolean is_virtual_call)
6777 MonoInst *ins, **sp, **stack_start;
6778 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
6779 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
6780 MonoMethod *cmethod, *method_definition;
6781 MonoInst **arg_array;
6782 MonoMethodHeader *header;
6784 guint32 token, ins_flag;
6786 MonoClass *constrained_call = NULL;
6787 unsigned char *ip, *end, *target, *err_pos;
6788 MonoMethodSignature *sig;
6789 MonoGenericContext *generic_context = NULL;
6790 MonoGenericContainer *generic_container = NULL;
6791 MonoType **param_types;
6792 int i, n, start_new_bblock, dreg;
6793 int num_calls = 0, inline_costs = 0;
6794 int breakpoint_id = 0;
6796 MonoBoolean security, pinvoke;
6797 MonoSecurityManager* secman = NULL;
6798 MonoDeclSecurityActions actions;
6799 GSList *class_inits = NULL;
6800 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
6802 gboolean init_locals, seq_points, skip_dead_blocks;
6803 gboolean disable_inline, sym_seq_points = FALSE;
6804 MonoInst *cached_tls_addr = NULL;
6805 MonoDebugMethodInfo *minfo;
6806 MonoBitSet *seq_point_locs = NULL;
6807 MonoBitSet *seq_point_set_locs = NULL;
6809 disable_inline = is_jit_optimizer_disabled (method);
6811 /* serialization and xdomain stuff may need access to private fields and methods */
6812 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
6813 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
6814 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
6815 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
6816 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
6817 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
6819 dont_verify |= mono_security_smcs_hack_enabled ();
6821 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
6822 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
6823 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
6824 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
6825 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
6827 image = method->klass->image;
6828 header = mono_method_get_header (method);
6830 MonoLoaderError *error;
6832 if ((error = mono_loader_get_last_error ())) {
6833 mono_cfg_set_exception (cfg, error->exception_type);
6835 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
6836 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
6838 goto exception_exit;
6840 generic_container = mono_method_get_generic_container (method);
6841 sig = mono_method_signature (method);
6842 num_args = sig->hasthis + sig->param_count;
6843 ip = (unsigned char*)header->code;
6844 cfg->cil_start = ip;
6845 end = ip + header->code_size;
6846 cfg->stat_cil_code_size += header->code_size;
6848 seq_points = cfg->gen_seq_points && cfg->method == method;
6849 #ifdef PLATFORM_ANDROID
6850 seq_points &= cfg->method->wrapper_type == MONO_WRAPPER_NONE;
6853 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
6854 /* We could hit a seq point before attaching to the JIT (#8338) */
6858 if (cfg->gen_seq_points && cfg->method == method) {
6859 minfo = mono_debug_lookup_method (method);
6861 int i, n_il_offsets;
6865 mono_debug_symfile_get_line_numbers_full (minfo, NULL, NULL, &n_il_offsets, &il_offsets, &line_numbers, NULL, NULL);
6866 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6867 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
6868 sym_seq_points = TRUE;
6869 for (i = 0; i < n_il_offsets; ++i) {
6870 if (il_offsets [i] < header->code_size)
6871 mono_bitset_set_fast (seq_point_locs, il_offsets [i]);
6873 g_free (il_offsets);
6874 g_free (line_numbers);
6879 * Methods without init_locals set could cause asserts in various passes
6880 * (#497220). To work around this, we emit dummy initialization opcodes
6881 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
6882 * on some platforms.
6884 if ((cfg->opt & MONO_OPT_UNSAFE) && ARCH_HAVE_DUMMY_INIT)
6885 init_locals = header->init_locals;
6889 method_definition = method;
6890 while (method_definition->is_inflated) {
6891 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
6892 method_definition = imethod->declaring;
6895 /* SkipVerification is not allowed if core-clr is enabled */
6896 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
6898 dont_verify_stloc = TRUE;
6901 if (sig->is_inflated)
6902 generic_context = mono_method_get_context (method);
6903 else if (generic_container)
6904 generic_context = &generic_container->context;
6905 cfg->generic_context = generic_context;
6907 if (!cfg->generic_sharing_context)
6908 g_assert (!sig->has_type_parameters);
6910 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
6911 g_assert (method->is_inflated);
6912 g_assert (mono_method_get_context (method)->method_inst);
6914 if (method->is_inflated && mono_method_get_context (method)->method_inst)
6915 g_assert (sig->generic_param_count);
6917 if (cfg->method == method) {
6918 cfg->real_offset = 0;
6920 cfg->real_offset = inline_offset;
6923 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
6924 cfg->cil_offset_to_bb_len = header->code_size;
6926 cfg->current_method = method;
6928 if (cfg->verbose_level > 2)
6929 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
6931 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
6933 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
6934 for (n = 0; n < sig->param_count; ++n)
6935 param_types [n + sig->hasthis] = sig->params [n];
6936 cfg->arg_types = param_types;
6938 dont_inline = g_list_prepend (dont_inline, method);
6939 if (cfg->method == method) {
6941 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
6942 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
6945 NEW_BBLOCK (cfg, start_bblock);
6946 cfg->bb_entry = start_bblock;
6947 start_bblock->cil_code = NULL;
6948 start_bblock->cil_length = 0;
6949 #if defined(__native_client_codegen__)
6950 MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
6951 ins->dreg = alloc_dreg (cfg, STACK_I4);
6952 MONO_ADD_INS (start_bblock, ins);
6956 NEW_BBLOCK (cfg, end_bblock);
6957 cfg->bb_exit = end_bblock;
6958 end_bblock->cil_code = NULL;
6959 end_bblock->cil_length = 0;
6960 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
6961 g_assert (cfg->num_bblocks == 2);
6963 arg_array = cfg->args;
6965 if (header->num_clauses) {
6966 cfg->spvars = g_hash_table_new (NULL, NULL);
6967 cfg->exvars = g_hash_table_new (NULL, NULL);
6969 /* handle exception clauses */
6970 for (i = 0; i < header->num_clauses; ++i) {
6971 MonoBasicBlock *try_bb;
6972 MonoExceptionClause *clause = &header->clauses [i];
6973 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
6974 try_bb->real_offset = clause->try_offset;
6975 try_bb->try_start = TRUE;
6976 try_bb->region = ((i + 1) << 8) | clause->flags;
6977 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
6978 tblock->real_offset = clause->handler_offset;
6979 tblock->flags |= BB_EXCEPTION_HANDLER;
6982 * Linking the try block with the EH block hinders inlining as we won't be able to
6983 * merge the bblocks from inlining and produce an artificial hole for no good reason.
6985 if (COMPILE_LLVM (cfg))
6986 link_bblock (cfg, try_bb, tblock);
6988 if (*(ip + clause->handler_offset) == CEE_POP)
6989 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
6991 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
6992 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
6993 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
6994 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
6995 MONO_ADD_INS (tblock, ins);
6997 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) {
6998 /* finally clauses already have a seq point */
6999 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7000 MONO_ADD_INS (tblock, ins);
7003 /* todo: is a fault block unsafe to optimize? */
7004 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
7005 tblock->flags |= BB_EXCEPTION_UNSAFE;
7009 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
7011 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
7013 /* catch and filter blocks get the exception object on the stack */
7014 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
7015 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7016 MonoInst *dummy_use;
7018 /* mostly like handle_stack_args (), but just sets the input args */
7019 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
7020 tblock->in_scount = 1;
7021 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7022 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7025 * Add a dummy use for the exvar so its liveness info will be
7029 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
7031 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7032 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
7033 tblock->flags |= BB_EXCEPTION_HANDLER;
7034 tblock->real_offset = clause->data.filter_offset;
7035 tblock->in_scount = 1;
7036 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7037 /* The filter block shares the exvar with the handler block */
7038 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7039 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7040 MONO_ADD_INS (tblock, ins);
7044 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
7045 clause->data.catch_class &&
7046 cfg->generic_sharing_context &&
7047 mono_class_check_context_used (clause->data.catch_class)) {
7049 * In shared generic code with catch
7050 * clauses containing type variables
7051 * the exception handling code has to
7052 * be able to get to the rgctx.
7053 * Therefore we have to make sure that
7054 * the vtable/mrgctx argument (for
7055 * static or generic methods) or the
7056 * "this" argument (for non-static
7057 * methods) are live.
7059 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7060 mini_method_get_context (method)->method_inst ||
7061 method->klass->valuetype) {
7062 mono_get_vtable_var (cfg);
7064 MonoInst *dummy_use;
7066 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
7071 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
7072 cfg->cbb = start_bblock;
7073 cfg->args = arg_array;
7074 mono_save_args (cfg, sig, inline_args);
7077 /* FIRST CODE BLOCK */
7078 NEW_BBLOCK (cfg, bblock);
7079 bblock->cil_code = ip;
7083 ADD_BBLOCK (cfg, bblock);
7085 if (cfg->method == method) {
7086 breakpoint_id = mono_debugger_method_has_breakpoint (method);
7087 if (breakpoint_id) {
7088 MONO_INST_NEW (cfg, ins, OP_BREAK);
7089 MONO_ADD_INS (bblock, ins);
7093 if (mono_security_cas_enabled ())
7094 secman = mono_security_manager_get_methods ();
7096 security = (secman && mono_security_method_has_declsec (method));
7097 /* at this point having security doesn't mean we have any code to generate */
7098 if (security && (cfg->method == method)) {
7099 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
7100 * And we do not want to enter the next section (with allocation) if we
7101 * have nothing to generate */
7102 security = mono_declsec_get_demands (method, &actions);
7105 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
7106 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
7108 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7109 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
7110 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
7112 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
7113 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
7117 mono_custom_attrs_free (custom);
7120 custom = mono_custom_attrs_from_class (wrapped->klass);
7121 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
7125 mono_custom_attrs_free (custom);
7128 /* not a P/Invoke after all */
7133 /* we use a separate basic block for the initialization code */
7134 NEW_BBLOCK (cfg, init_localsbb);
7135 cfg->bb_init = init_localsbb;
7136 init_localsbb->real_offset = cfg->real_offset;
7137 start_bblock->next_bb = init_localsbb;
7138 init_localsbb->next_bb = bblock;
7139 link_bblock (cfg, start_bblock, init_localsbb);
7140 link_bblock (cfg, init_localsbb, bblock);
7142 cfg->cbb = init_localsbb;
7144 if (cfg->gsharedvt && cfg->method == method) {
7145 MonoGSharedVtMethodInfo *info;
7146 MonoInst *var, *locals_var;
7149 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
7150 info->method = cfg->method;
7151 info->count_entries = 16;
7152 info->entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
7153 cfg->gsharedvt_info = info;
7155 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7156 /* prevent it from being register allocated */
7157 //var->flags |= MONO_INST_VOLATILE;
7158 cfg->gsharedvt_info_var = var;
7160 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
7161 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
7163 /* Allocate locals */
7164 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7165 /* prevent it from being register allocated */
7166 //locals_var->flags |= MONO_INST_VOLATILE;
7167 cfg->gsharedvt_locals_var = locals_var;
7169 dreg = alloc_ireg (cfg);
7170 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
7172 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
7173 ins->dreg = locals_var->dreg;
7175 MONO_ADD_INS (cfg->cbb, ins);
7176 cfg->gsharedvt_locals_var_ins = ins;
7178 cfg->flags |= MONO_CFG_HAS_ALLOCA;
7181 ins->flags |= MONO_INST_INIT;
7185 /* at this point we know, if security is TRUE, that some code needs to be generated */
7186 if (security && (cfg->method == method)) {
7189 cfg->stat_cas_demand_generation++;
7191 if (actions.demand.blob) {
7192 /* Add code for SecurityAction.Demand */
7193 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
7194 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
7195 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
7196 mono_emit_method_call (cfg, secman->demand, args, NULL);
7198 if (actions.noncasdemand.blob) {
7199 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
7200 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
7201 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
7202 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
7203 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
7204 mono_emit_method_call (cfg, secman->demand, args, NULL);
7206 if (actions.demandchoice.blob) {
7207 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
7208 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
7209 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
7210 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
7211 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
7215 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
7217 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
7220 if (mono_security_core_clr_enabled ()) {
7221 /* check if this is native code, e.g. an icall or a p/invoke */
7222 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
7223 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7225 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
7226 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
7228 /* if this ia a native call then it can only be JITted from platform code */
7229 if ((icall || pinvk) && method->klass && method->klass->image) {
7230 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
7231 MonoException *ex = icall ? mono_get_exception_security () :
7232 mono_get_exception_method_access ();
7233 emit_throw_exception (cfg, ex);
7240 CHECK_CFG_EXCEPTION;
7242 if (header->code_size == 0)
7245 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
7250 if (cfg->method == method)
7251 mono_debug_init_method (cfg, bblock, breakpoint_id);
7253 for (n = 0; n < header->num_locals; ++n) {
7254 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
7259 /* We force the vtable variable here for all shared methods
7260 for the possibility that they might show up in a stack
7261 trace where their exact instantiation is needed. */
7262 if (cfg->generic_sharing_context && method == cfg->method) {
7263 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7264 mini_method_get_context (method)->method_inst ||
7265 method->klass->valuetype) {
7266 mono_get_vtable_var (cfg);
7268 /* FIXME: Is there a better way to do this?
7269 We need the variable live for the duration
7270 of the whole method. */
7271 cfg->args [0]->flags |= MONO_INST_VOLATILE;
7275 /* add a check for this != NULL to inlined methods */
7276 if (is_virtual_call) {
7279 NEW_ARGLOAD (cfg, arg_ins, 0);
7280 MONO_ADD_INS (cfg->cbb, arg_ins);
7281 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
7284 skip_dead_blocks = !dont_verify;
7285 if (skip_dead_blocks) {
7286 original_bb = bb = mono_basic_block_split (method, &error);
7287 if (!mono_error_ok (&error)) {
7288 mono_error_cleanup (&error);
7294 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
7295 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
7298 start_new_bblock = 0;
7301 if (cfg->method == method)
7302 cfg->real_offset = ip - header->code;
7304 cfg->real_offset = inline_offset;
7309 if (start_new_bblock) {
7310 bblock->cil_length = ip - bblock->cil_code;
7311 if (start_new_bblock == 2) {
7312 g_assert (ip == tblock->cil_code);
7314 GET_BBLOCK (cfg, tblock, ip);
7316 bblock->next_bb = tblock;
7319 start_new_bblock = 0;
7320 for (i = 0; i < bblock->in_scount; ++i) {
7321 if (cfg->verbose_level > 3)
7322 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
7323 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
7327 g_slist_free (class_inits);
7330 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
7331 link_bblock (cfg, bblock, tblock);
7332 if (sp != stack_start) {
7333 handle_stack_args (cfg, stack_start, sp - stack_start);
7335 CHECK_UNVERIFIABLE (cfg);
7337 bblock->next_bb = tblock;
7340 for (i = 0; i < bblock->in_scount; ++i) {
7341 if (cfg->verbose_level > 3)
7342 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
7343 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
7346 g_slist_free (class_inits);
7351 if (skip_dead_blocks) {
7352 int ip_offset = ip - header->code;
7354 if (ip_offset == bb->end)
7358 int op_size = mono_opcode_size (ip, end);
7359 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
7361 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
7363 if (ip_offset + op_size == bb->end) {
7364 MONO_INST_NEW (cfg, ins, OP_NOP);
7365 MONO_ADD_INS (bblock, ins);
7366 start_new_bblock = 1;
7374 * Sequence points are points where the debugger can place a breakpoint.
7375 * Currently, we generate these automatically at points where the IL
7378 if (seq_points && ((sp == stack_start) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
7380 * Make methods interruptable at the beginning, and at the targets of
7381 * backward branches.
7382 * Also, do this at the start of every bblock in methods with clauses too,
7383 * to be able to handle instructions with inprecise control flow like
7385 * Backward branches are handled at the end of method-to-ir ().
7387 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
7389 /* Avoid sequence points on empty IL like .volatile */
7390 // FIXME: Enable this
7391 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
7392 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
7393 if (sp != stack_start)
7394 ins->flags |= MONO_INST_NONEMPTY_STACK;
7395 MONO_ADD_INS (cfg->cbb, ins);
7398 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
7401 bblock->real_offset = cfg->real_offset;
7403 if ((cfg->method == method) && cfg->coverage_info) {
7404 guint32 cil_offset = ip - header->code;
7405 cfg->coverage_info->data [cil_offset].cil_code = ip;
7407 /* TODO: Use an increment here */
7408 #if defined(TARGET_X86)
7409 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
7410 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
7412 MONO_ADD_INS (cfg->cbb, ins);
7414 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
7415 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
7419 if (cfg->verbose_level > 3)
7420 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
7424 if (seq_points && !sym_seq_points && sp != stack_start) {
7426 * The C# compiler uses these nops to notify the JIT that it should
7427 * insert seq points.
7429 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
7430 MONO_ADD_INS (cfg->cbb, ins);
7432 if (cfg->keep_cil_nops)
7433 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
7435 MONO_INST_NEW (cfg, ins, OP_NOP);
7437 MONO_ADD_INS (bblock, ins);
7440 if (should_insert_brekpoint (cfg->method)) {
7441 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
7443 MONO_INST_NEW (cfg, ins, OP_NOP);
7446 MONO_ADD_INS (bblock, ins);
7452 CHECK_STACK_OVF (1);
7453 n = (*ip)-CEE_LDARG_0;
7455 EMIT_NEW_ARGLOAD (cfg, ins, n);
7463 CHECK_STACK_OVF (1);
7464 n = (*ip)-CEE_LDLOC_0;
7466 EMIT_NEW_LOCLOAD (cfg, ins, n);
7475 n = (*ip)-CEE_STLOC_0;
7478 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
7480 emit_stloc_ir (cfg, sp, header, n);
7487 CHECK_STACK_OVF (1);
7490 EMIT_NEW_ARGLOAD (cfg, ins, n);
7496 CHECK_STACK_OVF (1);
7499 NEW_ARGLOADA (cfg, ins, n);
7500 MONO_ADD_INS (cfg->cbb, ins);
7510 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
7512 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
7517 CHECK_STACK_OVF (1);
7520 EMIT_NEW_LOCLOAD (cfg, ins, n);
7524 case CEE_LDLOCA_S: {
7525 unsigned char *tmp_ip;
7527 CHECK_STACK_OVF (1);
7528 CHECK_LOCAL (ip [1]);
7530 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
7536 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
7545 CHECK_LOCAL (ip [1]);
7546 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
7548 emit_stloc_ir (cfg, sp, header, ip [1]);
7553 CHECK_STACK_OVF (1);
7554 EMIT_NEW_PCONST (cfg, ins, NULL);
7555 ins->type = STACK_OBJ;
7560 CHECK_STACK_OVF (1);
7561 EMIT_NEW_ICONST (cfg, ins, -1);
7574 CHECK_STACK_OVF (1);
7575 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
7581 CHECK_STACK_OVF (1);
7583 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
7589 CHECK_STACK_OVF (1);
7590 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
7596 CHECK_STACK_OVF (1);
7597 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7598 ins->type = STACK_I8;
7599 ins->dreg = alloc_dreg (cfg, STACK_I8);
7601 ins->inst_l = (gint64)read64 (ip);
7602 MONO_ADD_INS (bblock, ins);
7608 gboolean use_aotconst = FALSE;
7610 #ifdef TARGET_POWERPC
7611 /* FIXME: Clean this up */
7612 if (cfg->compile_aot)
7613 use_aotconst = TRUE;
7616 /* FIXME: we should really allocate this only late in the compilation process */
7617 f = mono_domain_alloc (cfg->domain, sizeof (float));
7619 CHECK_STACK_OVF (1);
7625 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
7627 dreg = alloc_freg (cfg);
7628 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
7629 ins->type = STACK_R8;
7631 MONO_INST_NEW (cfg, ins, OP_R4CONST);
7632 ins->type = STACK_R8;
7633 ins->dreg = alloc_dreg (cfg, STACK_R8);
7635 MONO_ADD_INS (bblock, ins);
7645 gboolean use_aotconst = FALSE;
7647 #ifdef TARGET_POWERPC
7648 /* FIXME: Clean this up */
7649 if (cfg->compile_aot)
7650 use_aotconst = TRUE;
7653 /* FIXME: we should really allocate this only late in the compilation process */
7654 d = mono_domain_alloc (cfg->domain, sizeof (double));
7656 CHECK_STACK_OVF (1);
7662 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
7664 dreg = alloc_freg (cfg);
7665 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
7666 ins->type = STACK_R8;
7668 MONO_INST_NEW (cfg, ins, OP_R8CONST);
7669 ins->type = STACK_R8;
7670 ins->dreg = alloc_dreg (cfg, STACK_R8);
7672 MONO_ADD_INS (bblock, ins);
7681 MonoInst *temp, *store;
7683 CHECK_STACK_OVF (1);
7687 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
7688 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
7690 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
7693 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
7706 if (sp [0]->type == STACK_R8)
7707 /* we need to pop the value from the x86 FP stack */
7708 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
7714 INLINE_FAILURE ("jmp");
7715 GSHAREDVT_FAILURE (*ip);
7718 if (stack_start != sp)
7720 token = read32 (ip + 1);
7721 /* FIXME: check the signature matches */
7722 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7724 if (!cmethod || mono_loader_get_last_error ())
7727 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
7728 GENERIC_SHARING_FAILURE (CEE_JMP);
7730 if (mono_security_cas_enabled ())
7731 CHECK_CFG_EXCEPTION;
7733 if (ARCH_HAVE_OP_TAIL_CALL) {
7734 MonoMethodSignature *fsig = mono_method_signature (cmethod);
7737 /* Handle tail calls similarly to calls */
7738 n = fsig->param_count + fsig->hasthis;
7742 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
7743 call->method = cmethod;
7744 call->tail_call = TRUE;
7745 call->signature = mono_method_signature (cmethod);
7746 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
7747 call->inst.inst_p0 = cmethod;
7748 for (i = 0; i < n; ++i)
7749 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
7751 mono_arch_emit_call (cfg, call);
7752 MONO_ADD_INS (bblock, (MonoInst*)call);
7754 for (i = 0; i < num_args; ++i)
7755 /* Prevent arguments from being optimized away */
7756 arg_array [i]->flags |= MONO_INST_VOLATILE;
7758 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
7759 ins = (MonoInst*)call;
7760 ins->inst_p0 = cmethod;
7761 MONO_ADD_INS (bblock, ins);
7765 start_new_bblock = 1;
7770 case CEE_CALLVIRT: {
7771 MonoInst *addr = NULL;
7772 MonoMethodSignature *fsig = NULL;
7774 int virtual = *ip == CEE_CALLVIRT;
7775 int calli = *ip == CEE_CALLI;
7776 gboolean pass_imt_from_rgctx = FALSE;
7777 MonoInst *imt_arg = NULL;
7778 MonoInst *keep_this_alive = NULL;
7779 gboolean pass_vtable = FALSE;
7780 gboolean pass_mrgctx = FALSE;
7781 MonoInst *vtable_arg = NULL;
7782 gboolean check_this = FALSE;
7783 gboolean supported_tail_call = FALSE;
7784 gboolean tail_call = FALSE;
7785 gboolean need_seq_point = FALSE;
7786 guint32 call_opcode = *ip;
7787 gboolean emit_widen = TRUE;
7788 gboolean push_res = TRUE;
7789 gboolean skip_ret = FALSE;
7790 gboolean delegate_invoke = FALSE;
7793 token = read32 (ip + 1);
7798 //GSHAREDVT_FAILURE (*ip);
7803 fsig = mini_get_signature (method, token, generic_context);
7804 n = fsig->param_count + fsig->hasthis;
7806 if (method->dynamic && fsig->pinvoke) {
7810 * This is a call through a function pointer using a pinvoke
7811 * signature. Have to create a wrapper and call that instead.
7812 * FIXME: This is very slow, need to create a wrapper at JIT time
7813 * instead based on the signature.
7815 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
7816 EMIT_NEW_PCONST (cfg, args [1], fsig);
7818 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
7821 MonoMethod *cil_method;
7823 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7824 cil_method = cmethod;
7826 if (constrained_call) {
7827 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7828 if (cfg->verbose_level > 2)
7829 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_call));
7830 if (!((constrained_call->byval_arg.type == MONO_TYPE_VAR ||
7831 constrained_call->byval_arg.type == MONO_TYPE_MVAR) &&
7832 cfg->generic_sharing_context)) {
7833 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_call, generic_context);
7836 if (cfg->verbose_level > 2)
7837 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_call));
7839 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
7841 * This is needed since get_method_constrained can't find
7842 * the method in klass representing a type var.
7843 * The type var is guaranteed to be a reference type in this
7846 if (!mini_is_gsharedvt_klass (cfg, constrained_call))
7847 g_assert (!cmethod->klass->valuetype);
7849 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
7854 if (!cmethod || mono_loader_get_last_error ())
7856 if (!dont_verify && !cfg->skip_visibility) {
7857 MonoMethod *target_method = cil_method;
7858 if (method->is_inflated) {
7859 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
7861 if (!mono_method_can_access_method (method_definition, target_method) &&
7862 !mono_method_can_access_method (method, cil_method))
7863 METHOD_ACCESS_FAILURE;
7866 if (mono_security_core_clr_enabled ())
7867 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
7869 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
7870 /* MS.NET seems to silently convert this to a callvirt */
7875 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
7876 * converts to a callvirt.
7878 * tests/bug-515884.il is an example of this behavior
7880 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
7881 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
7882 if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
7886 if (!cmethod->klass->inited)
7887 if (!mono_class_init (cmethod->klass))
7888 TYPE_LOAD_ERROR (cmethod->klass);
7890 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
7891 mini_class_is_system_array (cmethod->klass)) {
7892 array_rank = cmethod->klass->rank;
7893 fsig = mono_method_signature (cmethod);
7895 fsig = mono_method_signature (cmethod);
7900 if (fsig->pinvoke) {
7901 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
7902 check_for_pending_exc, cfg->compile_aot);
7903 fsig = mono_method_signature (wrapper);
7904 } else if (constrained_call) {
7905 fsig = mono_method_signature (cmethod);
7907 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
7911 mono_save_token_info (cfg, image, token, cil_method);
7913 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7915 * Need to emit an implicit seq point after every non-void call so single stepping through nested calls like
7916 * foo (bar (), baz ())
7917 * works correctly. MS does this also:
7918 * http://stackoverflow.com/questions/6937198/making-your-net-language-step-correctly-in-the-debugger
7919 * The problem with this approach is that the debugger will stop after all calls returning a value,
7920 * even for simple cases, like:
7923 /* Special case a few common successor opcodes */
7924 if (!(ip + 5 < end && (ip [5] == CEE_POP || ip [5] == CEE_NOP)) && !(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
7925 need_seq_point = TRUE;
7928 n = fsig->param_count + fsig->hasthis;
7930 /* Don't support calls made using type arguments for now */
7932 if (cfg->gsharedvt) {
7933 if (mini_is_gsharedvt_signature (cfg, fsig))
7934 GSHAREDVT_FAILURE (*ip);
7938 if (mono_security_cas_enabled ()) {
7939 if (check_linkdemand (cfg, method, cmethod))
7940 INLINE_FAILURE ("linkdemand");
7941 CHECK_CFG_EXCEPTION;
7944 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
7945 g_assert_not_reached ();
7948 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
7951 if (!cfg->generic_sharing_context && cmethod)
7952 g_assert (!mono_method_check_context_used (cmethod));
7956 //g_assert (!virtual || fsig->hasthis);
7960 if (constrained_call) {
7961 if (mini_is_gsharedvt_klass (cfg, constrained_call)) {
 7963 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as a ref type or as a vtype.
7965 if ((cmethod->klass != mono_defaults.object_class) && constrained_call->valuetype && cmethod->klass->valuetype) {
7966 /* The 'Own method' case below */
7967 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
7968 /* 'The type parameter is instantiated as a reference type' case below. */
7969 } else if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
7970 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || mini_is_gsharedvt_type (cfg, fsig->ret)) &&
7971 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || mini_is_gsharedvt_type (cfg, fsig->params [0]))))) {
7972 MonoInst *args [16];
7975 * This case handles calls to
7976 * - object:ToString()/Equals()/GetHashCode(),
7977 * - System.IComparable<T>:CompareTo()
7978 * - System.IEquatable<T>:Equals ()
7979 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
7983 if (mono_method_check_context_used (cmethod))
7984 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
7986 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
7987 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_call), constrained_call, MONO_RGCTX_INFO_KLASS);
7989 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
7990 if (fsig->hasthis && fsig->param_count) {
7991 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
7992 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
7993 ins->dreg = alloc_preg (cfg);
7994 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
7995 MONO_ADD_INS (cfg->cbb, ins);
7998 if (mini_is_gsharedvt_type (cfg, fsig->params [0])) {
8001 args [3] = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
8003 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
8004 addr_reg = ins->dreg;
8005 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
8007 EMIT_NEW_ICONST (cfg, args [3], 0);
8008 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
8011 EMIT_NEW_ICONST (cfg, args [3], 0);
8012 EMIT_NEW_ICONST (cfg, args [4], 0);
8014 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
8017 if (mini_is_gsharedvt_type (cfg, fsig->ret)) {
8018 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins, &bblock);
8019 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret)) {
8023 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
8024 MONO_ADD_INS (cfg->cbb, add);
8026 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
8027 MONO_ADD_INS (cfg->cbb, ins);
8028 /* ins represents the call result */
8033 GSHAREDVT_FAILURE (*ip);
8037 * We have the `constrained.' prefix opcode.
8039 if (constrained_call->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8041 * The type parameter is instantiated as a valuetype,
8042 * but that type doesn't override the method we're
8043 * calling, so we need to box `this'.
8045 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
8046 ins->klass = constrained_call;
8047 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
8048 CHECK_CFG_EXCEPTION;
8049 } else if (!constrained_call->valuetype) {
8050 int dreg = alloc_ireg_ref (cfg);
8053 * The type parameter is instantiated as a reference
8054 * type. We have a managed pointer on the stack, so
8055 * we need to dereference it here.
8057 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
8058 ins->type = STACK_OBJ;
8061 if (cmethod->klass->valuetype) {
8064 /* Interface method */
8067 mono_class_setup_vtable (constrained_call);
8068 CHECK_TYPELOAD (constrained_call);
8069 ioffset = mono_class_interface_offset (constrained_call, cmethod->klass);
8071 TYPE_LOAD_ERROR (constrained_call);
8072 slot = mono_method_get_vtable_slot (cmethod);
8074 TYPE_LOAD_ERROR (cmethod->klass);
8075 cmethod = constrained_call->vtable [ioffset + slot];
8077 if (cmethod->klass == mono_defaults.enum_class) {
8078 /* Enum implements some interfaces, so treat this as the first case */
8079 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
8080 ins->klass = constrained_call;
8081 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
8082 CHECK_CFG_EXCEPTION;
8087 constrained_call = NULL;
8090 if (!calli && check_call_signature (cfg, fsig, sp))
8093 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
8094 if (cmethod && (cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
8095 delegate_invoke = TRUE;
8098 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
8100 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8101 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8109 * If the callee is a shared method, then its static cctor
8110 * might not get called after the call was patched.
8112 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8113 emit_generic_class_init (cfg, cmethod->klass);
8114 CHECK_TYPELOAD (cmethod->klass);
8118 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
8120 if (cfg->generic_sharing_context && cmethod) {
8121 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
8123 context_used = mini_method_check_context_used (cfg, cmethod);
8125 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8126 /* Generic method interface
8127 calls are resolved via a
8128 helper function and don't
8130 if (!cmethod_context || !cmethod_context->method_inst)
8131 pass_imt_from_rgctx = TRUE;
8135 * If a shared method calls another
8136 * shared method then the caller must
8137 * have a generic sharing context
8138 * because the magic trampoline
8139 * requires it. FIXME: We shouldn't
8140 * have to force the vtable/mrgctx
8141 * variable here. Instead there
8142 * should be a flag in the cfg to
8143 * request a generic sharing context.
8146 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
8147 mono_get_vtable_var (cfg);
8152 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8154 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8156 CHECK_TYPELOAD (cmethod->klass);
8157 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8162 g_assert (!vtable_arg);
8164 if (!cfg->compile_aot) {
8166 * emit_get_rgctx_method () calls mono_class_vtable () so check
8167 * for type load errors before.
8169 mono_class_setup_vtable (cmethod->klass);
8170 CHECK_TYPELOAD (cmethod->klass);
8173 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8175 /* !marshalbyref is needed to properly handle generic methods + remoting */
8176 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
8177 MONO_METHOD_IS_FINAL (cmethod)) &&
8178 !mono_class_is_marshalbyref (cmethod->klass)) {
8185 if (pass_imt_from_rgctx) {
8186 g_assert (!pass_vtable);
8189 imt_arg = emit_get_rgctx_method (cfg, context_used,
8190 cmethod, MONO_RGCTX_INFO_METHOD);
8194 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8196 /* Calling virtual generic methods */
8197 if (cmethod && virtual &&
8198 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
8199 !(MONO_METHOD_IS_FINAL (cmethod) &&
8200 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
8201 fsig->generic_param_count &&
8202 !(cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))) {
8203 MonoInst *this_temp, *this_arg_temp, *store;
8204 MonoInst *iargs [4];
8205 gboolean use_imt = FALSE;
8207 g_assert (fsig->is_inflated);
8209 /* Prevent inlining of methods that contain indirect calls */
8210 INLINE_FAILURE ("virtual generic call");
8212 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
8213 GSHAREDVT_FAILURE (*ip);
8215 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
8216 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt)
8221 g_assert (!imt_arg);
8223 g_assert (cmethod->is_inflated);
8224 imt_arg = emit_get_rgctx_method (cfg, context_used,
8225 cmethod, MONO_RGCTX_INFO_METHOD);
8226 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
8228 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
8229 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
8230 MONO_ADD_INS (bblock, store);
8232 /* FIXME: This should be a managed pointer */
8233 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8235 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
8236 iargs [1] = emit_get_rgctx_method (cfg, context_used,
8237 cmethod, MONO_RGCTX_INFO_METHOD);
8238 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
8239 addr = mono_emit_jit_icall (cfg,
8240 mono_helper_compile_generic_method, iargs);
8242 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
8244 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8251 * Implement a workaround for the inherent races involved in locking:
8257 * If a thread abort happens between the call to Monitor.Enter () and the start of the
8258 * try block, the Exit () won't be executed, see:
8259 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
8260 * To work around this, we extend such try blocks to include the last x bytes
8261 * of the Monitor.Enter () call.
8263 if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8264 MonoBasicBlock *tbb;
8266 GET_BBLOCK (cfg, tbb, ip + 5);
8268 * Only extend try blocks with a finally, to avoid catching exceptions thrown
8269 * from Monitor.Enter like ArgumentNullException.
8271 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
8272 /* Mark this bblock as needing to be extended */
8273 tbb->extend_try_block = TRUE;
8277 /* Conversion to a JIT intrinsic */
8278 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
8280 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8281 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8288 if (cmethod && (cfg->opt & MONO_OPT_INLINE) &&
8289 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
8290 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
8291 !g_list_find (dont_inline, cmethod)) {
8293 gboolean always = FALSE;
8295 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
8296 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
8297 /* Prevent inlining of methods that call wrappers */
8298 INLINE_FAILURE ("wrapper call");
8299 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
8303 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, always);
8305 cfg->real_offset += 5;
8308 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8309 /* *sp is already set by inline_method */
8314 inline_costs += costs;
8320 /* Tail recursion elimination */
8321 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
8322 gboolean has_vtargs = FALSE;
8325 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8326 INLINE_FAILURE ("tail call");
8328 /* keep it simple */
8329 for (i = fsig->param_count - 1; i >= 0; i--) {
8330 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
8335 for (i = 0; i < n; ++i)
8336 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8337 MONO_INST_NEW (cfg, ins, OP_BR);
8338 MONO_ADD_INS (bblock, ins);
8339 tblock = start_bblock->out_bb [0];
8340 link_bblock (cfg, bblock, tblock);
8341 ins->inst_target_bb = tblock;
8342 start_new_bblock = 1;
8344 /* skip the CEE_RET, too */
8345 if (ip_in_bb (cfg, bblock, ip + 5))
8352 inline_costs += 10 * num_calls++;
8355 * Making generic calls out of gsharedvt methods.
8357 if (cmethod && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
8358 MonoRgctxInfoType info_type;
8361 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
8362 //GSHAREDVT_FAILURE (*ip);
8363 // disable for possible remoting calls
8364 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
8365 GSHAREDVT_FAILURE (*ip);
8366 if (fsig->generic_param_count) {
8367 /* virtual generic call */
8368 g_assert (mono_use_imt);
8369 g_assert (!imt_arg);
8370 /* Same as the virtual generic case above */
8371 imt_arg = emit_get_rgctx_method (cfg, context_used,
8372 cmethod, MONO_RGCTX_INFO_METHOD);
8373 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
8378 if (cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)
8379 /* test_0_multi_dim_arrays () in gshared.cs */
8380 GSHAREDVT_FAILURE (*ip);
8382 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
8383 keep_this_alive = sp [0];
8385 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
8386 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
8388 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
8389 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
8391 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8393 } else if (calli && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
8395 * We pass the address to the gsharedvt trampoline in the rgctx reg
8397 MonoInst *callee = addr;
8399 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8401 GSHAREDVT_FAILURE (*ip);
8403 addr = emit_get_rgctx_sig (cfg, context_used,
8404 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8405 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8409 /* Generic sharing */
8410 /* FIXME: only do this for generic methods if
8411 they are not shared! */
8412 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
8413 (!mono_method_is_generic_sharable (cmethod, TRUE) ||
8414 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
8415 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
8416 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
8417 INLINE_FAILURE ("gshared");
8419 g_assert (cfg->generic_sharing_context && cmethod);
8423 * We are compiling a call to a
8424 * generic method from shared code,
8425 * which means that we have to look up
8426 * the method in the rgctx and do an
8430 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8432 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8433 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8437 /* Indirect calls */
8439 if (call_opcode == CEE_CALL)
8440 g_assert (context_used);
8441 else if (call_opcode == CEE_CALLI)
8442 g_assert (!vtable_arg);
8444 /* FIXME: what the hell is this??? */
8445 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
8446 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
8448 /* Prevent inlining of methods with indirect calls */
8449 INLINE_FAILURE ("indirect call");
8451 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8456 * Instead of emitting an indirect call, emit a direct call
8457 * with the contents of the aotconst as the patch info.
8459 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8460 info_type = addr->inst_c1;
8461 info_data = addr->inst_p0;
8463 info_type = addr->inst_right->inst_c1;
8464 info_data = addr->inst_right->inst_left;
8467 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8468 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8473 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8481 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
8482 MonoInst *val = sp [fsig->param_count];
8484 if (val->type == STACK_OBJ) {
8485 MonoInst *iargs [2];
8490 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
8493 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
8494 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
8495 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
8496 emit_write_barrier (cfg, addr, val);
8497 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
8498 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8500 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
8501 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
8502 if (!cmethod->klass->element_class->valuetype && !readonly)
8503 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
8504 CHECK_TYPELOAD (cmethod->klass);
8507 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8510 g_assert_not_reached ();
8517 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
8521 /* Tail prefix / tail call optimization */
8523 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
8524 /* FIXME: runtime generic context pointer for jumps? */
8525 /* FIXME: handle this for generic sharing eventually */
8526 if (cmethod && (ins_flag & MONO_INST_TAILCALL) &&
8527 !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
8528 supported_tail_call = TRUE;
8530 if (supported_tail_call) {
8533 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8534 INLINE_FAILURE ("tail call");
8536 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
8538 if (ARCH_HAVE_OP_TAIL_CALL) {
8539 /* Handle tail calls similarly to normal calls */
8542 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8543 call->tail_call = TRUE;
8544 call->method = cmethod;
8545 call->signature = mono_method_signature (cmethod);
8548 * We implement tail calls by storing the actual arguments into the
8549 * argument variables, then emitting a CEE_JMP.
8551 for (i = 0; i < n; ++i) {
8552 /* Prevent argument from being register allocated */
8553 arg_array [i]->flags |= MONO_INST_VOLATILE;
8554 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8556 ins = (MonoInst*)call;
8557 ins->inst_p0 = cmethod;
8558 ins->inst_p1 = arg_array [0];
8559 MONO_ADD_INS (bblock, ins);
8560 link_bblock (cfg, bblock, end_bblock);
8561 start_new_bblock = 1;
8563 // FIXME: Eliminate unreachable epilogs
8566 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
8567 * only reachable from this call.
8569 GET_BBLOCK (cfg, tblock, ip + 5);
8570 if (tblock == bblock || tblock->in_count == 0)
8579 * Synchronized wrappers.
 8580 * It's hard to determine where to replace a method with its synchronized
8581 * wrapper without causing an infinite recursion. The current solution is
8582 * to add the synchronized wrapper in the trampolines, and to
8583 * change the called method to a dummy wrapper, and resolve that wrapper
8584 * to the real method in mono_jit_compile_method ().
8586 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8587 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
8588 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
8589 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
8593 INLINE_FAILURE ("call");
8594 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual ? sp [0] : NULL,
8595 imt_arg, vtable_arg);
8598 link_bblock (cfg, bblock, end_bblock);
8599 start_new_bblock = 1;
8601 // FIXME: Eliminate unreachable epilogs
8604 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
8605 * only reachable from this call.
8607 GET_BBLOCK (cfg, tblock, ip + 5);
8608 if (tblock == bblock || tblock->in_count == 0)
8615 /* End of call, INS should contain the result of the call, if any */
8617 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
8620 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8625 if (keep_this_alive) {
8626 MonoInst *dummy_use;
8628 /* See mono_emit_method_call_full () */
8629 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
8632 CHECK_CFG_EXCEPTION;
8636 g_assert (*ip == CEE_RET);
8640 constrained_call = NULL;
8642 emit_seq_point (cfg, method, ip, FALSE, TRUE);
8646 if (cfg->method != method) {
8647 /* return from inlined method */
8649 * If in_count == 0, that means the ret is unreachable due to
 8650 * being preceded by a throw. In that case, inline_method () will
8651 * handle setting the return value
8652 * (test case: test_0_inline_throw ()).
8654 if (return_var && cfg->cbb->in_count) {
8655 MonoType *ret_type = mono_method_signature (method)->ret;
8661 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
8664 //g_assert (returnvar != -1);
8665 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
8666 cfg->ret_var_set = TRUE;
8669 if (cfg->lmf_var && cfg->cbb->in_count)
8673 MonoType *ret_type = mini_replace_type (mono_method_signature (method)->ret);
8675 if (seq_points && !sym_seq_points) {
 8677 * Place a seq point here too even though the IL stack is not
8678 * empty, so a step over on
8681 * will work correctly.
8683 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
8684 MONO_ADD_INS (cfg->cbb, ins);
8687 g_assert (!return_var);
8691 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
8694 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
8697 if (!cfg->vret_addr) {
8700 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
8702 EMIT_NEW_RETLOADA (cfg, ret_addr);
8704 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
8705 ins->klass = mono_class_from_mono_type (ret_type);
8708 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
8709 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
8710 MonoInst *iargs [1];
8714 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
8715 mono_arch_emit_setret (cfg, method, conv);
8717 mono_arch_emit_setret (cfg, method, *sp);
8720 mono_arch_emit_setret (cfg, method, *sp);
8725 if (sp != stack_start)
8727 MONO_INST_NEW (cfg, ins, OP_BR);
8729 ins->inst_target_bb = end_bblock;
8730 MONO_ADD_INS (bblock, ins);
8731 link_bblock (cfg, bblock, end_bblock);
8732 start_new_bblock = 1;
8736 MONO_INST_NEW (cfg, ins, OP_BR);
8738 target = ip + 1 + (signed char)(*ip);
8740 GET_BBLOCK (cfg, tblock, target);
8741 link_bblock (cfg, bblock, tblock);
8742 ins->inst_target_bb = tblock;
8743 if (sp != stack_start) {
8744 handle_stack_args (cfg, stack_start, sp - stack_start);
8746 CHECK_UNVERIFIABLE (cfg);
8748 MONO_ADD_INS (bblock, ins);
8749 start_new_bblock = 1;
8750 inline_costs += BRANCH_COST;
8764 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
8766 target = ip + 1 + *(signed char*)ip;
8772 inline_costs += BRANCH_COST;
8776 MONO_INST_NEW (cfg, ins, OP_BR);
8779 target = ip + 4 + (gint32)read32(ip);
8781 GET_BBLOCK (cfg, tblock, target);
8782 link_bblock (cfg, bblock, tblock);
8783 ins->inst_target_bb = tblock;
8784 if (sp != stack_start) {
8785 handle_stack_args (cfg, stack_start, sp - stack_start);
8787 CHECK_UNVERIFIABLE (cfg);
8790 MONO_ADD_INS (bblock, ins);
8792 start_new_bblock = 1;
8793 inline_costs += BRANCH_COST;
8800 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
8801 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
8802 guint32 opsize = is_short ? 1 : 4;
8804 CHECK_OPSIZE (opsize);
8806 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
8809 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
8814 GET_BBLOCK (cfg, tblock, target);
8815 link_bblock (cfg, bblock, tblock);
8816 GET_BBLOCK (cfg, tblock, ip);
8817 link_bblock (cfg, bblock, tblock);
8819 if (sp != stack_start) {
8820 handle_stack_args (cfg, stack_start, sp - stack_start);
8821 CHECK_UNVERIFIABLE (cfg);
8824 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
8825 cmp->sreg1 = sp [0]->dreg;
8826 type_from_op (cmp, sp [0], NULL);
8829 #if SIZEOF_REGISTER == 4
8830 if (cmp->opcode == OP_LCOMPARE_IMM) {
8831 /* Convert it to OP_LCOMPARE */
8832 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8833 ins->type = STACK_I8;
8834 ins->dreg = alloc_dreg (cfg, STACK_I8);
8836 MONO_ADD_INS (bblock, ins);
8837 cmp->opcode = OP_LCOMPARE;
8838 cmp->sreg2 = ins->dreg;
8841 MONO_ADD_INS (bblock, cmp);
8843 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
8844 type_from_op (ins, sp [0], NULL);
8845 MONO_ADD_INS (bblock, ins);
8846 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
8847 GET_BBLOCK (cfg, tblock, target);
8848 ins->inst_true_bb = tblock;
8849 GET_BBLOCK (cfg, tblock, ip);
8850 ins->inst_false_bb = tblock;
8851 start_new_bblock = 2;
8854 inline_costs += BRANCH_COST;
8869 MONO_INST_NEW (cfg, ins, *ip);
8871 target = ip + 4 + (gint32)read32(ip);
8877 inline_costs += BRANCH_COST;
8881 MonoBasicBlock **targets;
8882 MonoBasicBlock *default_bblock;
8883 MonoJumpInfoBBTable *table;
8884 int offset_reg = alloc_preg (cfg);
8885 int target_reg = alloc_preg (cfg);
8886 int table_reg = alloc_preg (cfg);
8887 int sum_reg = alloc_preg (cfg);
8888 gboolean use_op_switch;
8892 n = read32 (ip + 1);
8895 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
8899 CHECK_OPSIZE (n * sizeof (guint32));
8900 target = ip + n * sizeof (guint32);
8902 GET_BBLOCK (cfg, default_bblock, target);
8903 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
8905 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
8906 for (i = 0; i < n; ++i) {
8907 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
8908 targets [i] = tblock;
8909 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
8913 if (sp != stack_start) {
8915 * Link the current bb with the targets as well, so handle_stack_args
8916 * will set their in_stack correctly.
8918 link_bblock (cfg, bblock, default_bblock);
8919 for (i = 0; i < n; ++i)
8920 link_bblock (cfg, bblock, targets [i]);
8922 handle_stack_args (cfg, stack_start, sp - stack_start);
8924 CHECK_UNVERIFIABLE (cfg);
8927 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
8928 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
8931 for (i = 0; i < n; ++i)
8932 link_bblock (cfg, bblock, targets [i]);
8934 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
8935 table->table = targets;
8936 table->table_size = n;
8938 use_op_switch = FALSE;
8940 /* ARM implements SWITCH statements differently */
8941 /* FIXME: Make it use the generic implementation */
8942 if (!cfg->compile_aot)
8943 use_op_switch = TRUE;
8946 if (COMPILE_LLVM (cfg))
8947 use_op_switch = TRUE;
8949 cfg->cbb->has_jump_table = 1;
8951 if (use_op_switch) {
8952 MONO_INST_NEW (cfg, ins, OP_SWITCH);
8953 ins->sreg1 = src1->dreg;
8954 ins->inst_p0 = table;
8955 ins->inst_many_bb = targets;
8956 ins->klass = GUINT_TO_POINTER (n);
8957 MONO_ADD_INS (cfg->cbb, ins);
8959 if (sizeof (gpointer) == 8)
8960 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
8962 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
8964 #if SIZEOF_REGISTER == 8
8965 /* The upper word might not be zero, and we add it to a 64 bit address later */
8966 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
8969 if (cfg->compile_aot) {
8970 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
8972 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
8973 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
8974 ins->inst_p0 = table;
8975 ins->dreg = table_reg;
8976 MONO_ADD_INS (cfg->cbb, ins);
8979 /* FIXME: Use load_memindex */
8980 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
8981 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
8982 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
8984 start_new_bblock = 1;
8985 inline_costs += (BRANCH_COST * 2);
9005 dreg = alloc_freg (cfg);
9008 dreg = alloc_lreg (cfg);
9011 dreg = alloc_ireg_ref (cfg);
9014 dreg = alloc_preg (cfg);
9017 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
9018 ins->type = ldind_type [*ip - CEE_LDIND_I1];
9019 ins->flags |= ins_flag;
9020 MONO_ADD_INS (bblock, ins);
9022 if (ins_flag & MONO_INST_VOLATILE) {
9023 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9024 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
9025 emit_memory_barrier (cfg, FullBarrier);
9041 if (ins_flag & MONO_INST_VOLATILE) {
9042 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9043 /* FIXME it's questionable if release semantics require full barrier or just StoreStore*/
9044 emit_memory_barrier (cfg, FullBarrier);
9047 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
9048 ins->flags |= ins_flag;
9051 MONO_ADD_INS (bblock, ins);
9053 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
9054 emit_write_barrier (cfg, sp [0], sp [1]);
9063 MONO_INST_NEW (cfg, ins, (*ip));
9065 ins->sreg1 = sp [0]->dreg;
9066 ins->sreg2 = sp [1]->dreg;
9067 type_from_op (ins, sp [0], sp [1]);
9069 ins->dreg = alloc_dreg ((cfg), (ins)->type);
9071 /* Use the immediate opcodes if possible */
9072 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
9073 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9074 if (imm_opcode != -1) {
9075 ins->opcode = imm_opcode;
9076 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
9079 sp [1]->opcode = OP_NOP;
9083 MONO_ADD_INS ((cfg)->cbb, (ins));
9085 *sp++ = mono_decompose_opcode (cfg, ins);
9102 MONO_INST_NEW (cfg, ins, (*ip));
9104 ins->sreg1 = sp [0]->dreg;
9105 ins->sreg2 = sp [1]->dreg;
9106 type_from_op (ins, sp [0], sp [1]);
9108 ADD_WIDEN_OP (ins, sp [0], sp [1]);
9109 ins->dreg = alloc_dreg ((cfg), (ins)->type);
9111 /* FIXME: Pass opcode to is_inst_imm */
9113 /* Use the immediate opcodes if possible */
9114 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
9117 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9118 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
9119 /* Keep emulated opcodes which are optimized away later */
9120 if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
9121 imm_opcode = mono_op_to_op_imm (ins->opcode);
9124 if (imm_opcode != -1) {
9125 ins->opcode = imm_opcode;
9126 if (sp [1]->opcode == OP_I8CONST) {
9127 #if SIZEOF_REGISTER == 8
9128 ins->inst_imm = sp [1]->inst_l;
9130 ins->inst_ls_word = sp [1]->inst_ls_word;
9131 ins->inst_ms_word = sp [1]->inst_ms_word;
9135 ins->inst_imm = (gssize)(sp [1]->inst_c0);
9138 /* Might be followed by an instruction added by ADD_WIDEN_OP */
9139 if (sp [1]->next == NULL)
9140 sp [1]->opcode = OP_NOP;
9143 MONO_ADD_INS ((cfg)->cbb, (ins));
9145 *sp++ = mono_decompose_opcode (cfg, ins);
9158 case CEE_CONV_OVF_I8:
9159 case CEE_CONV_OVF_U8:
9163 /* Special case this earlier so we have long constants in the IR */
9164 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
9165 int data = sp [-1]->inst_c0;
9166 sp [-1]->opcode = OP_I8CONST;
9167 sp [-1]->type = STACK_I8;
9168 #if SIZEOF_REGISTER == 8
9169 if ((*ip) == CEE_CONV_U8)
9170 sp [-1]->inst_c0 = (guint32)data;
9172 sp [-1]->inst_c0 = data;
9174 sp [-1]->inst_ls_word = data;
9175 if ((*ip) == CEE_CONV_U8)
9176 sp [-1]->inst_ms_word = 0;
9178 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
9180 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
9187 case CEE_CONV_OVF_I4:
9188 case CEE_CONV_OVF_I1:
9189 case CEE_CONV_OVF_I2:
9190 case CEE_CONV_OVF_I:
9191 case CEE_CONV_OVF_U:
9194 if (sp [-1]->type == STACK_R8) {
9195 ADD_UNOP (CEE_CONV_OVF_I8);
9202 case CEE_CONV_OVF_U1:
9203 case CEE_CONV_OVF_U2:
9204 case CEE_CONV_OVF_U4:
9207 if (sp [-1]->type == STACK_R8) {
9208 ADD_UNOP (CEE_CONV_OVF_U8);
9215 case CEE_CONV_OVF_I1_UN:
9216 case CEE_CONV_OVF_I2_UN:
9217 case CEE_CONV_OVF_I4_UN:
9218 case CEE_CONV_OVF_I8_UN:
9219 case CEE_CONV_OVF_U1_UN:
9220 case CEE_CONV_OVF_U2_UN:
9221 case CEE_CONV_OVF_U4_UN:
9222 case CEE_CONV_OVF_U8_UN:
9223 case CEE_CONV_OVF_I_UN:
9224 case CEE_CONV_OVF_U_UN:
9231 CHECK_CFG_EXCEPTION;
9235 case CEE_ADD_OVF_UN:
9237 case CEE_MUL_OVF_UN:
9239 case CEE_SUB_OVF_UN:
9245 GSHAREDVT_FAILURE (*ip);
9248 token = read32 (ip + 1);
9249 klass = mini_get_class (method, token, generic_context);
9250 CHECK_TYPELOAD (klass);
9252 if (generic_class_is_reference_type (cfg, klass)) {
9253 MonoInst *store, *load;
9254 int dreg = alloc_ireg_ref (cfg);
9256 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
9257 load->flags |= ins_flag;
9258 MONO_ADD_INS (cfg->cbb, load);
9260 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
9261 store->flags |= ins_flag;
9262 MONO_ADD_INS (cfg->cbb, store);
9264 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
9265 emit_write_barrier (cfg, sp [0], sp [1]);
9267 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
9279 token = read32 (ip + 1);
9280 klass = mini_get_class (method, token, generic_context);
9281 CHECK_TYPELOAD (klass);
9283 /* Optimize the common ldobj+stloc combination */
9293 loc_index = ip [5] - CEE_STLOC_0;
9300 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
9301 CHECK_LOCAL (loc_index);
9303 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9304 ins->dreg = cfg->locals [loc_index]->dreg;
9305 ins->flags |= ins_flag;
9308 if (ins_flag & MONO_INST_VOLATILE) {
9309 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9310 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
9311 emit_memory_barrier (cfg, FullBarrier);
9317 /* Optimize the ldobj+stobj combination */
9318 /* The reference case ends up being a load+store anyway */
9319 /* Skip this if the operation is volatile. */
9320 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
9325 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
9332 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9333 ins->flags |= ins_flag;
9336 if (ins_flag & MONO_INST_VOLATILE) {
9337 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9338 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
9339 emit_memory_barrier (cfg, FullBarrier);
9348 CHECK_STACK_OVF (1);
9350 n = read32 (ip + 1);
9352 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
9353 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
9354 ins->type = STACK_OBJ;
9357 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
9358 MonoInst *iargs [1];
9360 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
9361 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
9363 if (cfg->opt & MONO_OPT_SHARED) {
9364 MonoInst *iargs [3];
9366 if (cfg->compile_aot) {
9367 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
9369 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9370 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
9371 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
9372 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
9373 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
9375 if (bblock->out_of_line) {
9376 MonoInst *iargs [2];
9378 if (image == mono_defaults.corlib) {
9380 * Avoid relocations in AOT and save some space by using a
9381 * version of helper_ldstr specialized to mscorlib.
9383 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
9384 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
9386 /* Avoid creating the string object */
9387 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9388 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
9389 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
9393 if (cfg->compile_aot) {
9394 NEW_LDSTRCONST (cfg, ins, image, n);
9396 MONO_ADD_INS (bblock, ins);
9399 NEW_PCONST (cfg, ins, NULL);
9400 ins->type = STACK_OBJ;
9401 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
9403 OUT_OF_MEMORY_FAILURE;
9406 MONO_ADD_INS (bblock, ins);
9415 MonoInst *iargs [2];
9416 MonoMethodSignature *fsig;
9419 MonoInst *vtable_arg = NULL;
9422 token = read32 (ip + 1);
9423 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
9424 if (!cmethod || mono_loader_get_last_error ())
9426 fsig = mono_method_get_signature (cmethod, image, token);
9430 mono_save_token_info (cfg, image, token, cmethod);
9432 if (!mono_class_init (cmethod->klass))
9433 TYPE_LOAD_ERROR (cmethod->klass);
9435 context_used = mini_method_check_context_used (cfg, cmethod);
9437 if (mono_security_cas_enabled ()) {
9438 if (check_linkdemand (cfg, method, cmethod))
9439 INLINE_FAILURE ("linkdemand");
9440 CHECK_CFG_EXCEPTION;
9441 } else if (mono_security_core_clr_enabled ()) {
9442 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9445 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9446 emit_generic_class_init (cfg, cmethod->klass);
9447 CHECK_TYPELOAD (cmethod->klass);
9451 if (cfg->gsharedvt) {
9452 if (mini_is_gsharedvt_variable_signature (sig))
9453 GSHAREDVT_FAILURE (*ip);
9457 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
9458 mono_method_is_generic_sharable (cmethod, TRUE)) {
9459 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
9460 mono_class_vtable (cfg->domain, cmethod->klass);
9461 CHECK_TYPELOAD (cmethod->klass);
9463 vtable_arg = emit_get_rgctx_method (cfg, context_used,
9464 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9467 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
9468 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9470 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9472 CHECK_TYPELOAD (cmethod->klass);
9473 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9478 n = fsig->param_count;
9482 * Generate smaller code for the common newobj <exception> instruction in
9483 * argument checking code.
9485 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
9486 is_exception_class (cmethod->klass) && n <= 2 &&
9487 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
9488 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
9489 MonoInst *iargs [3];
9491 g_assert (!vtable_arg);
9495 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
9498 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
9502 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
9507 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
9510 g_assert_not_reached ();
9518 /* move the args to allow room for 'this' in the first position */
9524 /* check_call_signature () requires sp[0] to be set */
9525 this_ins.type = STACK_OBJ;
9527 if (check_call_signature (cfg, fsig, sp))
9532 if (mini_class_is_system_array (cmethod->klass)) {
9533 g_assert (!vtable_arg);
9535 *sp = emit_get_rgctx_method (cfg, context_used,
9536 cmethod, MONO_RGCTX_INFO_METHOD);
9538 /* Avoid varargs in the common case */
9539 if (fsig->param_count == 1)
9540 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
9541 else if (fsig->param_count == 2)
9542 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
9543 else if (fsig->param_count == 3)
9544 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
9545 else if (fsig->param_count == 4)
9546 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
9548 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
9549 } else if (cmethod->string_ctor) {
9550 g_assert (!context_used);
9551 g_assert (!vtable_arg);
9552 /* we simply pass a null pointer */
9553 EMIT_NEW_PCONST (cfg, *sp, NULL);
9554 /* now call the string ctor */
9555 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
9557 MonoInst* callvirt_this_arg = NULL;
9559 if (cmethod->klass->valuetype) {
9560 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
9561 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
9562 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
9567 * The code generated by mini_emit_virtual_call () expects
9568 * iargs [0] to be a boxed instance, but luckily the vcall
9569 * will be transformed into a normal call there.
9571 } else if (context_used) {
9572 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
9575 MonoVTable *vtable = NULL;
9577 if (!cfg->compile_aot)
9578 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9579 CHECK_TYPELOAD (cmethod->klass);
9582 * TypeInitializationExceptions thrown from the mono_runtime_class_init
9583 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
9584 * As a workaround, we call class cctors before allocating objects.
9586 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
9587 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, cmethod->klass, helper_sig_class_init_trampoline, NULL);
9588 if (cfg->verbose_level > 2)
9589 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
9590 class_inits = g_slist_prepend (class_inits, cmethod->klass);
9593 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
9596 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
9599 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
9601 /* Now call the actual ctor */
9602 /* Avoid virtual calls to ctors if possible */
9603 if (mono_class_is_marshalbyref (cmethod->klass))
9604 callvirt_this_arg = sp [0];
9607 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
9608 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9609 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9614 CHECK_CFG_EXCEPTION;
9615 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
9616 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
9617 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
9618 !g_list_find (dont_inline, cmethod)) {
9621 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
9622 cfg->real_offset += 5;
9625 inline_costs += costs - 5;
9627 INLINE_FAILURE ("inline failure");
9628 // FIXME-VT: Clean this up
9629 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
9630 GSHAREDVT_FAILURE(*ip);
9631 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
9633 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
9636 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
9637 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
9638 } else if (context_used &&
9639 ((!mono_method_is_generic_sharable (cmethod, TRUE) ||
9640 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
9641 MonoInst *cmethod_addr;
9643 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
9645 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
9646 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9648 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
9650 INLINE_FAILURE ("ctor call");
9651 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
9652 callvirt_this_arg, NULL, vtable_arg);
9656 if (alloc == NULL) {
9658 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
9659 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
9673 token = read32 (ip + 1);
9674 klass = mini_get_class (method, token, generic_context);
9675 CHECK_TYPELOAD (klass);
9676 if (sp [0]->type != STACK_OBJ)
9679 context_used = mini_class_check_context_used (cfg, klass);
9681 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9688 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9691 if (cfg->compile_aot)
9692 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9694 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9696 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
9698 *sp++ = emit_castclass_with_cache (cfg, klass, args, &bblock);
9701 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9702 MonoMethod *mono_castclass;
9703 MonoInst *iargs [1];
9706 mono_castclass = mono_marshal_get_castclass (klass);
9709 save_cast_details (cfg, klass, sp [0]->dreg, TRUE, &bblock);
9710 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
9711 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9712 reset_cast_details (cfg);
9713 CHECK_CFG_EXCEPTION;
9714 g_assert (costs > 0);
9717 cfg->real_offset += 5;
9722 inline_costs += costs;
9725 ins = handle_castclass (cfg, klass, *sp, context_used);
9726 CHECK_CFG_EXCEPTION;
9736 token = read32 (ip + 1);
9737 klass = mini_get_class (method, token, generic_context);
9738 CHECK_TYPELOAD (klass);
9739 if (sp [0]->type != STACK_OBJ)
9742 context_used = mini_class_check_context_used (cfg, klass);
9744 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9745 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
9752 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9755 if (cfg->compile_aot)
9756 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9758 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9760 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
9763 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9764 MonoMethod *mono_isinst;
9765 MonoInst *iargs [1];
9768 mono_isinst = mono_marshal_get_isinst (klass);
9771 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
9772 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9773 CHECK_CFG_EXCEPTION;
9774 g_assert (costs > 0);
9777 cfg->real_offset += 5;
9782 inline_costs += costs;
9785 ins = handle_isinst (cfg, klass, *sp, context_used);
9786 CHECK_CFG_EXCEPTION;
9793 case CEE_UNBOX_ANY: {
9797 token = read32 (ip + 1);
9798 klass = mini_get_class (method, token, generic_context);
9799 CHECK_TYPELOAD (klass);
9801 mono_save_token_info (cfg, image, token, klass);
9803 context_used = mini_class_check_context_used (cfg, klass);
9805 if (mini_is_gsharedvt_klass (cfg, klass)) {
9806 *sp = handle_unbox_gsharedvt (cfg, klass, *sp, &bblock);
9814 if (generic_class_is_reference_type (cfg, klass)) {
9815 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
9816 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9823 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9826 /*FIXME AOT support*/
9827 if (cfg->compile_aot)
9828 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9830 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9832 /* The wrapper doesn't inline well so the bloat of inlining doesn't pay off. */
9833 *sp++ = emit_castclass_with_cache (cfg, klass, args, &bblock);
9836 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9837 MonoMethod *mono_castclass;
9838 MonoInst *iargs [1];
9841 mono_castclass = mono_marshal_get_castclass (klass);
9844 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
9845 iargs, ip, cfg->real_offset, dont_inline, TRUE);
9846 CHECK_CFG_EXCEPTION;
9847 g_assert (costs > 0);
9850 cfg->real_offset += 5;
9854 inline_costs += costs;
9856 ins = handle_castclass (cfg, klass, *sp, context_used);
9857 CHECK_CFG_EXCEPTION;
9865 if (mono_class_is_nullable (klass)) {
9866 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
9873 ins = handle_unbox (cfg, klass, sp, context_used);
9879 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9892 token = read32 (ip + 1);
9893 klass = mini_get_class (method, token, generic_context);
9894 CHECK_TYPELOAD (klass);
9896 mono_save_token_info (cfg, image, token, klass);
9898 context_used = mini_class_check_context_used (cfg, klass);
9900 if (generic_class_is_reference_type (cfg, klass)) {
9906 if (klass == mono_defaults.void_class)
9908 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
9910 /* frequent check in generic code: box (struct), brtrue */
9912 // FIXME: LLVM can't handle the inconsistent bb linking
9913 if (!mono_class_is_nullable (klass) &&
9914 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
9915 (ip [5] == CEE_BRTRUE ||
9916 ip [5] == CEE_BRTRUE_S ||
9917 ip [5] == CEE_BRFALSE ||
9918 ip [5] == CEE_BRFALSE_S)) {
9919 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
9921 MonoBasicBlock *true_bb, *false_bb;
9925 if (cfg->verbose_level > 3) {
9926 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9927 printf ("<box+brtrue opt>\n");
9935 target = ip + 1 + (signed char)(*ip);
9942 target = ip + 4 + (gint)(read32 (ip));
9946 g_assert_not_reached ();
9950 * We need to link both bblocks, since it is needed for handling stack
9951 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
9952 * Branching to only one of them would lead to inconsistencies, so
9953 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
9955 GET_BBLOCK (cfg, true_bb, target);
9956 GET_BBLOCK (cfg, false_bb, ip);
9958 mono_link_bblock (cfg, cfg->cbb, true_bb);
9959 mono_link_bblock (cfg, cfg->cbb, false_bb);
9961 if (sp != stack_start) {
9962 handle_stack_args (cfg, stack_start, sp - stack_start);
9964 CHECK_UNVERIFIABLE (cfg);
9967 if (COMPILE_LLVM (cfg)) {
9968 dreg = alloc_ireg (cfg);
9969 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
9970 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
9972 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
9974 /* The JIT can't eliminate the iconst+compare */
9975 MONO_INST_NEW (cfg, ins, OP_BR);
9976 ins->inst_target_bb = is_true ? true_bb : false_bb;
9977 MONO_ADD_INS (cfg->cbb, ins);
9980 start_new_bblock = 1;
9984 *sp++ = handle_box (cfg, val, klass, context_used, &bblock);
9986 CHECK_CFG_EXCEPTION;
9995 token = read32 (ip + 1);
9996 klass = mini_get_class (method, token, generic_context);
9997 CHECK_TYPELOAD (klass);
9999 mono_save_token_info (cfg, image, token, klass);
10001 context_used = mini_class_check_context_used (cfg, klass);
10003 if (mono_class_is_nullable (klass)) {
10006 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
10007 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
10011 ins = handle_unbox (cfg, klass, sp, context_used);
10024 MonoClassField *field;
10025 #ifndef DISABLE_REMOTING
10029 gboolean is_instance;
10031 gpointer addr = NULL;
10032 gboolean is_special_static;
10034 MonoInst *store_val = NULL;
10035 MonoInst *thread_ins;
10038 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
10040 if (op == CEE_STFLD) {
10043 store_val = sp [1];
10048 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
10050 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
10053 if (op == CEE_STSFLD) {
10056 store_val = sp [0];
10061 token = read32 (ip + 1);
10062 if (method->wrapper_type != MONO_WRAPPER_NONE) {
10063 field = mono_method_get_wrapper_data (method, token);
10064 klass = field->parent;
10067 field = mono_field_from_token (image, token, &klass, generic_context);
10071 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
10072 FIELD_ACCESS_FAILURE;
10073 mono_class_init (klass);
10075 if (is_instance && *ip != CEE_LDFLDA && is_magic_tls_access (field))
10078 /* if the class is Critical then transparent code cannot access its fields */
10079 if (!is_instance && mono_security_core_clr_enabled ())
10080 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
10082 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
10083 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
10084 if (mono_security_core_clr_enabled ())
10085 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
10089 * LDFLD etc. is usable on static fields as well, so convert those cases to
10092 if (is_instance && field->type->attrs & FIELD_ATTRIBUTE_STATIC) {
10104 g_assert_not_reached ();
10106 is_instance = FALSE;
10109 context_used = mini_class_check_context_used (cfg, klass);
10111 /* INSTANCE CASE */
10113 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
10114 if (op == CEE_STFLD) {
10115 if (target_type_is_incompatible (cfg, field->type, sp [1]))
10117 #ifndef DISABLE_REMOTING
10118 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
10119 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
10120 MonoInst *iargs [5];
10122 GSHAREDVT_FAILURE (op);
10124 iargs [0] = sp [0];
10125 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10126 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10127 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
10129 iargs [4] = sp [1];
10131 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10132 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
10133 iargs, ip, cfg->real_offset, dont_inline, TRUE);
10134 CHECK_CFG_EXCEPTION;
10135 g_assert (costs > 0);
10137 cfg->real_offset += 5;
10140 inline_costs += costs;
10142 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
10149 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10151 if (mini_is_gsharedvt_klass (cfg, klass)) {
10152 MonoInst *offset_ins;
10154 context_used = mini_class_check_context_used (cfg, klass);
10156 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10157 dreg = alloc_ireg_mp (cfg);
10158 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10159 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
10160 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
10162 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
10164 if (sp [0]->opcode != OP_LDADDR)
10165 store->flags |= MONO_INST_FAULT;
10167 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
10168 /* insert call to write barrier */
10172 dreg = alloc_ireg_mp (cfg);
10173 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10174 emit_write_barrier (cfg, ptr, sp [1]);
10177 store->flags |= ins_flag;
10184 #ifndef DISABLE_REMOTING
10185 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
10186 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
10187 MonoInst *iargs [4];
10189 GSHAREDVT_FAILURE (op);
10191 iargs [0] = sp [0];
10192 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10193 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10194 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
10195 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10196 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
10197 iargs, ip, cfg->real_offset, dont_inline, TRUE);
10198 CHECK_CFG_EXCEPTION;
10200 g_assert (costs > 0);
10202 cfg->real_offset += 5;
10206 inline_costs += costs;
10208 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
10214 if (sp [0]->type == STACK_VTYPE) {
10217 /* Have to compute the address of the variable */
10219 var = get_vreg_to_inst (cfg, sp [0]->dreg);
10221 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
10223 g_assert (var->klass == klass);
10225 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
10229 if (op == CEE_LDFLDA) {
10230 if (is_magic_tls_access (field)) {
10231 GSHAREDVT_FAILURE (*ip);
10233 *sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
10235 if (sp [0]->type == STACK_OBJ) {
10236 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10237 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
10240 dreg = alloc_ireg_mp (cfg);
10242 if (mini_is_gsharedvt_klass (cfg, klass)) {
10243 MonoInst *offset_ins;
10245 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10246 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10248 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10250 ins->klass = mono_class_from_mono_type (field->type);
10251 ins->type = STACK_MP;
10257 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10259 if (mini_is_gsharedvt_klass (cfg, klass)) {
10260 MonoInst *offset_ins;
10262 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10263 dreg = alloc_ireg_mp (cfg);
10264 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10265 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
10267 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
10269 load->flags |= ins_flag;
10270 if (sp [0]->opcode != OP_LDADDR)
10271 load->flags |= MONO_INST_FAULT;
10285 * We can only support shared generic static
10286 * field access on architectures where the
10287 * trampoline code has been extended to handle
10288 * the generic class init.
10290 #ifndef MONO_ARCH_VTABLE_REG
10291 GENERIC_SHARING_FAILURE (op);
10294 context_used = mini_class_check_context_used (cfg, klass);
10296 ftype = mono_field_get_type (field);
10298 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
10301 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
10302 * to be called here.
10304 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
10305 mono_class_vtable (cfg->domain, klass);
10306 CHECK_TYPELOAD (klass);
10308 mono_domain_lock (cfg->domain);
10309 if (cfg->domain->special_static_fields)
10310 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
10311 mono_domain_unlock (cfg->domain);
10313 is_special_static = mono_class_field_is_special_static (field);
10315 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
10316 thread_ins = mono_get_thread_intrinsic (cfg);
10320 /* Generate IR to compute the field address */
10321 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
10323 * Fast access to TLS data
10324 * Inline version of get_thread_static_data () in
10328 int idx, static_data_reg, array_reg, dreg;
10330 GSHAREDVT_FAILURE (op);
10332 // offset &= 0x7fffffff;
10333 // idx = (offset >> 24) - 1;
10334 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
10335 MONO_ADD_INS (cfg->cbb, thread_ins);
10336 static_data_reg = alloc_ireg (cfg);
10337 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
10339 if (cfg->compile_aot) {
10340 int offset_reg, offset2_reg, idx_reg;
10342 /* For TLS variables, this will return the TLS offset */
10343 EMIT_NEW_SFLDACONST (cfg, ins, field);
10344 offset_reg = ins->dreg;
10345 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
10346 idx_reg = alloc_ireg (cfg);
10347 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
10348 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
10349 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
10350 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
10351 array_reg = alloc_ireg (cfg);
10352 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
10353 offset2_reg = alloc_ireg (cfg);
10354 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
10355 dreg = alloc_ireg (cfg);
10356 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
10358 offset = (gsize)addr & 0x7fffffff;
10359 idx = (offset >> 24) - 1;
10361 array_reg = alloc_ireg (cfg);
10362 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
10363 dreg = alloc_ireg (cfg);
10364 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
10366 } else if ((cfg->opt & MONO_OPT_SHARED) ||
10367 (cfg->compile_aot && is_special_static) ||
10368 (context_used && is_special_static)) {
10369 MonoInst *iargs [2];
10371 g_assert (field->parent);
10372 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10373 if (context_used) {
10374 iargs [1] = emit_get_rgctx_field (cfg, context_used,
10375 field, MONO_RGCTX_INFO_CLASS_FIELD);
10377 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10379 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10380 } else if (context_used) {
10381 MonoInst *static_data;
10384 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
10385 method->klass->name_space, method->klass->name, method->name,
10386 depth, field->offset);
10389 if (mono_class_needs_cctor_run (klass, method))
10390 emit_generic_class_init (cfg, klass);
10393 * The pointer we're computing here is
10395 * super_info.static_data + field->offset
10397 static_data = emit_get_rgctx_klass (cfg, context_used,
10398 klass, MONO_RGCTX_INFO_STATIC_DATA);
10400 if (mini_is_gsharedvt_klass (cfg, klass)) {
10401 MonoInst *offset_ins;
10403 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10404 dreg = alloc_ireg_mp (cfg);
10405 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
10406 } else if (field->offset == 0) {
10409 int addr_reg = mono_alloc_preg (cfg);
10410 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
10412 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
10413 MonoInst *iargs [2];
10415 g_assert (field->parent);
10416 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10417 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10418 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10420 MonoVTable *vtable = NULL;
10422 if (!cfg->compile_aot)
10423 vtable = mono_class_vtable (cfg->domain, klass);
10424 CHECK_TYPELOAD (klass);
10427 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
10428 if (!(g_slist_find (class_inits, klass))) {
10429 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, klass, helper_sig_class_init_trampoline, NULL);
10430 if (cfg->verbose_level > 2)
10431 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
10432 class_inits = g_slist_prepend (class_inits, klass);
10435 if (cfg->run_cctors) {
10437 /* This makes so that inline cannot trigger */
10438 /* .cctors: too many apps depend on them */
10439 /* running with a specific order... */
10441 if (! vtable->initialized)
10442 INLINE_FAILURE ("class init");
10443 ex = mono_runtime_class_init_full (vtable, FALSE);
10445 set_exception_object (cfg, ex);
10446 goto exception_exit;
10450 if (cfg->compile_aot)
10451 EMIT_NEW_SFLDACONST (cfg, ins, field);
10454 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10456 EMIT_NEW_PCONST (cfg, ins, addr);
10459 MonoInst *iargs [1];
10460 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
10461 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
10465 /* Generate IR to do the actual load/store operation */
10467 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
10468 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10469 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
10470 emit_memory_barrier (cfg, FullBarrier);
10473 if (op == CEE_LDSFLDA) {
10474 ins->klass = mono_class_from_mono_type (ftype);
10475 ins->type = STACK_PTR;
10477 } else if (op == CEE_STSFLD) {
10480 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
10481 store->flags |= ins_flag;
10483 gboolean is_const = FALSE;
10484 MonoVTable *vtable = NULL;
10485 gpointer addr = NULL;
10487 if (!context_used) {
10488 vtable = mono_class_vtable (cfg->domain, klass);
10489 CHECK_TYPELOAD (klass);
10491 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
10492 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
10493 int ro_type = ftype->type;
10495 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10496 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
10497 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
10500 GSHAREDVT_FAILURE (op);
10502 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
10505 case MONO_TYPE_BOOLEAN:
10507 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
10511 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
10514 case MONO_TYPE_CHAR:
10516 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
10520 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
10525 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
10529 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
10534 case MONO_TYPE_PTR:
10535 case MONO_TYPE_FNPTR:
10536 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10537 type_to_eval_stack_type ((cfg), field->type, *sp);
10540 case MONO_TYPE_STRING:
10541 case MONO_TYPE_OBJECT:
10542 case MONO_TYPE_CLASS:
10543 case MONO_TYPE_SZARRAY:
10544 case MONO_TYPE_ARRAY:
10545 if (!mono_gc_is_moving ()) {
10546 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10547 type_to_eval_stack_type ((cfg), field->type, *sp);
10555 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
10560 case MONO_TYPE_VALUETYPE:
10570 CHECK_STACK_OVF (1);
10572 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
10573 load->flags |= ins_flag;
10579 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
10580 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10581 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
10582 emit_memory_barrier (cfg, FullBarrier);
10593 token = read32 (ip + 1);
10594 klass = mini_get_class (method, token, generic_context);
10595 CHECK_TYPELOAD (klass);
10596 if (ins_flag & MONO_INST_VOLATILE) {
10597 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10598 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
10599 emit_memory_barrier (cfg, FullBarrier);
10601 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
10602 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
10603 ins->flags |= ins_flag;
10604 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
10605 generic_class_is_reference_type (cfg, klass)) {
10606 /* insert call to write barrier */
10607 emit_write_barrier (cfg, sp [0], sp [1]);
10619 const char *data_ptr;
10621 guint32 field_token;
10627 token = read32 (ip + 1);
10629 klass = mini_get_class (method, token, generic_context);
10630 CHECK_TYPELOAD (klass);
10632 context_used = mini_class_check_context_used (cfg, klass);
10634 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
10635 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
10636 ins->sreg1 = sp [0]->dreg;
10637 ins->type = STACK_I4;
10638 ins->dreg = alloc_ireg (cfg);
10639 MONO_ADD_INS (cfg->cbb, ins);
10640 *sp = mono_decompose_opcode (cfg, ins);
10643 if (context_used) {
10644 MonoInst *args [3];
10645 MonoClass *array_class = mono_array_class_get (klass, 1);
10646 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
10648 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
10651 args [0] = emit_get_rgctx_klass (cfg, context_used,
10652 array_class, MONO_RGCTX_INFO_VTABLE);
10657 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
10659 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
10661 if (cfg->opt & MONO_OPT_SHARED) {
10662 /* Decompose now to avoid problems with references to the domainvar */
10663 MonoInst *iargs [3];
10665 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10666 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10667 iargs [2] = sp [0];
10669 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
10671 /* Decompose later since it is needed by abcrem */
10672 MonoClass *array_type = mono_array_class_get (klass, 1);
10673 mono_class_vtable (cfg->domain, array_type);
10674 CHECK_TYPELOAD (array_type);
10676 MONO_INST_NEW (cfg, ins, OP_NEWARR);
10677 ins->dreg = alloc_ireg_ref (cfg);
10678 ins->sreg1 = sp [0]->dreg;
10679 ins->inst_newa_class = klass;
10680 ins->type = STACK_OBJ;
10681 ins->klass = array_type;
10682 MONO_ADD_INS (cfg->cbb, ins);
10683 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10684 cfg->cbb->has_array_access = TRUE;
10686 /* Needed so mono_emit_load_get_addr () gets called */
10687 mono_get_got_var (cfg);
10697 * we inline/optimize the initialization sequence if possible.
10698 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
10699 * for small sizes open code the memcpy
10700 * ensure the rva field is big enough
10702 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
10703 MonoMethod *memcpy_method = get_memcpy_method ();
10704 MonoInst *iargs [3];
10705 int add_reg = alloc_ireg_mp (cfg);
10707 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
10708 if (cfg->compile_aot) {
10709 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
10711 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
10713 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
10714 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
10723 if (sp [0]->type != STACK_OBJ)
10726 MONO_INST_NEW (cfg, ins, OP_LDLEN);
10727 ins->dreg = alloc_preg (cfg);
10728 ins->sreg1 = sp [0]->dreg;
10729 ins->type = STACK_I4;
10730 /* This flag will be inherited by the decomposition */
10731 ins->flags |= MONO_INST_FAULT;
10732 MONO_ADD_INS (cfg->cbb, ins);
10733 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10734 cfg->cbb->has_array_access = TRUE;
10742 if (sp [0]->type != STACK_OBJ)
10745 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10747 klass = mini_get_class (method, read32 (ip + 1), generic_context);
10748 CHECK_TYPELOAD (klass);
10749 /* we need to make sure that this array is exactly the type it needs
10750 * to be for correctness. the wrappers are lax with their usage
10751 * so we need to ignore them here
10753 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
10754 MonoClass *array_class = mono_array_class_get (klass, 1);
10755 mini_emit_check_array_type (cfg, sp [0], array_class);
10756 CHECK_TYPELOAD (array_class);
10760 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10765 case CEE_LDELEM_I1:
10766 case CEE_LDELEM_U1:
10767 case CEE_LDELEM_I2:
10768 case CEE_LDELEM_U2:
10769 case CEE_LDELEM_I4:
10770 case CEE_LDELEM_U4:
10771 case CEE_LDELEM_I8:
10773 case CEE_LDELEM_R4:
10774 case CEE_LDELEM_R8:
10775 case CEE_LDELEM_REF: {
10781 if (*ip == CEE_LDELEM) {
10783 token = read32 (ip + 1);
10784 klass = mini_get_class (method, token, generic_context);
10785 CHECK_TYPELOAD (klass);
10786 mono_class_init (klass);
10789 klass = array_access_to_klass (*ip);
10791 if (sp [0]->type != STACK_OBJ)
10794 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10796 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
10797 // FIXME-VT: OP_ICONST optimization
10798 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10799 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10800 ins->opcode = OP_LOADV_MEMBASE;
10801 } else if (sp [1]->opcode == OP_ICONST) {
10802 int array_reg = sp [0]->dreg;
10803 int index_reg = sp [1]->dreg;
10804 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
10806 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
10807 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
10809 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10810 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10813 if (*ip == CEE_LDELEM)
10820 case CEE_STELEM_I1:
10821 case CEE_STELEM_I2:
10822 case CEE_STELEM_I4:
10823 case CEE_STELEM_I8:
10824 case CEE_STELEM_R4:
10825 case CEE_STELEM_R8:
10826 case CEE_STELEM_REF:
10831 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10833 if (*ip == CEE_STELEM) {
10835 token = read32 (ip + 1);
10836 klass = mini_get_class (method, token, generic_context);
10837 CHECK_TYPELOAD (klass);
10838 mono_class_init (klass);
10841 klass = array_access_to_klass (*ip);
10843 if (sp [0]->type != STACK_OBJ)
10846 emit_array_store (cfg, klass, sp, TRUE);
10848 if (*ip == CEE_STELEM)
10855 case CEE_CKFINITE: {
10859 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
10860 ins->sreg1 = sp [0]->dreg;
10861 ins->dreg = alloc_freg (cfg);
10862 ins->type = STACK_R8;
10863 MONO_ADD_INS (bblock, ins);
10865 *sp++ = mono_decompose_opcode (cfg, ins);
10870 case CEE_REFANYVAL: {
10871 MonoInst *src_var, *src;
10873 int klass_reg = alloc_preg (cfg);
10874 int dreg = alloc_preg (cfg);
10876 GSHAREDVT_FAILURE (*ip);
10879 MONO_INST_NEW (cfg, ins, *ip);
10882 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
10883 CHECK_TYPELOAD (klass);
10884 mono_class_init (klass);
10886 context_used = mini_class_check_context_used (cfg, klass);
10889 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10891 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
10892 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
10893 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
10895 if (context_used) {
10896 MonoInst *klass_ins;
10898 klass_ins = emit_get_rgctx_klass (cfg, context_used,
10899 klass, MONO_RGCTX_INFO_KLASS);
10902 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
10903 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
10905 mini_emit_class_check (cfg, klass_reg, klass);
10907 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
10908 ins->type = STACK_MP;
10913 case CEE_MKREFANY: {
10914 MonoInst *loc, *addr;
10916 GSHAREDVT_FAILURE (*ip);
10919 MONO_INST_NEW (cfg, ins, *ip);
10922 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
10923 CHECK_TYPELOAD (klass);
10924 mono_class_init (klass);
10926 context_used = mini_class_check_context_used (cfg, klass);
10928 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
10929 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
10931 if (context_used) {
10932 MonoInst *const_ins;
10933 int type_reg = alloc_preg (cfg);
10935 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
10936 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
10937 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
10938 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
10939 } else if (cfg->compile_aot) {
10940 int const_reg = alloc_preg (cfg);
10941 int type_reg = alloc_preg (cfg);
10943 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
10944 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
10945 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
10946 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
10948 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
10949 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
10951 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
10953 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
10954 ins->type = STACK_VTYPE;
10955 ins->klass = mono_defaults.typed_reference_class;
10960 case CEE_LDTOKEN: {
10962 MonoClass *handle_class;
10964 CHECK_STACK_OVF (1);
10967 n = read32 (ip + 1);
10969 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
10970 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
10971 handle = mono_method_get_wrapper_data (method, n);
10972 handle_class = mono_method_get_wrapper_data (method, n + 1);
10973 if (handle_class == mono_defaults.typehandle_class)
10974 handle = &((MonoClass*)handle)->byval_arg;
10977 handle = mono_ldtoken (image, n, &handle_class, generic_context);
10981 mono_class_init (handle_class);
10982 if (cfg->generic_sharing_context) {
10983 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
10984 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
10985 /* This case handles ldtoken
10986 of an open type, like for
10989 } else if (handle_class == mono_defaults.typehandle_class) {
10990 /* If we get a MONO_TYPE_CLASS
10991 then we need to provide the
10993 instantiation of it. */
10994 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
10997 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type (handle));
10998 } else if (handle_class == mono_defaults.fieldhandle_class)
10999 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
11000 else if (handle_class == mono_defaults.methodhandle_class)
11001 context_used = mini_method_check_context_used (cfg, handle);
11003 g_assert_not_reached ();
11006 if ((cfg->opt & MONO_OPT_SHARED) &&
11007 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
11008 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
11009 MonoInst *addr, *vtvar, *iargs [3];
11010 int method_context_used;
11012 method_context_used = mini_method_check_context_used (cfg, method);
11014 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11016 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
11017 EMIT_NEW_ICONST (cfg, iargs [1], n);
11018 if (method_context_used) {
11019 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
11020 method, MONO_RGCTX_INFO_METHOD);
11021 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
11023 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
11024 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
11026 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11028 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11030 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11032 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
11033 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
11034 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
11035 (cmethod->klass == mono_defaults.systemtype_class) &&
11036 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
11037 MonoClass *tclass = mono_class_from_mono_type (handle);
11039 mono_class_init (tclass);
11040 if (context_used) {
11041 ins = emit_get_rgctx_klass (cfg, context_used,
11042 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
11043 } else if (cfg->compile_aot) {
11044 if (method->wrapper_type) {
11045 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
11046 /* Special case for static synchronized wrappers */
11047 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
11049 /* FIXME: n is not a normal token */
11051 EMIT_NEW_PCONST (cfg, ins, NULL);
11054 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
11057 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
11059 ins->type = STACK_OBJ;
11060 ins->klass = cmethod->klass;
11063 MonoInst *addr, *vtvar;
11065 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11067 if (context_used) {
11068 if (handle_class == mono_defaults.typehandle_class) {
11069 ins = emit_get_rgctx_klass (cfg, context_used,
11070 mono_class_from_mono_type (handle),
11071 MONO_RGCTX_INFO_TYPE);
11072 } else if (handle_class == mono_defaults.methodhandle_class) {
11073 ins = emit_get_rgctx_method (cfg, context_used,
11074 handle, MONO_RGCTX_INFO_METHOD);
11075 } else if (handle_class == mono_defaults.fieldhandle_class) {
11076 ins = emit_get_rgctx_field (cfg, context_used,
11077 handle, MONO_RGCTX_INFO_CLASS_FIELD);
11079 g_assert_not_reached ();
11081 } else if (cfg->compile_aot) {
11082 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
11084 EMIT_NEW_PCONST (cfg, ins, handle);
11086 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11087 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11088 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11098 MONO_INST_NEW (cfg, ins, OP_THROW);
11100 ins->sreg1 = sp [0]->dreg;
11102 bblock->out_of_line = TRUE;
11103 MONO_ADD_INS (bblock, ins);
11104 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11105 MONO_ADD_INS (bblock, ins);
11108 link_bblock (cfg, bblock, end_bblock);
11109 start_new_bblock = 1;
11111 case CEE_ENDFINALLY:
11112 /* mono_save_seq_point_info () depends on this */
11113 if (sp != stack_start)
11114 emit_seq_point (cfg, method, ip, FALSE, FALSE);
11115 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
11116 MONO_ADD_INS (bblock, ins);
11118 start_new_bblock = 1;
11121 * Control will leave the method so empty the stack, otherwise
11122 * the next basic block will start with a nonempty stack.
11124 while (sp != stack_start) {
11129 case CEE_LEAVE_S: {
11132 if (*ip == CEE_LEAVE) {
11134 target = ip + 5 + (gint32)read32(ip + 1);
11137 target = ip + 2 + (signed char)(ip [1]);
11140 /* empty the stack */
11141 while (sp != stack_start) {
11146 * If this leave statement is in a catch block, check for a
11147 * pending exception, and rethrow it if necessary.
11148 * We avoid doing this in runtime invoke wrappers, since those are called
11149 * by native code which excepts the wrapper to catch all exceptions.
11151 for (i = 0; i < header->num_clauses; ++i) {
11152 MonoExceptionClause *clause = &header->clauses [i];
11155 * Use <= in the final comparison to handle clauses with multiple
11156 * leave statements, like in bug #78024.
11157 * The ordering of the exception clauses guarantees that we find the
11158 * innermost clause.
11160 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
11162 MonoBasicBlock *dont_throw;
11167 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
11170 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
11172 NEW_BBLOCK (cfg, dont_throw);
11175 * Currently, we always rethrow the abort exception, despite the
11176 * fact that this is not correct. See thread6.cs for an example.
11177 * But propagating the abort exception is more important than
11178 * getting the sematics right.
11180 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
11181 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
11182 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
11184 MONO_START_BB (cfg, dont_throw);
11189 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
11191 MonoExceptionClause *clause;
11193 for (tmp = handlers; tmp; tmp = tmp->next) {
11194 clause = tmp->data;
11195 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
11197 link_bblock (cfg, bblock, tblock);
11198 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
11199 ins->inst_target_bb = tblock;
11200 ins->inst_eh_block = clause;
11201 MONO_ADD_INS (bblock, ins);
11202 bblock->has_call_handler = 1;
11203 if (COMPILE_LLVM (cfg)) {
11204 MonoBasicBlock *target_bb;
11207 * Link the finally bblock with the target, since it will
11208 * conceptually branch there.
11209 * FIXME: Have to link the bblock containing the endfinally.
11211 GET_BBLOCK (cfg, target_bb, target);
11212 link_bblock (cfg, tblock, target_bb);
11215 g_list_free (handlers);
11218 MONO_INST_NEW (cfg, ins, OP_BR);
11219 MONO_ADD_INS (bblock, ins);
11220 GET_BBLOCK (cfg, tblock, target);
11221 link_bblock (cfg, bblock, tblock);
11222 ins->inst_target_bb = tblock;
11223 start_new_bblock = 1;
11225 if (*ip == CEE_LEAVE)
11234 * Mono specific opcodes
11236 case MONO_CUSTOM_PREFIX: {
11238 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
11242 case CEE_MONO_ICALL: {
11244 MonoJitICallInfo *info;
11246 token = read32 (ip + 2);
11247 func = mono_method_get_wrapper_data (method, token);
11248 info = mono_find_jit_icall_by_addr (func);
11250 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
11253 CHECK_STACK (info->sig->param_count);
11254 sp -= info->sig->param_count;
11256 ins = mono_emit_jit_icall (cfg, info->func, sp);
11257 if (!MONO_TYPE_IS_VOID (info->sig->ret))
11261 inline_costs += 10 * num_calls++;
11265 case CEE_MONO_LDPTR: {
11268 CHECK_STACK_OVF (1);
11270 token = read32 (ip + 2);
11272 ptr = mono_method_get_wrapper_data (method, token);
11273 /* FIXME: Generalize this */
11274 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
11275 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
11280 EMIT_NEW_PCONST (cfg, ins, ptr);
11283 inline_costs += 10 * num_calls++;
11284 /* Can't embed random pointers into AOT code */
11288 case CEE_MONO_JIT_ICALL_ADDR: {
11289 MonoJitICallInfo *callinfo;
11292 CHECK_STACK_OVF (1);
11294 token = read32 (ip + 2);
11296 ptr = mono_method_get_wrapper_data (method, token);
11297 callinfo = mono_find_jit_icall_by_addr (ptr);
11298 g_assert (callinfo);
11299 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
11302 inline_costs += 10 * num_calls++;
11305 case CEE_MONO_ICALL_ADDR: {
11306 MonoMethod *cmethod;
11309 CHECK_STACK_OVF (1);
11311 token = read32 (ip + 2);
11313 cmethod = mono_method_get_wrapper_data (method, token);
11315 if (cfg->compile_aot) {
11316 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
11318 ptr = mono_lookup_internal_call (cmethod);
11320 EMIT_NEW_PCONST (cfg, ins, ptr);
11326 case CEE_MONO_VTADDR: {
11327 MonoInst *src_var, *src;
11333 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11334 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
11339 case CEE_MONO_NEWOBJ: {
11340 MonoInst *iargs [2];
11342 CHECK_STACK_OVF (1);
11344 token = read32 (ip + 2);
11345 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11346 mono_class_init (klass);
11347 NEW_DOMAINCONST (cfg, iargs [0]);
11348 MONO_ADD_INS (cfg->cbb, iargs [0]);
11349 NEW_CLASSCONST (cfg, iargs [1], klass);
11350 MONO_ADD_INS (cfg->cbb, iargs [1]);
11351 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
11353 inline_costs += 10 * num_calls++;
11356 case CEE_MONO_OBJADDR:
11359 MONO_INST_NEW (cfg, ins, OP_MOVE);
11360 ins->dreg = alloc_ireg_mp (cfg);
11361 ins->sreg1 = sp [0]->dreg;
11362 ins->type = STACK_MP;
11363 MONO_ADD_INS (cfg->cbb, ins);
11367 case CEE_MONO_LDNATIVEOBJ:
11369 * Similar to LDOBJ, but instead load the unmanaged
11370 * representation of the vtype to the stack.
11375 token = read32 (ip + 2);
11376 klass = mono_method_get_wrapper_data (method, token);
11377 g_assert (klass->valuetype);
11378 mono_class_init (klass);
11381 MonoInst *src, *dest, *temp;
11384 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
11385 temp->backend.is_pinvoke = 1;
11386 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
11387 mini_emit_stobj (cfg, dest, src, klass, TRUE);
11389 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
11390 dest->type = STACK_VTYPE;
11391 dest->klass = klass;
11397 case CEE_MONO_RETOBJ: {
11399 * Same as RET, but return the native representation of a vtype
11402 g_assert (cfg->ret);
11403 g_assert (mono_method_signature (method)->pinvoke);
11408 token = read32 (ip + 2);
11409 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11411 if (!cfg->vret_addr) {
11412 g_assert (cfg->ret_var_is_local);
11414 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
11416 EMIT_NEW_RETLOADA (cfg, ins);
11418 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
11420 if (sp != stack_start)
11423 MONO_INST_NEW (cfg, ins, OP_BR);
11424 ins->inst_target_bb = end_bblock;
11425 MONO_ADD_INS (bblock, ins);
11426 link_bblock (cfg, bblock, end_bblock);
11427 start_new_bblock = 1;
11431 case CEE_MONO_CISINST:
11432 case CEE_MONO_CCASTCLASS: {
11437 token = read32 (ip + 2);
11438 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11439 if (ip [1] == CEE_MONO_CISINST)
11440 ins = handle_cisinst (cfg, klass, sp [0]);
11442 ins = handle_ccastclass (cfg, klass, sp [0]);
11448 case CEE_MONO_SAVE_LMF:
11449 case CEE_MONO_RESTORE_LMF:
11450 #ifdef MONO_ARCH_HAVE_LMF_OPS
11451 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
11452 MONO_ADD_INS (bblock, ins);
11453 cfg->need_lmf_area = TRUE;
11457 case CEE_MONO_CLASSCONST:
11458 CHECK_STACK_OVF (1);
11460 token = read32 (ip + 2);
11461 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
11464 inline_costs += 10 * num_calls++;
11466 case CEE_MONO_NOT_TAKEN:
11467 bblock->out_of_line = TRUE;
11470 case CEE_MONO_TLS: {
11473 CHECK_STACK_OVF (1);
11475 key = (gint32)read32 (ip + 2);
11476 g_assert (key < TLS_KEY_NUM);
11478 ins = mono_create_tls_get (cfg, key);
11480 if (cfg->compile_aot) {
11482 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
11483 ins->dreg = alloc_preg (cfg);
11484 ins->type = STACK_PTR;
11486 g_assert_not_reached ();
11489 ins->type = STACK_PTR;
11490 MONO_ADD_INS (bblock, ins);
11495 case CEE_MONO_DYN_CALL: {
11496 MonoCallInst *call;
11498 /* It would be easier to call a trampoline, but that would put an
11499 * extra frame on the stack, confusing exception handling. So
11500 * implement it inline using an opcode for now.
11503 if (!cfg->dyn_call_var) {
11504 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11505 /* prevent it from being register allocated */
11506 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
11509 /* Has to use a call inst since it local regalloc expects it */
11510 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
11511 ins = (MonoInst*)call;
11513 ins->sreg1 = sp [0]->dreg;
11514 ins->sreg2 = sp [1]->dreg;
11515 MONO_ADD_INS (bblock, ins);
11517 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
11520 inline_costs += 10 * num_calls++;
11524 case CEE_MONO_MEMORY_BARRIER: {
11526 emit_memory_barrier (cfg, (int)read32 (ip + 1));
11530 case CEE_MONO_JIT_ATTACH: {
11531 MonoInst *args [16];
11532 MonoInst *ad_ins, *lmf_ins;
11533 MonoBasicBlock *next_bb = NULL;
11535 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11537 EMIT_NEW_PCONST (cfg, ins, NULL);
11538 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11544 ad_ins = mono_get_domain_intrinsic (cfg);
11545 lmf_ins = mono_get_lmf_intrinsic (cfg);
11548 if (MONO_ARCH_HAVE_TLS_GET && ad_ins && lmf_ins) {
11549 NEW_BBLOCK (cfg, next_bb);
11551 MONO_ADD_INS (cfg->cbb, ad_ins);
11552 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ad_ins->dreg, 0);
11553 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
11555 MONO_ADD_INS (cfg->cbb, lmf_ins);
11556 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, lmf_ins->dreg, 0);
11557 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, next_bb);
11560 if (cfg->compile_aot) {
11561 /* AOT code is only used in the root domain */
11562 EMIT_NEW_PCONST (cfg, args [0], NULL);
11564 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
11566 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
11567 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11570 MONO_START_BB (cfg, next_bb);
11576 case CEE_MONO_JIT_DETACH: {
11577 MonoInst *args [16];
11579 /* Restore the original domain */
11580 dreg = alloc_ireg (cfg);
11581 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
11582 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
11587 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
11593 case CEE_PREFIX1: {
11596 case CEE_ARGLIST: {
11597 /* somewhat similar to LDTOKEN */
11598 MonoInst *addr, *vtvar;
11599 CHECK_STACK_OVF (1);
11600 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
11602 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11603 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
11605 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11606 ins->type = STACK_VTYPE;
11607 ins->klass = mono_defaults.argumenthandle_class;
11620 * The following transforms:
11621 * CEE_CEQ into OP_CEQ
11622 * CEE_CGT into OP_CGT
11623 * CEE_CGT_UN into OP_CGT_UN
11624 * CEE_CLT into OP_CLT
11625 * CEE_CLT_UN into OP_CLT_UN
11627 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
11629 MONO_INST_NEW (cfg, ins, cmp->opcode);
11631 cmp->sreg1 = sp [0]->dreg;
11632 cmp->sreg2 = sp [1]->dreg;
11633 type_from_op (cmp, sp [0], sp [1]);
11635 if ((sp [0]->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
11636 cmp->opcode = OP_LCOMPARE;
11637 else if (sp [0]->type == STACK_R8)
11638 cmp->opcode = OP_FCOMPARE;
11640 cmp->opcode = OP_ICOMPARE;
11641 MONO_ADD_INS (bblock, cmp);
11642 ins->type = STACK_I4;
11643 ins->dreg = alloc_dreg (cfg, ins->type);
11644 type_from_op (ins, sp [0], sp [1]);
11646 if (cmp->opcode == OP_FCOMPARE) {
11648 * The backends expect the fceq opcodes to do the
11651 cmp->opcode = OP_NOP;
11652 ins->sreg1 = cmp->sreg1;
11653 ins->sreg2 = cmp->sreg2;
11655 MONO_ADD_INS (bblock, ins);
11661 MonoInst *argconst;
11662 MonoMethod *cil_method;
11664 CHECK_STACK_OVF (1);
11666 n = read32 (ip + 2);
11667 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
11668 if (!cmethod || mono_loader_get_last_error ())
11670 mono_class_init (cmethod->klass);
11672 mono_save_token_info (cfg, image, n, cmethod);
11674 context_used = mini_method_check_context_used (cfg, cmethod);
11676 cil_method = cmethod;
11677 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
11678 METHOD_ACCESS_FAILURE;
11680 if (mono_security_cas_enabled ()) {
11681 if (check_linkdemand (cfg, method, cmethod))
11682 INLINE_FAILURE ("linkdemand");
11683 CHECK_CFG_EXCEPTION;
11684 } else if (mono_security_core_clr_enabled ()) {
11685 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
11689 * Optimize the common case of ldftn+delegate creation
11691 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
11692 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
11693 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
11694 MonoInst *target_ins;
11695 MonoMethod *invoke;
11696 int invoke_context_used;
11698 invoke = mono_get_delegate_invoke (ctor_method->klass);
11699 if (!invoke || !mono_method_signature (invoke))
11702 invoke_context_used = mini_method_check_context_used (cfg, invoke);
11704 target_ins = sp [-1];
11706 if (mono_security_core_clr_enabled ())
11707 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
11709 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
11710 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
11711 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
11712 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
11713 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
11717 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
11718 /* FIXME: SGEN support */
11719 if (invoke_context_used == 0) {
11721 if (cfg->verbose_level > 3)
11722 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
11724 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
11725 CHECK_CFG_EXCEPTION;
11734 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
11735 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
11739 inline_costs += 10 * num_calls++;
11742 case CEE_LDVIRTFTN: {
11743 MonoInst *args [2];
11747 n = read32 (ip + 2);
11748 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
11749 if (!cmethod || mono_loader_get_last_error ())
11751 mono_class_init (cmethod->klass);
11753 context_used = mini_method_check_context_used (cfg, cmethod);
11755 if (mono_security_cas_enabled ()) {
11756 if (check_linkdemand (cfg, method, cmethod))
11757 INLINE_FAILURE ("linkdemand");
11758 CHECK_CFG_EXCEPTION;
11759 } else if (mono_security_core_clr_enabled ()) {
11760 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
11766 args [1] = emit_get_rgctx_method (cfg, context_used,
11767 cmethod, MONO_RGCTX_INFO_METHOD);
11770 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
11772 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
11775 inline_costs += 10 * num_calls++;
11779 CHECK_STACK_OVF (1);
11781 n = read16 (ip + 2);
11783 EMIT_NEW_ARGLOAD (cfg, ins, n);
11788 CHECK_STACK_OVF (1);
11790 n = read16 (ip + 2);
11792 NEW_ARGLOADA (cfg, ins, n);
11793 MONO_ADD_INS (cfg->cbb, ins);
11801 n = read16 (ip + 2);
11803 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
11805 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
11809 CHECK_STACK_OVF (1);
11811 n = read16 (ip + 2);
11813 EMIT_NEW_LOCLOAD (cfg, ins, n);
11818 unsigned char *tmp_ip;
11819 CHECK_STACK_OVF (1);
11821 n = read16 (ip + 2);
11824 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
11830 EMIT_NEW_LOCLOADA (cfg, ins, n);
11839 n = read16 (ip + 2);
11841 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
11843 emit_stloc_ir (cfg, sp, header, n);
11850 if (sp != stack_start)
11852 if (cfg->method != method)
11854 * Inlining this into a loop in a parent could lead to
11855 * stack overflows which is different behavior than the
11856 * non-inlined case, thus disable inlining in this case.
11858 goto inline_failure;
11860 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
11861 ins->dreg = alloc_preg (cfg);
11862 ins->sreg1 = sp [0]->dreg;
11863 ins->type = STACK_PTR;
11864 MONO_ADD_INS (cfg->cbb, ins);
11866 cfg->flags |= MONO_CFG_HAS_ALLOCA;
11868 ins->flags |= MONO_INST_INIT;
11873 case CEE_ENDFILTER: {
11874 MonoExceptionClause *clause, *nearest;
11875 int cc, nearest_num;
11879 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
11881 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
11882 ins->sreg1 = (*sp)->dreg;
11883 MONO_ADD_INS (bblock, ins);
11884 start_new_bblock = 1;
11889 for (cc = 0; cc < header->num_clauses; ++cc) {
11890 clause = &header->clauses [cc];
11891 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
11892 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
11893 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
11898 g_assert (nearest);
11899 if ((ip - header->code) != nearest->handler_offset)
11904 case CEE_UNALIGNED_:
11905 ins_flag |= MONO_INST_UNALIGNED;
11906 /* FIXME: record alignment? we can assume 1 for now */
11910 case CEE_VOLATILE_:
11911 ins_flag |= MONO_INST_VOLATILE;
11915 ins_flag |= MONO_INST_TAILCALL;
11916 cfg->flags |= MONO_CFG_HAS_TAIL;
11917 /* Can't inline tail calls at this time */
11918 inline_costs += 100000;
11925 token = read32 (ip + 2);
11926 klass = mini_get_class (method, token, generic_context);
11927 CHECK_TYPELOAD (klass);
11928 if (generic_class_is_reference_type (cfg, klass))
11929 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
11931 mini_emit_initobj (cfg, *sp, NULL, klass);
11935 case CEE_CONSTRAINED_:
11937 token = read32 (ip + 2);
11938 constrained_call = mini_get_class (method, token, generic_context);
11939 CHECK_TYPELOAD (constrained_call);
11943 case CEE_INITBLK: {
11944 MonoInst *iargs [3];
11948 /* Skip optimized paths for volatile operations. */
11949 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
11950 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
11951 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
11952 /* emit_memset only works when val == 0 */
11953 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
11956 iargs [0] = sp [0];
11957 iargs [1] = sp [1];
11958 iargs [2] = sp [2];
11959 if (ip [1] == CEE_CPBLK) {
11961 * FIXME: It's unclear whether we should be emitting both the acquire
11962 * and release barriers for cpblk. It is technically both a load and
11963 * store operation, so it seems like that's the sensible thing to do.
11965 MonoMethod *memcpy_method = get_memcpy_method ();
11966 if (ins_flag & MONO_INST_VOLATILE) {
11967 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11968 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
11969 emit_memory_barrier (cfg, FullBarrier);
11971 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11972 call->flags |= ins_flag;
11973 if (ins_flag & MONO_INST_VOLATILE) {
11974 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
11975 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
11976 emit_memory_barrier (cfg, FullBarrier);
11979 MonoMethod *memset_method = get_memset_method ();
11980 if (ins_flag & MONO_INST_VOLATILE) {
11981 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11982 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
11983 emit_memory_barrier (cfg, FullBarrier);
11985 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
11986 call->flags |= ins_flag;
11997 ins_flag |= MONO_INST_NOTYPECHECK;
11999 ins_flag |= MONO_INST_NORANGECHECK;
12000 /* we ignore the no-nullcheck for now since we
12001 * really do it explicitly only when doing callvirt->call
12005 case CEE_RETHROW: {
12007 int handler_offset = -1;
12009 for (i = 0; i < header->num_clauses; ++i) {
12010 MonoExceptionClause *clause = &header->clauses [i];
12011 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
12012 handler_offset = clause->handler_offset;
12017 bblock->flags |= BB_EXCEPTION_UNSAFE;
12019 g_assert (handler_offset != -1);
12021 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
12022 MONO_INST_NEW (cfg, ins, OP_RETHROW);
12023 ins->sreg1 = load->dreg;
12024 MONO_ADD_INS (bblock, ins);
12026 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12027 MONO_ADD_INS (bblock, ins);
12030 link_bblock (cfg, bblock, end_bblock);
12031 start_new_bblock = 1;
12039 GSHAREDVT_FAILURE (*ip);
12041 CHECK_STACK_OVF (1);
12043 token = read32 (ip + 2);
12044 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic && !generic_context) {
12045 MonoType *type = mono_type_create_from_typespec (image, token);
12046 val = mono_type_size (type, &ialign);
12048 MonoClass *klass = mono_class_get_full (image, token, generic_context);
12049 CHECK_TYPELOAD (klass);
12050 mono_class_init (klass);
12051 val = mono_type_size (&klass->byval_arg, &ialign);
12053 EMIT_NEW_ICONST (cfg, ins, val);
12058 case CEE_REFANYTYPE: {
12059 MonoInst *src_var, *src;
12061 GSHAREDVT_FAILURE (*ip);
12067 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12069 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12070 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12071 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
12076 case CEE_READONLY_:
12089 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
12099 g_warning ("opcode 0x%02x not handled", *ip);
12103 if (start_new_bblock != 1)
12106 bblock->cil_length = ip - bblock->cil_code;
12107 if (bblock->next_bb) {
12108 /* This could already be set because of inlining, #693905 */
12109 MonoBasicBlock *bb = bblock;
12111 while (bb->next_bb)
12113 bb->next_bb = end_bblock;
12115 bblock->next_bb = end_bblock;
12118 if (cfg->method == method && cfg->domainvar) {
12120 MonoInst *get_domain;
12122 cfg->cbb = init_localsbb;
12124 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
12125 MONO_ADD_INS (cfg->cbb, get_domain);
12127 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
12129 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
12130 MONO_ADD_INS (cfg->cbb, store);
12133 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
12134 if (cfg->compile_aot)
12135 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
12136 mono_get_got_var (cfg);
12139 if (cfg->method == method && cfg->got_var)
12140 mono_emit_load_got_addr (cfg);
12142 if (init_localsbb) {
12143 cfg->cbb = init_localsbb;
12145 for (i = 0; i < header->num_locals; ++i) {
12146 emit_init_local (cfg, i, header->locals [i], init_locals);
12150 if (cfg->init_ref_vars && cfg->method == method) {
12151 /* Emit initialization for ref vars */
12152 // FIXME: Avoid duplication initialization for IL locals.
12153 for (i = 0; i < cfg->num_varinfo; ++i) {
12154 MonoInst *ins = cfg->varinfo [i];
12156 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
12157 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
12161 if (cfg->lmf_var && cfg->method == method) {
12162 cfg->cbb = init_localsbb;
12163 emit_push_lmf (cfg);
12167 MonoBasicBlock *bb;
12170 * Make seq points at backward branch targets interruptable.
12172 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
12173 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
12174 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
12177 /* Add a sequence point for method entry/exit events */
12179 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
12180 MONO_ADD_INS (init_localsbb, ins);
12181 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
12182 MONO_ADD_INS (cfg->bb_exit, ins);
12186 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
12187 * the code they refer to was dead (#11880).
12189 if (sym_seq_points) {
12190 for (i = 0; i < header->code_size; ++i) {
12191 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
12194 NEW_SEQ_POINT (cfg, ins, i, FALSE);
12195 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
12202 if (cfg->method == method) {
12203 MonoBasicBlock *bb;
12204 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12205 bb->region = mono_find_block_region (cfg, bb->real_offset);
12207 mono_create_spvar_for_region (cfg, bb->region);
12208 if (cfg->verbose_level > 2)
12209 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
12213 g_slist_free (class_inits);
12214 dont_inline = g_list_remove (dont_inline, method);
12216 if (inline_costs < 0) {
12219 /* Method is too large */
12220 mname = mono_method_full_name (method, TRUE);
12221 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
12222 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
12224 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
12225 mono_basic_block_free (original_bb);
12229 if ((cfg->verbose_level > 2) && (cfg->method == method))
12230 mono_print_code (cfg, "AFTER METHOD-TO-IR");
12232 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
12233 mono_basic_block_free (original_bb);
12234 return inline_costs;
12237 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
12244 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
12248 set_exception_type_from_invalid_il (cfg, method, ip);
12252 g_slist_free (class_inits);
12253 mono_basic_block_free (original_bb);
12254 dont_inline = g_list_remove (dont_inline, method);
12255 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a STORE*_MEMBASE_REG opcode to its *_MEMBASE_IMM counterpart, so a
 * store whose source value turns out to be a constant can embed the
 * immediate directly instead of going through a register.
 * NOTE(review): the function header, the `switch (opcode)` line and the
 * closing braces are not visible in this extract — confirm against the full
 * source before modifying.
 */
12260 store_membase_reg_to_store_membase_imm (int opcode)
12263 case OP_STORE_MEMBASE_REG:
12264 return OP_STORE_MEMBASE_IMM;
12265 case OP_STOREI1_MEMBASE_REG:
12266 return OP_STOREI1_MEMBASE_IMM;
12267 case OP_STOREI2_MEMBASE_REG:
12268 return OP_STOREI2_MEMBASE_IMM;
12269 case OP_STOREI4_MEMBASE_REG:
12270 return OP_STOREI4_MEMBASE_IMM;
12271 case OP_STOREI8_MEMBASE_REG:
12272 return OP_STOREI8_MEMBASE_IMM;
/* Any opcode not handled above is a caller bug. */
12274 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Map a two-register ALU/compare/store opcode to its register+immediate
 * form (e.g. an integer add to OP_IADD_IMM), used when one operand is a
 * known constant. x86/amd64-specific mappings are guarded by TARGET_*
 * ifdefs.
 * NOTE(review): the `case` labels matching most of these `return`s, plus the
 * switch header and several #endif lines, are not visible in this extract —
 * verify against the full source.
 */
12281 mono_op_to_op_imm (int opcode)
/* 32-bit integer ALU ops -> immediate forms */
12285 return OP_IADD_IMM;
12287 return OP_ISUB_IMM;
12289 return OP_IDIV_IMM;
12291 return OP_IDIV_UN_IMM;
12293 return OP_IREM_IMM;
12295 return OP_IREM_UN_IMM;
12297 return OP_IMUL_IMM;
12299 return OP_IAND_IMM;
12303 return OP_IXOR_IMM;
12305 return OP_ISHL_IMM;
12307 return OP_ISHR_IMM;
12309 return OP_ISHR_UN_IMM;
/* 64-bit integer ALU ops -> immediate forms */
12312 return OP_LADD_IMM;
12314 return OP_LSUB_IMM;
12316 return OP_LAND_IMM;
12320 return OP_LXOR_IMM;
12322 return OP_LSHL_IMM;
12324 return OP_LSHR_IMM;
12326 return OP_LSHR_UN_IMM;
/* compares */
12329 return OP_COMPARE_IMM;
12331 return OP_ICOMPARE_IMM;
12333 return OP_LCOMPARE_IMM;
/* register stores -> immediate stores */
12335 case OP_STORE_MEMBASE_REG:
12336 return OP_STORE_MEMBASE_IMM;
12337 case OP_STOREI1_MEMBASE_REG:
12338 return OP_STOREI1_MEMBASE_IMM;
12339 case OP_STOREI2_MEMBASE_REG:
12340 return OP_STOREI2_MEMBASE_IMM;
12341 case OP_STOREI4_MEMBASE_REG:
12342 return OP_STOREI4_MEMBASE_IMM;
/* x86/amd64-only immediate forms */
12344 #if defined(TARGET_X86) || defined (TARGET_AMD64)
12346 return OP_X86_PUSH_IMM;
12347 case OP_X86_COMPARE_MEMBASE_REG:
12348 return OP_X86_COMPARE_MEMBASE_IMM;
12350 #if defined(TARGET_AMD64)
12351 case OP_AMD64_ICOMPARE_MEMBASE_REG:
12352 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
12354 case OP_VOIDCALL_REG:
12355 return OP_VOIDCALL;
12363 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL LDIND_* (load indirect) opcode to the corresponding IR
 * LOAD*_MEMBASE opcode used by the backends.
 * NOTE(review): most `case CEE_LDIND_*` labels and the switch scaffolding
 * are missing from this extract — only CEE_LDIND_REF is visible.
 */
12370 ldind_to_load_membase (int opcode)
12374 return OP_LOADI1_MEMBASE;
12376 return OP_LOADU1_MEMBASE;
12378 return OP_LOADI2_MEMBASE;
12380 return OP_LOADU2_MEMBASE;
12382 return OP_LOADI4_MEMBASE;
12384 return OP_LOADU4_MEMBASE;
12386 return OP_LOAD_MEMBASE;
/* object references are loaded as native-word-sized values */
12387 case CEE_LDIND_REF:
12388 return OP_LOAD_MEMBASE;
12390 return OP_LOADI8_MEMBASE;
12392 return OP_LOADR4_MEMBASE;
12394 return OP_LOADR8_MEMBASE;
/* Any opcode not handled above is a caller bug. */
12396 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL STIND_* (store indirect) opcode to the corresponding IR
 * STORE*_MEMBASE_REG opcode used by the backends.
 * NOTE(review): most `case CEE_STIND_*` labels and the switch scaffolding
 * are missing from this extract — only CEE_STIND_REF is visible.
 */
12403 stind_to_store_membase (int opcode)
12407 return OP_STOREI1_MEMBASE_REG;
12409 return OP_STOREI2_MEMBASE_REG;
12411 return OP_STOREI4_MEMBASE_REG;
/* object references are stored as native-word-sized values */
12413 case CEE_STIND_REF:
12414 return OP_STORE_MEMBASE_REG;
12416 return OP_STOREI8_MEMBASE_REG;
12418 return OP_STORER4_MEMBASE_REG;
12420 return OP_STORER8_MEMBASE_REG;
/* Any opcode not handled above is a caller bug. */
12422 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a LOAD*_MEMBASE (base register + displacement) opcode to its
 * absolute-address LOAD*_MEM form, for targets that support it. Currently
 * only x86/amd64; other targets fall through (default return not visible
 * in this extract).
 */
12429 mono_load_membase_to_load_mem (int opcode)
12431 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
12432 #if defined(TARGET_X86) || defined(TARGET_AMD64)
12434 case OP_LOAD_MEMBASE:
12435 return OP_LOAD_MEM;
12436 case OP_LOADU1_MEMBASE:
12437 return OP_LOADU1_MEM;
12438 case OP_LOADU2_MEMBASE:
12439 return OP_LOADU2_MEM;
12440 case OP_LOADI4_MEMBASE:
12441 return OP_LOADI4_MEM;
12442 case OP_LOADU4_MEMBASE:
12443 return OP_LOADU4_MEM;
/* 8-byte absolute loads only exist on 64-bit registers */
12444 #if SIZEOF_REGISTER == 8
12445 case OP_LOADI8_MEMBASE:
12446 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   For a store following an ALU op, try to fold the store into the ALU op
 * by mapping it to a read-modify-write *_MEMBASE_REG / *_MEMBASE_IMM form
 * (x86/amd64 only). STORE_OPCODE restricts which widths are legal: 32-bit
 * stores on x86, 32/64-bit on amd64.
 * NOTE(review): the `case` labels for the return statements and the default
 * return are not visible in this extract.
 */
12455 op_to_op_dest_membase (int store_opcode, int opcode)
12457 #if defined(TARGET_X86)
/* only pointer-sized / 32-bit stores can be folded on x86 */
12458 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
12463 return OP_X86_ADD_MEMBASE_REG;
12465 return OP_X86_SUB_MEMBASE_REG;
12467 return OP_X86_AND_MEMBASE_REG;
12469 return OP_X86_OR_MEMBASE_REG;
12471 return OP_X86_XOR_MEMBASE_REG;
12474 return OP_X86_ADD_MEMBASE_IMM;
12477 return OP_X86_SUB_MEMBASE_IMM;
12480 return OP_X86_AND_MEMBASE_IMM;
12483 return OP_X86_OR_MEMBASE_IMM;
12486 return OP_X86_XOR_MEMBASE_IMM;
12492 #if defined(TARGET_AMD64)
/* amd64 additionally allows folding 8-byte stores */
12493 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
12498 return OP_X86_ADD_MEMBASE_REG;
12500 return OP_X86_SUB_MEMBASE_REG;
12502 return OP_X86_AND_MEMBASE_REG;
12504 return OP_X86_OR_MEMBASE_REG;
12506 return OP_X86_XOR_MEMBASE_REG;
12508 return OP_X86_ADD_MEMBASE_IMM;
12510 return OP_X86_SUB_MEMBASE_IMM;
12512 return OP_X86_AND_MEMBASE_IMM;
12514 return OP_X86_OR_MEMBASE_IMM;
12516 return OP_X86_XOR_MEMBASE_IMM;
/* 64-bit (long) variants */
12518 return OP_AMD64_ADD_MEMBASE_REG;
12520 return OP_AMD64_SUB_MEMBASE_REG;
12522 return OP_AMD64_AND_MEMBASE_REG;
12524 return OP_AMD64_OR_MEMBASE_REG;
12526 return OP_AMD64_XOR_MEMBASE_REG;
12529 return OP_AMD64_ADD_MEMBASE_IMM;
12532 return OP_AMD64_SUB_MEMBASE_IMM;
12535 return OP_AMD64_AND_MEMBASE_IMM;
12538 return OP_AMD64_OR_MEMBASE_IMM;
12541 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fold a compare-result opcode followed by a 1-byte store into a single
 * x86/amd64 SETcc-to-memory instruction (e.g. ceq + store -> SETEQ_MEMBASE).
 * NOTE(review): the `case` labels (presumably OP_ICEQ/OP_ICNEQ-style ops)
 * and the default return are not visible in this extract.
 */
12551 op_to_op_store_membase (int store_opcode, int opcode)
12553 #if defined(TARGET_X86) || defined(TARGET_AMD64)
12556 if (store_opcode == OP_STOREI1_MEMBASE_REG)
12557 return OP_X86_SETEQ_MEMBASE;
12559 if (store_opcode == OP_STOREI1_MEMBASE_REG)
12560 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fold a memory load feeding the FIRST source operand of OPCODE into a
 * combined reg-memory instruction (x86/amd64). LOAD_OPCODE identifies the
 * width of the load; incompatible widths bail out.
 * NOTE(review): several `case` labels, `#ifdef TARGET_X86`, and the default
 * returns are not visible in this extract.
 */
12568 op_to_op_src1_membase (int load_opcode, int opcode)
12571 /* FIXME: This has sign extension issues */
12573 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
12574 return OP_X86_COMPARE_MEMBASE8_IMM;
/* x86: only 32-bit / pointer-sized loads can be folded */
12577 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
12582 return OP_X86_PUSH_MEMBASE;
12583 case OP_COMPARE_IMM:
12584 case OP_ICOMPARE_IMM:
12585 return OP_X86_COMPARE_MEMBASE_IMM;
12588 return OP_X86_COMPARE_MEMBASE_REG;
12592 #ifdef TARGET_AMD64
12593 /* FIXME: This has sign extension issues */
12595 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
12596 return OP_X86_COMPARE_MEMBASE8_IMM;
/* under ILP32 (x32), OP_LOAD_MEMBASE is 4 bytes and I8 loads can't be folded here */
12601 #ifdef __mono_ilp32__
12602 if (load_opcode == OP_LOADI8_MEMBASE)
12604 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12606 return OP_X86_PUSH_MEMBASE;
12608 /* FIXME: This only works for 32 bit immediates
12609 case OP_COMPARE_IMM:
12610 case OP_LCOMPARE_IMM:
12611 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12612 return OP_AMD64_COMPARE_MEMBASE_IMM;
12614 case OP_ICOMPARE_IMM:
12615 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
12616 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
12620 #ifdef __mono_ilp32__
12621 if (load_opcode == OP_LOAD_MEMBASE)
12622 return OP_AMD64_ICOMPARE_MEMBASE_REG;
12623 if (load_opcode == OP_LOADI8_MEMBASE)
12625 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12627 return OP_AMD64_COMPARE_MEMBASE_REG;
12630 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
12631 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fold a memory load feeding the SECOND source operand of OPCODE into a
 * combined reg-memory instruction (x86/amd64), e.g. add reg, [mem].
 * NOTE(review): the `case` labels for the return statements, `#ifdef
 * TARGET_X86`, and the default returns are not visible in this extract.
 */
12640 op_to_op_src2_membase (int load_opcode, int opcode)
/* x86: only 32-bit / pointer-sized loads can be folded */
12643 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
12649 return OP_X86_COMPARE_REG_MEMBASE;
12651 return OP_X86_ADD_REG_MEMBASE;
12653 return OP_X86_SUB_REG_MEMBASE;
12655 return OP_X86_AND_REG_MEMBASE;
12657 return OP_X86_OR_REG_MEMBASE;
12659 return OP_X86_XOR_REG_MEMBASE;
12663 #ifdef TARGET_AMD64
/* under ILP32 (x32), OP_LOAD_MEMBASE counts as a 32-bit load */
12664 #ifdef __mono_ilp32__
12665 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
12667 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
12671 return OP_AMD64_ICOMPARE_REG_MEMBASE;
12673 return OP_X86_ADD_REG_MEMBASE;
12675 return OP_X86_SUB_REG_MEMBASE;
12677 return OP_X86_AND_REG_MEMBASE;
12679 return OP_X86_OR_REG_MEMBASE;
12681 return OP_X86_XOR_REG_MEMBASE;
/* 64-bit loads */
12683 #ifdef __mono_ilp32__
12684 } else if (load_opcode == OP_LOADI8_MEMBASE) {
12686 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
12691 return OP_AMD64_COMPARE_REG_MEMBASE;
12693 return OP_AMD64_ADD_REG_MEMBASE;
12695 return OP_AMD64_SUB_REG_MEMBASE;
12697 return OP_AMD64_AND_REG_MEMBASE;
12699 return OP_AMD64_OR_REG_MEMBASE;
12701 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuses to produce immediate forms for
 * opcodes that the current architecture only emulates in software (long
 * shifts on 32-bit, mul/div where MONO_ARCH_EMULATE_* is set), since those
 * must go through the emulation helpers instead.
 * NOTE(review): the `case` lists guarded by each #if and the early returns
 * are not visible in this extract.
 */
12710 mono_op_to_op_imm_noemul (int opcode)
12713 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
12719 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
12726 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
/* everything else can safely use the plain immediate mapping */
12731 return mono_op_to_op_imm (opcode);
12736 * mono_handle_global_vregs:
12738 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/*
 * NOTE(review): this chunk is an elided listing -- every surviving line keeps
 * its original line number and intermediate lines (return type, braces,
 * 'continue's, #endif's) are missing.  Only the visible tokens are edited.
 *
 * Promote vregs used in more than one bblock to 'global' vregs by creating a
 * MonoInst variable for them (mono_compile_create_var_for_vreg), and demote
 * variables used in only one bblock back to local vregs.  Finally the
 * varinfo/vars tables are compacted to drop the dead entries.
 */
12742 mono_handle_global_vregs (MonoCompile *cfg)
12744 gint32 *vreg_to_bb;
12745 MonoBasicBlock *bb;
/* FIX(review): one gint32 slot per vreg (plus a spare element).  The old
 * expression 'sizeof (gint32*) * cfg->next_vreg + 1' sized each slot as a
 * pointer and, due to precedence, added a single byte rather than a whole
 * trailing element. */
12748 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32) * (cfg->next_vreg + 1));
12750 #ifdef MONO_ARCH_SIMD_INTRINSICS
12751 if (cfg->uses_simd_intrinsics)
12752 mono_simd_simplify_indirection (cfg);
/* Pass 1: record, per vreg, the bblock it was last seen in (block_num + 1,
 * since 0 is a valid block number); a sighting in a different bblock marks
 * the vreg -1 and makes it global. */
12755 /* Find local vregs used in more than one bb */
12756 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12757 MonoInst *ins = bb->code;
12758 int block_num = bb->block_num;
12760 if (cfg->verbose_level > 2)
12761 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
12764 for (; ins; ins = ins->next) {
12765 const char *spec = INS_INFO (ins->opcode);
12766 int regtype = 0, regindex;
12769 if (G_UNLIKELY (cfg->verbose_level > 2))
12770 mono_print_ins (ins);
/* Only machine-level opcodes are expected here; CIL-level ones were lowered. */
12772 g_assert (ins->opcode >= MONO_CEE_LAST);
/* regindex 0 = dest, 1..3 = the three source registers of the ins. */
12774 for (regindex = 0; regindex < 4; regindex ++) {
12777 if (regindex == 0) {
12778 regtype = spec [MONO_INST_DEST];
12779 if (regtype == ' ')
12782 } else if (regindex == 1) {
12783 regtype = spec [MONO_INST_SRC1];
12784 if (regtype == ' ')
12787 } else if (regindex == 2) {
12788 regtype = spec [MONO_INST_SRC2];
12789 if (regtype == ' ')
12792 } else if (regindex == 3) {
12793 regtype = spec [MONO_INST_SRC3];
12794 if (regtype == ' ')
12799 #if SIZEOF_REGISTER == 4
12800 /* In the LLVM case, the long opcodes are not decomposed */
12801 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
12803 * Since some instructions reference the original long vreg,
12804 * and some reference the two component vregs, it is quite hard
12805 * to determine when it needs to be global. So be conservative.
12807 if (!get_vreg_to_inst (cfg, vreg)) {
12808 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
12810 if (cfg->verbose_level > 2)
12811 printf ("LONG VREG R%d made global.\n", vreg);
12815 * Make the component vregs volatile since the optimizations can
12816 * get confused otherwise.
/* vreg+1/vreg+2 are the low/high word component vregs of the long. */
12818 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
12819 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
12823 g_assert (vreg != -1);
12825 prev_bb = vreg_to_bb [vreg];
12826 if (prev_bb == 0) {
12827 /* 0 is a valid block num */
12828 vreg_to_bb [vreg] = block_num + 1;
12829 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hardware registers referenced directly never become variables. */
12830 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
12833 if (!get_vreg_to_inst (cfg, vreg)) {
12834 if (G_UNLIKELY (cfg->verbose_level > 2))
12835 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Pick the variable's managed type from the spec regtype. */
12839 if (vreg_is_ref (cfg, vreg))
12840 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
12842 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
12845 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
12848 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
12851 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
12854 g_assert_not_reached ();
12858 /* Flag as having been used in more than one bb */
12859 vreg_to_bb [vreg] = -1;
/* Pass 2: demote variables confined to a single bblock back to local vregs. */
12865 /* If a variable is used in only one bblock, convert it into a local vreg */
12866 for (i = 0; i < cfg->num_varinfo; i++) {
12867 MonoInst *var = cfg->varinfo [i];
12868 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
12870 switch (var->type) {
12876 #if SIZEOF_REGISTER == 8
12879 #if !defined(TARGET_X86)
12880 /* Enabling this screws up the fp stack on x86 */
12883 if (mono_arch_is_soft_float ())
12886 /* Arguments are implicitly global */
12887 /* Putting R4 vars into registers doesn't work currently */
12888 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
12889 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
12891 * Make sure that the variable's liveness interval doesn't contain a call, since
12892 * that would cause the lvreg to be spilled, making the whole optimization
12895 /* This is too slow for JIT compilation */
12897 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
12899 int def_index, call_index, ins_index;
12900 gboolean spilled = FALSE;
12905 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
12906 const char *spec = INS_INFO (ins->opcode);
12908 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
12909 def_index = ins_index;
/* FIX(review): the second arm duplicated the SRC1/sreg1 test, making the
 * '||' tautological; it must check the second source register. */
12911 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
12912 ((spec [MONO_INST_SRC2] != ' ') && (ins->sreg2 == var->dreg))) {
12913 if (call_index > def_index) {
12919 if (MONO_IS_CALL (ins))
12920 call_index = ins_index;
12930 if (G_UNLIKELY (cfg->verbose_level > 2))
12931 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* Demotion: mark the variable dead and drop its vreg->inst mapping. */
12932 var->flags |= MONO_INST_IS_DEAD;
12933 cfg->vreg_to_inst [var->dreg] = NULL;
12940 * Compress the varinfo and vars tables so the liveness computation is faster and
12941 * takes up less space.
12944 for (i = 0; i < cfg->num_varinfo; ++i) {
12945 MonoInst *var = cfg->varinfo [i];
12946 if (pos < i && cfg->locals_start == i)
12947 cfg->locals_start = pos;
12948 if (!(var->flags & MONO_INST_IS_DEAD)) {
12950 cfg->varinfo [pos] = cfg->varinfo [i];
12951 cfg->varinfo [pos]->inst_c0 = pos;
12952 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
12953 cfg->vars [pos].idx = pos;
12954 #if SIZEOF_REGISTER == 4
12955 if (cfg->varinfo [pos]->type == STACK_I8) {
12956 /* Modify the two component vars too */
12959 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
12960 var1->inst_c0 = pos;
12961 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
12962 var1->inst_c0 = pos;
12969 cfg->num_varinfo = pos;
12970 if (cfg->locals_start > cfg->num_varinfo)
12971 cfg->locals_start = cfg->num_varinfo;
12975 * mono_spill_global_vars:
12977 * Generate spill code for variables which are not allocated to registers,
12978 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
12979 * code is generated which could be optimized by the local optimization passes.
12982 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
/* NOTE(review): this listing is elided -- every surviving line keeps its
 * original line number; intermediate lines (declarations such as 'lvregs',
 * 'spec2', 'lvreg', 'pos', braces, 'break's and #endif's) are missing.
 * Comments below describe only what the visible code demonstrably does. */
12984 MonoBasicBlock *bb;
12986 int orig_next_vreg;
12987 guint32 *vreg_to_lvreg;
12989 guint32 i, lvregs_len;
12990 gboolean dest_has_lvreg = FALSE;
12991 guint32 stacktypes [128];
12992 MonoInst **live_range_start, **live_range_end;
12993 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
12994 int *gsharedvt_vreg_to_idx = NULL;
12996 *need_local_opts = FALSE;
12998 memset (spec2, 0, sizeof (spec2));
/* Map the spec characters ('i'/'l'/'f'/'x') to stack types for alloc_dreg (). */
13000 /* FIXME: Move this function to mini.c */
13001 stacktypes ['i'] = STACK_PTR;
13002 stacktypes ['l'] = STACK_I8;
13003 stacktypes ['f'] = STACK_R8;
13004 #ifdef MONO_ARCH_SIMD_INTRINSICS
13005 stacktypes ['x'] = STACK_VTYPE;
13008 #if SIZEOF_REGISTER == 4
/* On 32-bit targets, give the two component vregs (dreg+1 low word, dreg+2
 * high word) of each stack-allocated long their own OP_REGOFFSET MonoInsts. */
13009 /* Create MonoInsts for longs */
13010 for (i = 0; i < cfg->num_varinfo; i++) {
13011 MonoInst *ins = cfg->varinfo [i];
13013 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
13014 switch (ins->type) {
13019 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
13022 g_assert (ins->opcode == OP_REGOFFSET);
13024 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
13026 tree->opcode = OP_REGOFFSET;
13027 tree->inst_basereg = ins->inst_basereg;
13028 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
13030 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
13032 tree->opcode = OP_REGOFFSET;
13033 tree->inst_basereg = ins->inst_basereg;
13034 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
13044 if (cfg->compute_gc_maps) {
13045 /* registers need liveness info even for !non refs */
13046 for (i = 0; i < cfg->num_varinfo; i++) {
13047 MonoInst *ins = cfg->varinfo [i];
13049 if (ins->opcode == OP_REGVAR)
13050 ins->flags |= MONO_INST_GC_TRACK;
/* gsharedvt: locals with a variable-sized type get an info-slot index
 * (stored +1 so 0 means "none"); args get -1 (they are passed by ref). */
13054 if (cfg->gsharedvt) {
13055 gsharedvt_vreg_to_idx = mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
13057 for (i = 0; i < cfg->num_varinfo; ++i) {
13058 MonoInst *ins = cfg->varinfo [i];
13061 if (mini_is_gsharedvt_variable_type (cfg, ins->inst_vtype)) {
13062 if (i >= cfg->locals_start) {
13064 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
13065 gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
13066 ins->opcode = OP_GSHAREDVT_LOCAL;
13067 ins->inst_imm = idx;
13070 gsharedvt_vreg_to_idx [ins->dreg] = -1;
13071 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
13077 /* FIXME: widening and truncation */
13080 * As an optimization, when a variable allocated to the stack is first loaded into
13081 * an lvreg, we will remember the lvreg and use it the next time instead of loading
13082 * the variable again.
13084 orig_next_vreg = cfg->next_vreg;
13085 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
13086 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
13090 * These arrays contain the first and last instructions accessing a given
13092 * Since we emit bblocks in the same order we process them here, and we
13093 * don't split live ranges, these will precisely describe the live range of
13094 * the variable, i.e. the instruction range where a valid value can be found
13095 * in the variable's location.
13096 * The live range is computed using the liveness info computed by the liveness pass.
13097 * We can't use vmv->range, since that is an abstract live range, and we need
13098 * one which is instruction precise.
13099 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
13101 /* FIXME: Only do this if debugging info is requested */
13102 live_range_start = g_new0 (MonoInst*, cfg->next_vreg)
13103 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
13104 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13105 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13107 /* Add spill loads/stores */
13108 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13111 if (cfg->verbose_level > 2)
13112 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
/* lvreg caching is per-bblock: reset the cache on bblock entry. */
13114 /* Clear vreg_to_lvreg array */
13115 for (i = 0; i < lvregs_len; i++)
13116 vreg_to_lvreg [lvregs [i]] = 0;
13120 MONO_BB_FOR_EACH_INS (bb, ins) {
13121 const char *spec = INS_INFO (ins->opcode);
13122 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
13123 gboolean store, no_lvreg;
13124 int sregs [MONO_MAX_SRC_REGS];
13126 if (G_UNLIKELY (cfg->verbose_level > 2))
13127 mono_print_ins (ins);
13129 if (ins->opcode == OP_NOP)
13133 * We handle LDADDR here as well, since it can only be decomposed
13134 * when variable addresses are known.
13136 if (ins->opcode == OP_LDADDR) {
13137 MonoInst *var = ins->inst_p0;
13139 if (var->opcode == OP_VTARG_ADDR) {
13140 /* Happens on SPARC/S390 where vtypes are passed by reference */
13141 MonoInst *vtaddr = var->inst_left;
13142 if (vtaddr->opcode == OP_REGVAR) {
13143 ins->opcode = OP_MOVE;
13144 ins->sreg1 = vtaddr->dreg;
13146 else if (var->inst_left->opcode == OP_REGOFFSET) {
13147 ins->opcode = OP_LOAD_MEMBASE;
13148 ins->inst_basereg = vtaddr->inst_basereg;
13149 ins->inst_offset = vtaddr->inst_offset;
13152 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg] < 0) {
13153 /* gsharedvt arg passed by ref */
13154 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
13156 ins->opcode = OP_LOAD_MEMBASE;
13157 ins->inst_basereg = var->inst_basereg;
13158 ins->inst_offset = var->inst_offset;
13159 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg]) {
13160 MonoInst *load, *load2, *load3;
13161 int idx = gsharedvt_vreg_to_idx [var->dreg] - 1;
13162 int reg1, reg2, reg3;
13163 MonoInst *info_var = cfg->gsharedvt_info_var;
13164 MonoInst *locals_var = cfg->gsharedvt_locals_var;
13168 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
13171 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
13173 g_assert (info_var);
13174 g_assert (locals_var);
13176 /* Mark the instruction used to compute the locals var as used */
13177 cfg->gsharedvt_locals_var_ins = NULL;
13179 /* Load the offset */
13180 if (info_var->opcode == OP_REGOFFSET) {
13181 reg1 = alloc_ireg (cfg);
13182 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
13183 } else if (info_var->opcode == OP_REGVAR) {
13185 reg1 = info_var->dreg;
13187 g_assert_not_reached ();
13189 reg2 = alloc_ireg (cfg);
13190 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, G_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
13191 /* Load the locals area address */
13192 reg3 = alloc_ireg (cfg);
13193 if (locals_var->opcode == OP_REGOFFSET) {
13194 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
13195 } else if (locals_var->opcode == OP_REGVAR) {
13196 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
13198 g_assert_not_reached ();
13200 /* Compute the address */
13201 ins->opcode = OP_PADD;
/* Insert the three loads in front of 'ins' in dependency order. */
13205 mono_bblock_insert_before_ins (bb, ins, load3);
13206 mono_bblock_insert_before_ins (bb, load3, load2);
13208 mono_bblock_insert_before_ins (bb, load2, load);
13210 g_assert (var->opcode == OP_REGOFFSET);
/* Plain stack variable: its address is basereg + offset. */
13212 ins->opcode = OP_ADD_IMM;
13213 ins->sreg1 = var->inst_basereg;
13214 ins->inst_imm = var->inst_offset;
13217 *need_local_opts = TRUE;
13218 spec = INS_INFO (ins->opcode);
/* By this point only machine-level opcodes may remain. */
13221 if (ins->opcode < MONO_CEE_LAST) {
13222 mono_print_ins (ins);
13223 g_assert_not_reached ();
13227 * Store opcodes have destbasereg in the dreg, but in reality, it is an
/* ... a source register: swap dreg<->sreg2 and build a 'spec2' with no dest
 * so the generic dreg/sreg handling below treats it correctly. */
13231 if (MONO_IS_STORE_MEMBASE (ins)) {
13232 tmp_reg = ins->dreg;
13233 ins->dreg = ins->sreg2;
13234 ins->sreg2 = tmp_reg;
13237 spec2 [MONO_INST_DEST] = ' ';
13238 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
13239 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
13240 spec2 [MONO_INST_SRC3] = ' ';
13242 } else if (MONO_IS_STORE_MEMINDEX (ins))
13243 g_assert_not_reached ();
13248 if (G_UNLIKELY (cfg->verbose_level > 2)) {
13249 printf ("\t %.3s %d", spec, ins->dreg);
13250 num_sregs = mono_inst_get_src_registers (ins, sregs);
13251 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
13252 printf (" %d", sregs [srcindex]);
/* --- Destination register: rewrite to the variable's hreg, fuse the store
 * into a _membase form of the opcode, or emit a spill store after the ins. --- */
13259 regtype = spec [MONO_INST_DEST];
13260 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
13263 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
13264 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
13265 MonoInst *store_ins;
13267 MonoInst *def_ins = ins;
13268 int dreg = ins->dreg; /* The original vreg */
13270 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
13272 if (var->opcode == OP_REGVAR) {
13273 ins->dreg = var->dreg;
13274 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
13276 * Instead of emitting a load+store, use a _membase opcode.
13278 g_assert (var->opcode == OP_REGOFFSET);
13279 if (ins->opcode == OP_MOVE) {
13283 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
13284 ins->inst_basereg = var->inst_basereg;
13285 ins->inst_offset = var->inst_offset;
13288 spec = INS_INFO (ins->opcode);
13292 g_assert (var->opcode == OP_REGOFFSET);
13294 prev_dreg = ins->dreg;
13296 /* Invalidate any previous lvreg for this vreg */
13297 vreg_to_lvreg [ins->dreg] = 0;
13301 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
/* Soft-float: R8 values live in integer registers, store them as I8. */
13303 store_opcode = OP_STOREI8_MEMBASE_REG;
13306 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
13308 #if SIZEOF_REGISTER != 8
13309 if (regtype == 'l') {
/* 32-bit: spill a long as two 4-byte stores of the component regs. */
13310 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
13311 mono_bblock_insert_after_ins (bb, ins, store_ins);
13312 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
13313 mono_bblock_insert_after_ins (bb, ins, store_ins);
13314 def_ins = store_ins;
13319 g_assert (store_opcode != OP_STOREV_MEMBASE);
13321 /* Try to fuse the store into the instruction itself */
13322 /* FIXME: Add more instructions */
13323 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
13324 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
13325 ins->inst_imm = ins->inst_c0;
13326 ins->inst_destbasereg = var->inst_basereg;
13327 ins->inst_offset = var->inst_offset;
13328 spec = INS_INFO (ins->opcode);
13329 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
/* A move into the variable becomes the store itself. */
13330 ins->opcode = store_opcode;
13331 ins->inst_destbasereg = var->inst_basereg;
13332 ins->inst_offset = var->inst_offset;
/* The ins became a store: swap dreg/sreg2 back and switch to spec2. */
13336 tmp_reg = ins->dreg;
13337 ins->dreg = ins->sreg2;
13338 ins->sreg2 = tmp_reg;
13341 spec2 [MONO_INST_DEST] = ' ';
13342 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
13343 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
13344 spec2 [MONO_INST_SRC3] = ' ';
13346 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
13347 // FIXME: The backends expect the base reg to be in inst_basereg
13348 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
13350 ins->inst_basereg = var->inst_basereg;
13351 ins->inst_offset = var->inst_offset;
13352 spec = INS_INFO (ins->opcode);
13354 /* printf ("INS: "); mono_print_ins (ins); */
13355 /* Create a store instruction */
13356 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
13358 /* Insert it after the instruction */
13359 mono_bblock_insert_after_ins (bb, ins, store_ins);
13361 def_ins = store_ins;
13364 * We can't assign ins->dreg to var->dreg here, since the
13365 * sregs could use it. So set a flag, and do it after
/* (not cached for fp-stack values or volatile/indirect variables) */
13368 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
13369 dest_has_lvreg = TRUE;
13374 if (def_ins && !live_range_start [dreg]) {
13375 live_range_start [dreg] = def_ins;
13376 live_range_start_bb [dreg] = bb;
13379 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
13382 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
13383 tmp->inst_c1 = dreg;
13384 mono_bblock_insert_after_ins (bb, def_ins, tmp);
/* --- Source registers: reuse a cached lvreg, fuse the load into a _membase
 * form of the opcode, or emit a spill load before the ins. --- */
13391 num_sregs = mono_inst_get_src_registers (ins, sregs);
13392 for (srcindex = 0; srcindex < 3; ++srcindex) {
13393 regtype = spec [MONO_INST_SRC1 + srcindex];
13394 sreg = sregs [srcindex];
13396 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
13397 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
13398 MonoInst *var = get_vreg_to_inst (cfg, sreg);
13399 MonoInst *use_ins = ins;
13400 MonoInst *load_ins;
13401 guint32 load_opcode;
13403 if (var->opcode == OP_REGVAR) {
13404 sregs [srcindex] = var->dreg;
13405 //mono_inst_set_src_registers (ins, sregs);
13406 live_range_end [sreg] = use_ins;
13407 live_range_end_bb [sreg] = bb;
13409 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13412 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13413 /* var->dreg is a hreg */
13414 tmp->inst_c1 = sreg;
13415 mono_bblock_insert_after_ins (bb, ins, tmp);
13421 g_assert (var->opcode == OP_REGOFFSET);
13423 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
13425 g_assert (load_opcode != OP_LOADV_MEMBASE);
13427 if (vreg_to_lvreg [sreg]) {
13428 g_assert (vreg_to_lvreg [sreg] != -1);
13430 /* The variable is already loaded to an lvreg */
13431 if (G_UNLIKELY (cfg->verbose_level > 2))
13432 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
13433 sregs [srcindex] = vreg_to_lvreg [sreg];
13434 //mono_inst_set_src_registers (ins, sregs);
13438 /* Try to fuse the load into the instruction */
13439 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
13440 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
13441 sregs [0] = var->inst_basereg;
13442 //mono_inst_set_src_registers (ins, sregs);
13443 ins->inst_offset = var->inst_offset;
13444 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
13445 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
13446 sregs [1] = var->inst_basereg;
13447 //mono_inst_set_src_registers (ins, sregs);
13448 ins->inst_offset = var->inst_offset;
13450 if (MONO_IS_REAL_MOVE (ins)) {
/* A move out of the variable is subsumed by the load itself. */
13451 ins->opcode = OP_NOP;
13454 //printf ("%d ", srcindex); mono_print_ins (ins);
13456 sreg = alloc_dreg (cfg, stacktypes [regtype]);
13458 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
13459 if (var->dreg == prev_dreg) {
13461 * sreg refers to the value loaded by the load
13462 * emitted below, but we need to use ins->dreg
13463 * since it refers to the store emitted earlier.
13467 g_assert (sreg != -1);
13468 vreg_to_lvreg [var->dreg] = sreg;
13469 g_assert (lvregs_len < 1024);
13470 lvregs [lvregs_len ++] = var->dreg;
13474 sregs [srcindex] = sreg;
13475 //mono_inst_set_src_registers (ins, sregs);
13477 #if SIZEOF_REGISTER != 8
13478 if (regtype == 'l') {
/* 32-bit: reload a long as two 4-byte loads into the component regs. */
13479 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
13480 mono_bblock_insert_before_ins (bb, ins, load_ins);
13481 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
13482 mono_bblock_insert_before_ins (bb, ins, load_ins);
13483 use_ins = load_ins;
13488 #if SIZEOF_REGISTER == 4
13489 g_assert (load_opcode != OP_LOADI8_MEMBASE);
13491 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
13492 mono_bblock_insert_before_ins (bb, ins, load_ins);
13493 use_ins = load_ins;
13497 if (var->dreg < orig_next_vreg) {
13498 live_range_end [var->dreg] = use_ins;
13499 live_range_end_bb [var->dreg] = bb;
13502 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13505 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13506 tmp->inst_c1 = var->dreg;
13507 mono_bblock_insert_after_ins (bb, ins, tmp);
13511 mono_inst_set_src_registers (ins, sregs);
/* Deferred from the dreg handling above: now that the sregs are processed it
 * is safe to record ins->dreg as the cached lvreg for prev_dreg. */
13513 if (dest_has_lvreg) {
13514 g_assert (ins->dreg != -1);
13515 vreg_to_lvreg [prev_dreg] = ins->dreg;
13516 g_assert (lvregs_len < 1024);
13517 lvregs [lvregs_len ++] = prev_dreg;
13518 dest_has_lvreg = FALSE;
/* Undo the store dreg/sreg2 swap performed earlier. */
13522 tmp_reg = ins->dreg;
13523 ins->dreg = ins->sreg2;
13524 ins->sreg2 = tmp_reg;
/* Calls invalidate every cached lvreg. */
13527 if (MONO_IS_CALL (ins)) {
13528 /* Clear vreg_to_lvreg array */
13529 for (i = 0; i < lvregs_len; i++)
13530 vreg_to_lvreg [lvregs [i]] = 0;
13532 } else if (ins->opcode == OP_NOP) {
13534 MONO_INST_NULLIFY_SREGS (ins);
13537 if (cfg->verbose_level > 2)
13538 mono_print_ins_index (1, ins);
13541 /* Extend the live range based on the liveness info */
13542 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
13543 for (i = 0; i < cfg->num_varinfo; i ++) {
13544 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
13546 if (vreg_is_volatile (cfg, vi->vreg))
13547 /* The liveness info is incomplete */
13550 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
13551 /* Live from at least the first ins of this bb */
13552 live_range_start [vi->vreg] = bb->code;
13553 live_range_start_bb [vi->vreg] = bb;
13556 if (mono_bitset_test_fast (bb->live_out_set, i)) {
13557 /* Live at least until the last ins of this bb */
13558 live_range_end [vi->vreg] = bb->last_ins;
13559 live_range_end_bb [vi->vreg] = bb;
13565 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
13567 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
13568 * by storing the current native offset into MonoMethodVar->live_range_start/end.
13570 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
13571 for (i = 0; i < cfg->num_varinfo; ++i) {
13572 int vreg = MONO_VARINFO (cfg, i)->vreg;
13575 if (live_range_start [vreg]) {
13576 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
13578 ins->inst_c1 = vreg;
13579 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
13581 if (live_range_end [vreg]) {
13582 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
13584 ins->inst_c1 = vreg;
13585 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
13586 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
13588 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
13594 if (cfg->gsharedvt_locals_var_ins) {
13595 /* Nullify if unused */
13596 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
13597 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
/* These four arrays were g_new-ed above; the mempool allocations are not freed. */
13600 g_free (live_range_start);
13601 g_free (live_range_end);
13602 g_free (live_range_start_bb);
13603 g_free (live_range_end_bb);
13608 * - use 'iadd' instead of 'int_add'
13609 * - handling ovf opcodes: decompose in method_to_ir.
13610 * - unify iregs/fregs
13611 * -> partly done, the missing parts are:
13612 * - a more complete unification would involve unifying the hregs as well, so
13613 * code wouldn't need if (fp) all over the place. but that would mean the hregs
13614 * would no longer map to the machine hregs, so the code generators would need to
13615 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
13616 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
13617 * fp/non-fp branches speeds it up by about 15%.
13618 * - use sext/zext opcodes instead of shifts
13620 * - get rid of TEMPLOADs if possible and use vregs instead
13621 * - clean up usage of OP_P/OP_ opcodes
13622 * - cleanup usage of DUMMY_USE
13623 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
13625 * - set the stack type and allocate a dreg in the EMIT_NEW macros
13626 * - get rid of all the <foo>2 stuff when the new JIT is ready.
13627 * - make sure handle_stack_args () is called before the branch is emitted
13628 * - when the new IR is done, get rid of all unused stuff
13629 * - COMPARE/BEQ as separate instructions or unify them ?
13630 * - keeping them separate allows specialized compare instructions like
13631 * compare_imm, compare_membase
13632 * - most back ends unify fp compare+branch, fp compare+ceq
13633 * - integrate mono_save_args into inline_method
13634 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
13635 * - handle long shift opts on 32 bit platforms somehow: they require
13636 * 3 sregs (2 for arg1 and 1 for arg2)
13637 * - make byref a 'normal' type.
13638 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
13639 * variable if needed.
13640 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
13641 * like inline_method.
13642 * - remove inlining restrictions
13643 * - fix LNEG and enable cfold of INEG
13644 * - generalize x86 optimizations like ldelema as a peephole optimization
13645 * - add store_mem_imm for amd64
13646 * - optimize the loading of the interruption flag in the managed->native wrappers
13647 * - avoid special handling of OP_NOP in passes
13648 * - move code inserting instructions into one function/macro.
13649 * - try a coalescing phase after liveness analysis
13650 * - add float -> vreg conversion + local optimizations on !x86
13651 * - figure out how to handle decomposed branches during optimizations, ie.
13652 * compare+branch, op_jump_table+op_br etc.
13653 * - promote RuntimeXHandles to vregs
13654 * - vtype cleanups:
13655 * - add a NEW_VARLOADA_VREG macro
13656 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
13657 * accessing vtype fields.
13658 * - get rid of I8CONST on 64 bit platforms
13659 * - dealing with the increase in code size due to branches created during opcode
13661 * - use extended basic blocks
13662 * - all parts of the JIT
13663 * - handle_global_vregs () && local regalloc
13664 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
13665 * - sources of increase in code size:
13668 * - isinst and castclass
13669 * - lvregs not allocated to global registers even if used multiple times
13670 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
13672 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
13673 * - add all micro optimizations from the old JIT
13674 * - put tree optimizations into the deadce pass
13675 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
13676 * specific function.
13677 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
13678 * fcompare + branchCC.
13679 * - create a helper function for allocating a stack slot, taking into account
13680 * MONO_CFG_HAS_SPILLUP.
13682 * - merge the ia64 switch changes.
13683 * - optimize mono_regstate2_alloc_int/float.
13684 * - fix the pessimistic handling of variables accessed in exception handler blocks.
13685 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
13686 * parts of the tree could be separated by other instructions, killing the tree
13687 * arguments, or stores killing loads etc. Also, should we fold loads into other
13688 * instructions if the result of the load is used multiple times ?
13689 * - make the REM_IMM optimization in mini-x86.c arch-independent.
13690 * - LAST MERGE: 108395.
13691 * - when returning vtypes in registers, generate IR and append it to the end of the
13692 * last bb instead of doing it in the epilog.
13693 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
13701 - When to decompose opcodes:
13702 - earlier: this makes some optimizations hard to implement, since the low level IR
13703 no longer contains the necessary information. But it is easier to do.
13704 - later: harder to implement, enables more optimizations.
13705 - Branches inside bblocks:
13706 - created when decomposing complex opcodes.
13707 - branches to another bblock: harmless, but not tracked by the branch
13708 optimizations, so need to branch to a label at the start of the bblock.
13709 - branches to inside the same bblock: very problematic, trips up the local
13710 reg allocator. Can be fixed by splitting the current bblock, but that is a
13711 complex operation, since some local vregs can become global vregs etc.
13712 - Local/global vregs:
13713 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
13714 local register allocator.
13715 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
13716 structure, created by mono_create_var (). Assigned to hregs or the stack by
13717 the global register allocator.
13718 - When to do optimizations like alu->alu_imm:
13719 - earlier -> saves work later on since the IR will be smaller/simpler
13720 - later -> can work on more instructions
13721 - Handling of valuetypes:
13722 - When a vtype is pushed on the stack, a new temporary is created, an
13723 instruction computing its address (LDADDR) is emitted and pushed on
13724 the stack. Need to optimize cases when the vtype is used immediately as in
13725 argument passing, stloc etc.
13726 - Instead of the to_end stuff in the old JIT, simply call the function handling
13727 the values on the stack before emitting the last instruction of the bb.
13730 #endif /* DISABLE_JIT */