2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
30 #include <mono/utils/memcheck.h>
32 #include <mono/metadata/assembly.h>
33 #include <mono/metadata/attrdefs.h>
34 #include <mono/metadata/loader.h>
35 #include <mono/metadata/tabledefs.h>
36 #include <mono/metadata/class.h>
37 #include <mono/metadata/object.h>
38 #include <mono/metadata/exception.h>
39 #include <mono/metadata/opcodes.h>
40 #include <mono/metadata/mono-endian.h>
41 #include <mono/metadata/tokentype.h>
42 #include <mono/metadata/tabledefs.h>
43 #include <mono/metadata/marshal.h>
44 #include <mono/metadata/debug-helpers.h>
45 #include <mono/metadata/mono-debug.h>
46 #include <mono/metadata/gc-internal.h>
47 #include <mono/metadata/security-manager.h>
48 #include <mono/metadata/threads-types.h>
49 #include <mono/metadata/security-core-clr.h>
50 #include <mono/metadata/monitor.h>
51 #include <mono/metadata/profiler-private.h>
52 #include <mono/metadata/profiler.h>
53 #include <mono/utils/mono-compiler.h>
54 #include <mono/metadata/mono-basic-block.h>
61 #include "jit-icalls.h"
63 #include "debugger-agent.h"
/*
 * Inliner tuning constants and compile-abort macros.
 *
 * BRANCH_COST / INLINE_LENGTH_LIMIT weight the inlining heuristics.
 * The *_FAILURE and CHECK_CFG_EXCEPTION macros record an exception on the
 * MonoCompile (mono_cfg_set_exception) and jump to the function-local
 * `exception_exit` label of the method they expand in.
 *
 * NOTE(review): this capture is missing the closing "} while (0)" lines of
 * the multi-line macros below; the code is kept byte-identical.
 */
65 #define BRANCH_COST 10
66 #define INLINE_LENGTH_LIMIT 20
67 #define INLINE_FAILURE do {\
68 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
71 #define CHECK_CFG_EXCEPTION do {\
72 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
75 #define METHOD_ACCESS_FAILURE do { \
76 char *method_fname = mono_method_full_name (method, TRUE); \
77 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
78 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS); \
79 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
80 g_free (method_fname); \
81 g_free (cil_method_fname); \
82 goto exception_exit; \
84 #define FIELD_ACCESS_FAILURE do { \
85 char *method_fname = mono_method_full_name (method, TRUE); \
86 char *field_fname = mono_field_full_name (field); \
87 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS); \
88 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
89 g_free (method_fname); \
90 g_free (field_fname); \
91 goto exception_exit; \
93 #define GENERIC_SHARING_FAILURE(opcode) do { \
94 if (cfg->generic_sharing_context) { \
95 if (cfg->verbose_level > 2) \
96 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
97 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
98 goto exception_exit; \
101 #define OUT_OF_MEMORY_FAILURE do { \
102 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
103 goto exception_exit; \
105 /* Determine whether 'ins' represents a load of the 'this' argument */
106 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/*
 * Forward declarations, cached helper-call signatures (filled in once by
 * mono_create_helper_signatures ()), and the opcode metadata tables built
 * by expanding MINI_OP/MINI_OP3 over "mini-ops.h".
 *
 * NOTE(review): the table declarations below are partially elided in this
 * capture (array names / closing braces missing); code kept byte-identical.
 */
108 static int ldind_to_load_membase (int opcode);
109 static int stind_to_store_membase (int opcode);
111 int mono_op_to_op_imm (int opcode);
112 int mono_op_to_op_imm_noemul (int opcode);
114 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
115 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
116 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
118 /* helper methods signatures */
119 static MonoMethodSignature *helper_sig_class_init_trampoline = NULL;
120 static MonoMethodSignature *helper_sig_domain_get = NULL;
121 static MonoMethodSignature *helper_sig_generic_class_init_trampoline = NULL;
122 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm = NULL;
123 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline = NULL;
124 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline = NULL;
125 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm = NULL;
128 * Instruction metadata
136 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
137 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
143 #if SIZEOF_REGISTER == 8
148 /* keep in sync with the enum in mini.h */
151 #include "mini-ops.h"
156 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
157 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
159 * This should contain the index of the last sreg + 1. This is not the same
160 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
162 const gint8 ins_sreg_counts[] = {
163 #include "mini-ops.h"
168 #define MONO_INIT_VARINFO(vi,id) do { \
169 (vi)->range.first_use.pos.bid = 0xffff; \
175 mono_inst_set_src_registers (MonoInst *ins, int *regs)
177 ins->sreg1 = regs [0];
178 ins->sreg2 = regs [1];
179 ins->sreg3 = regs [2];
183 mono_alloc_ireg (MonoCompile *cfg)
185 return alloc_ireg (cfg);
189 mono_alloc_freg (MonoCompile *cfg)
191 return alloc_freg (cfg);
195 mono_alloc_preg (MonoCompile *cfg)
197 return alloc_preg (cfg);
201 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
203 return alloc_dreg (cfg, stack_type);
207 * mono_alloc_ireg_ref:
209 * Allocate an IREG, and mark it as holding a GC ref.
212 mono_alloc_ireg_ref (MonoCompile *cfg)
214 return alloc_ireg_ref (cfg);
/*
 * mono_type_to_regmove:
 *
 *   Map TYPE to the move opcode used to copy a value of that type between
 * virtual registers; enums and generic instances are canonicalized first
 * (mono_class_enum_basetype / container_class->byval_arg) and re-dispatched.
 * NOTE(review): the return statements between the case labels are missing
 * from this capture; code kept byte-identical.
 */
218 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
224 switch (type->type) {
227 case MONO_TYPE_BOOLEAN:
239 case MONO_TYPE_FNPTR:
241 case MONO_TYPE_CLASS:
242 case MONO_TYPE_STRING:
243 case MONO_TYPE_OBJECT:
244 case MONO_TYPE_SZARRAY:
245 case MONO_TYPE_ARRAY:
249 #if SIZEOF_REGISTER == 8
258 case MONO_TYPE_VALUETYPE:
259 if (type->data.klass->enumtype) {
260 type = mono_class_enum_basetype (type->data.klass);
263 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
266 case MONO_TYPE_TYPEDBYREF:
268 case MONO_TYPE_GENERICINST:
269 type = &type->data.generic_class->container_class->byval_arg;
273 g_assert (cfg->generic_sharing_context);
276 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb:
 *
 *   Debug dump of one basic block: its number, incoming and outgoing
 * edges (block number + depth-first number), then every instruction via
 * mono_print_ins_index (). MSG is a caller-supplied prefix.
 */
282 mono_print_bb (MonoBasicBlock *bb, const char *msg)
287 printf ("\n%s %d: [IN: ", msg, bb->block_num);
288 for (i = 0; i < bb->in_count; ++i)
289 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
291 for (i = 0; i < bb->out_count; ++i)
292 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
294 for (tree = bb->code; tree; tree = tree->next)
295 mono_print_ins_index (-1, tree);
299 mono_create_helper_signatures (void)
301 helper_sig_domain_get = mono_create_icall_signature ("ptr");
302 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
303 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
304 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
305 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
306 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
307 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
311 * Can't put this at the beginning, since other files reference stuff from this
/* Jump to the `unverified` label (or break into the debugger when the
 * break_on_unverified debug option is set). */
316 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
/* Same pattern for metadata load failures: jump to `load_error`. */
318 #define LOAD_ERROR do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto load_error; } while (0)
/* Fetch (or lazily create and register) the basic block starting at CIL
 * address IP; flags out-of-range IPs as unverifiable.
 * NOTE(review): trailing "} while (0)" is missing from this capture. */
320 #define GET_BBLOCK(cfg,tblock,ip) do { \
321 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
323 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
324 NEW_BBLOCK (cfg, (tblock)); \
325 (tblock)->cil_code = (ip); \
326 ADD_BBLOCK (cfg, (tblock)); \
/*
 * EMIT_NEW_X86_LEA (x86/amd64 only): emit an OP_X86_LEA computing
 * sr1 + (sr2 << shift) + imm into a fresh pointer register.
 * ADD_WIDEN_OP (64-bit only): before a PTR x I4 binop, sign-extend the
 * 32-bit operand so both operands are register-width; no-op on 32-bit.
 * NOTE(review): macro terminators are missing from this capture.
 */
330 #if defined(TARGET_X86) || defined(TARGET_AMD64)
331 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
332 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
333 (dest)->dreg = alloc_preg ((cfg)); \
334 (dest)->sreg1 = (sr1); \
335 (dest)->sreg2 = (sr2); \
336 (dest)->inst_imm = (imm); \
337 (dest)->backend.shift_amount = (shift); \
338 MONO_ADD_INS ((cfg)->cbb, (dest)); \
342 #if SIZEOF_REGISTER == 8
343 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
344 /* FIXME: Need to add many more cases */ \
345 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
347 int dr = alloc_preg (cfg); \
348 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
349 (ins)->sreg2 = widen->dreg; \
353 #define ADD_WIDEN_OP(ins, arg1, arg2)
/*
 * IL-to-IR expansion macros used by the main opcode loop:
 *  - ADD_BINOP: pop two stack values, type-check via type_from_op (),
 *    widen if needed, push the decomposed result.
 *  - ADD_UNOP: same for one operand.
 *  - ADD_BINCOND: compare + conditional branch; wires true/false targets
 *    into the CFG and flushes the eval stack at the block boundary.
 * NOTE(review): several continuation lines and the "} while (0)"
 * terminators are missing from this capture; code kept byte-identical.
 */
356 #define ADD_BINOP(op) do { \
357 MONO_INST_NEW (cfg, ins, (op)); \
359 ins->sreg1 = sp [0]->dreg; \
360 ins->sreg2 = sp [1]->dreg; \
361 type_from_op (ins, sp [0], sp [1]); \
363 /* Have to insert a widening op */ \
364 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
365 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
366 MONO_ADD_INS ((cfg)->cbb, (ins)); \
367 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
370 #define ADD_UNOP(op) do { \
371 MONO_INST_NEW (cfg, ins, (op)); \
373 ins->sreg1 = sp [0]->dreg; \
374 type_from_op (ins, sp [0], NULL); \
376 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
377 MONO_ADD_INS ((cfg)->cbb, (ins)); \
378 *sp++ = mono_decompose_opcode (cfg, ins); \
381 #define ADD_BINCOND(next_block) do { \
384 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
385 cmp->sreg1 = sp [0]->dreg; \
386 cmp->sreg2 = sp [1]->dreg; \
387 type_from_op (cmp, sp [0], sp [1]); \
389 type_from_op (ins, sp [0], sp [1]); \
390 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
391 GET_BBLOCK (cfg, tblock, target); \
392 link_bblock (cfg, bblock, tblock); \
393 ins->inst_true_bb = tblock; \
394 if ((next_block)) { \
395 link_bblock (cfg, bblock, (next_block)); \
396 ins->inst_false_bb = (next_block); \
397 start_new_bblock = 1; \
399 GET_BBLOCK (cfg, tblock, ip); \
400 link_bblock (cfg, bblock, tblock); \
401 ins->inst_false_bb = tblock; \
402 start_new_bblock = 2; \
404 if (sp != stack_start) { \
405 handle_stack_args (cfg, stack_start, sp - stack_start); \
406 CHECK_UNVERIFIABLE (cfg); \
408 MONO_ADD_INS (bblock, cmp); \
409 MONO_ADD_INS (bblock, ins); \
/* NOTE(review): this capture is missing several lines of link_bblock ()
 * (the early-return when the edge already exists, and the writeback of
 * `newa` into from->out_bb / to->in_bb); code kept byte-identical. */
413 * link_bblock: Links two basic blocks
415 * links two basic blocks in the control flow graph, the 'from'
416 * argument is the starting block and the 'to' argument is the block
417 * the control flow ends to after 'from'.
420 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
422 MonoBasicBlock **newa;
426 if (from->cil_code) {
428 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
430 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
433 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
435 printf ("edge from entry to exit\n");
/* avoid adding a duplicate out-edge */
440 for (i = 0; i < from->out_count; ++i) {
441 if (to == from->out_bb [i]) {
/* grow the out-edge array in the compile mempool (never freed singly) */
447 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
448 for (i = 0; i < from->out_count; ++i) {
449 newa [i] = from->out_bb [i];
/* mirror the same dedup + grow for the in-edge list of 'to' */
457 for (i = 0; i < to->in_count; ++i) {
458 if (from == to->in_bb [i]) {
464 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
465 for (i = 0; i < to->in_count; ++i) {
466 newa [i] = to->in_bb [i];
475 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
477 link_bblock (cfg, from, to);
481 * mono_find_block_region:
483 * We mark each basic block with a region ID. We use that to avoid BB
484 * optimizations when blocks are in different regions.
487 * A region token that encodes where this region is, and information
488 * about the clause owner for this block.
490 * The region encodes the try/catch/filter clause that owns this block
491 * as well as the type. -1 is a special value that represents a block
492 * that is in none of try/catch/filter.
495 mono_find_block_region (MonoCompile *cfg, int offset)
497 MonoMethodHeader *header = cfg->header;
498 MonoExceptionClause *clause;
/* the clause index is biased by 1 ((i + 1) << 8) so that a region of 0
 * can never collide with a real clause */
501 for (i = 0; i < header->num_clauses; ++i) {
502 clause = &header->clauses [i];
503 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
504 (offset < (clause->handler_offset)))
505 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
507 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
508 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
509 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
510 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
511 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
513 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* inside the protected (try) range but not a handler body */
516 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
517 return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block:
 *
 *   Collect (into a GList) the exception clauses of kind TYPE whose
 * protected range contains IP but not TARGET — i.e. the clauses a branch
 * from IP to TARGET would exit.
 */
524 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
526 MonoMethodHeader *header = cfg->header;
527 MonoExceptionClause *clause;
531 for (i = 0; i < header->num_clauses; ++i) {
532 clause = &header->clauses [i];
533 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
534 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
535 if (clause->flags == type)
536 res = g_list_append (res, clause);
/*
 * mono_create_spvar_for_region:
 *
 *   Return the int-sized local caching the stack pointer for REGION,
 * creating and registering it in cfg->spvars on first use. The var is
 * marked MONO_INST_INDIRECT so the register allocator keeps it on the
 * stack.
 */
543 mono_create_spvar_for_region (MonoCompile *cfg, int region)
547 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
551 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
552 /* prevent it from being register allocated */
553 var->flags |= MONO_INST_INDIRECT;
555 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up (without creating) the exception variable registered for the
 * handler at OFFSET; NULL if none was created yet. */
559 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
561 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/*
 * mono_create_exvar_for_offset:
 *
 *   Return the object-typed local holding the in-flight exception for the
 * handler at OFFSET, creating it (stack-pinned via MONO_INST_INDIRECT)
 * and registering it in cfg->exvars on first use.
 */
565 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
569 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
573 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
574 /* prevent it from being register allocated */
575 var->flags |= MONO_INST_INDIRECT;
577 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
/* Set inst->type / inst->klass to the eval-stack category (STACK_I4,
 * STACK_OBJ, STACK_VTYPE, ...) corresponding to loading TYPE; enums and
 * generic instances are canonicalized and re-dispatched.
 * NOTE(review): the `return`/`goto handle_enum` lines between cases are
 * missing from this capture; code kept byte-identical. */
583 * Returns the type used in the eval stack when @type is loaded.
584 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
587 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
591 inst->klass = klass = mono_class_from_mono_type (type);
593 inst->type = STACK_MP;
598 switch (type->type) {
600 inst->type = STACK_INV;
604 case MONO_TYPE_BOOLEAN:
610 inst->type = STACK_I4;
615 case MONO_TYPE_FNPTR:
616 inst->type = STACK_PTR;
618 case MONO_TYPE_CLASS:
619 case MONO_TYPE_STRING:
620 case MONO_TYPE_OBJECT:
621 case MONO_TYPE_SZARRAY:
622 case MONO_TYPE_ARRAY:
623 inst->type = STACK_OBJ;
627 inst->type = STACK_I8;
631 inst->type = STACK_R8;
633 case MONO_TYPE_VALUETYPE:
634 if (type->data.klass->enumtype) {
635 type = mono_class_enum_basetype (type->data.klass);
639 inst->type = STACK_VTYPE;
642 case MONO_TYPE_TYPEDBYREF:
643 inst->klass = mono_defaults.typed_reference_class;
644 inst->type = STACK_VTYPE;
646 case MONO_TYPE_GENERICINST:
647 type = &type->data.generic_class->container_class->byval_arg;
650 case MONO_TYPE_MVAR :
651 /* FIXME: all the arguments must be references for now,
652 * later look inside cfg and see if the arg num is
655 g_assert (cfg->generic_sharing_context);
656 inst->type = STACK_OBJ;
659 g_error ("unknown type 0x%02x in eval stack type", type->type);
/* Validation tables indexed by [src1->type][src2->type] (STACK_* values),
 * plus the *_op_map tables that translate a generic CIL opcode into its
 * type-specialized IR opcode by adding the table entry to the opcode.
 * NOTE(review): declaration lines (static const guint16 ...) and closing
 * braces of several tables are missing from this capture. */
664 * The following tables are used to quickly validate the IL code in type_from_op ().
667 bin_num_table [STACK_MAX] [STACK_MAX] = {
668 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
669 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
670 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
671 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
672 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
673 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
674 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
675 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* NOTE(review): the row below appears to belong to neg_table (its
 * declaration line is missing from this capture) — confirm upstream. */
680 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
683 /* reduce the size of this table */
685 bin_int_table [STACK_MAX] [STACK_MAX] = {
686 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
687 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
688 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
689 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
690 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
691 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
692 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
693 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
697 bin_comp_table [STACK_MAX] [STACK_MAX] = {
698 /* Inv i L p F & O vt */
700 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
701 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
702 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
703 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
704 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
705 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
706 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
709 /* reduce the size of this table */
711 shift_table [STACK_MAX] [STACK_MAX] = {
712 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
713 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
714 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
715 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
716 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
717 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
718 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
719 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
723 * Tables to map from the non-specific opcode to the matching
724 * type-specific opcode.
726 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
728 binops_op_map [STACK_MAX] = {
729 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
732 /* handles from CEE_NEG to CEE_CONV_U8 */
734 unops_op_map [STACK_MAX] = {
735 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
738 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
740 ovfops_op_map [STACK_MAX] = {
741 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
744 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
746 ovf2ops_op_map [STACK_MAX] = {
747 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
750 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
752 ovf3ops_op_map [STACK_MAX] = {
753 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
756 /* handles from CEE_BEQ to CEE_BLT_UN */
758 beqops_op_map [STACK_MAX] = {
759 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
762 /* handles from CEE_CEQ to CEE_CLT_UN */
764 ceqops_op_map [STACK_MAX] = {
765 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
/* NOTE(review): many `case CEE_*:` labels and `break;` statements of
 * type_from_op () are missing from this capture — the visible assignments
 * belong to different switch arms; code kept byte-identical. */
769 * Sets ins->type (the type on the eval stack) according to the
770 * type of the opcode and the arguments to it.
771 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
773 * FIXME: this function sets ins->type unconditionally in some cases, but
774 * it should set it to invalid for some types (a conv.x on an object)
777 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
779 switch (ins->opcode) {
786 /* FIXME: check unverifiable args for STACK_MP */
787 ins->type = bin_num_table [src1->type] [src2->type];
788 ins->opcode += binops_op_map [ins->type];
795 ins->type = bin_int_table [src1->type] [src2->type];
796 ins->opcode += binops_op_map [ins->type];
801 ins->type = shift_table [src1->type] [src2->type];
802 ins->opcode += binops_op_map [ins->type];
807 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
808 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
809 ins->opcode = OP_LCOMPARE;
810 else if (src1->type == STACK_R8)
811 ins->opcode = OP_FCOMPARE;
813 ins->opcode = OP_ICOMPARE;
815 case OP_ICOMPARE_IMM:
816 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
817 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
818 ins->opcode = OP_LCOMPARE_IMM;
830 ins->opcode += beqops_op_map [src1->type];
833 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
834 ins->opcode += ceqops_op_map [src1->type];
840 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
841 ins->opcode += ceqops_op_map [src1->type];
845 ins->type = neg_table [src1->type];
846 ins->opcode += unops_op_map [ins->type];
849 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
850 ins->type = src1->type;
852 ins->type = STACK_INV;
853 ins->opcode += unops_op_map [ins->type];
859 ins->type = STACK_I4;
860 ins->opcode += unops_op_map [src1->type];
863 ins->type = STACK_R8;
864 switch (src1->type) {
867 ins->opcode = OP_ICONV_TO_R_UN;
870 ins->opcode = OP_LCONV_TO_R_UN;
874 case CEE_CONV_OVF_I1:
875 case CEE_CONV_OVF_U1:
876 case CEE_CONV_OVF_I2:
877 case CEE_CONV_OVF_U2:
878 case CEE_CONV_OVF_I4:
879 case CEE_CONV_OVF_U4:
880 ins->type = STACK_I4;
881 ins->opcode += ovf3ops_op_map [src1->type];
883 case CEE_CONV_OVF_I_UN:
884 case CEE_CONV_OVF_U_UN:
885 ins->type = STACK_PTR;
886 ins->opcode += ovf2ops_op_map [src1->type];
888 case CEE_CONV_OVF_I1_UN:
889 case CEE_CONV_OVF_I2_UN:
890 case CEE_CONV_OVF_I4_UN:
891 case CEE_CONV_OVF_U1_UN:
892 case CEE_CONV_OVF_U2_UN:
893 case CEE_CONV_OVF_U4_UN:
894 ins->type = STACK_I4;
895 ins->opcode += ovf2ops_op_map [src1->type];
898 ins->type = STACK_PTR;
899 switch (src1->type) {
901 ins->opcode = OP_ICONV_TO_U;
905 #if SIZEOF_REGISTER == 8
906 ins->opcode = OP_LCONV_TO_U;
908 ins->opcode = OP_MOVE;
912 ins->opcode = OP_LCONV_TO_U;
915 ins->opcode = OP_FCONV_TO_U;
921 ins->type = STACK_I8;
922 ins->opcode += unops_op_map [src1->type];
924 case CEE_CONV_OVF_I8:
925 case CEE_CONV_OVF_U8:
926 ins->type = STACK_I8;
927 ins->opcode += ovf3ops_op_map [src1->type];
929 case CEE_CONV_OVF_U8_UN:
930 case CEE_CONV_OVF_I8_UN:
931 ins->type = STACK_I8;
932 ins->opcode += ovf2ops_op_map [src1->type];
936 ins->type = STACK_R8;
937 ins->opcode += unops_op_map [src1->type];
940 ins->type = STACK_R8;
944 ins->type = STACK_I4;
945 ins->opcode += ovfops_op_map [src1->type];
950 ins->type = STACK_PTR;
951 ins->opcode += ovfops_op_map [src1->type];
959 ins->type = bin_num_table [src1->type] [src2->type];
960 ins->opcode += ovfops_op_map [src1->type];
961 if (ins->type == STACK_R8)
962 ins->type = STACK_INV;
964 case OP_LOAD_MEMBASE:
965 ins->type = STACK_PTR;
967 case OP_LOADI1_MEMBASE:
968 case OP_LOADU1_MEMBASE:
969 case OP_LOADI2_MEMBASE:
970 case OP_LOADU2_MEMBASE:
971 case OP_LOADI4_MEMBASE:
972 case OP_LOADU4_MEMBASE:
973 ins->type = STACK_PTR;
975 case OP_LOADI8_MEMBASE:
976 ins->type = STACK_I8;
978 case OP_LOADR4_MEMBASE:
979 case OP_LOADR8_MEMBASE:
980 ins->type = STACK_R8;
983 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
987 if (ins->type == STACK_MP)
988 ins->klass = mono_defaults.object_class;
/* Signature-compatibility checking for argument MonoInsts against a
 * MonoMethodSignature. NOTE(review): this capture is missing the table
 * declarations around line 993/999 and most of the switch bodies; code
 * kept byte-identical. */
993 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
999 param_table [STACK_MAX] [STACK_MAX] = {
1004 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1008 switch (args->type) {
1018 for (i = 0; i < sig->param_count; ++i) {
1019 switch (args [i].type) {
1023 if (!sig->params [i]->byref)
1027 if (sig->params [i]->byref)
1029 switch (sig->params [i]->type) {
1030 case MONO_TYPE_CLASS:
1031 case MONO_TYPE_STRING:
1032 case MONO_TYPE_OBJECT:
1033 case MONO_TYPE_SZARRAY:
1034 case MONO_TYPE_ARRAY:
1041 if (sig->params [i]->byref)
1043 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1052 /*if (!param_table [args [i].type] [sig->params [i]->type])
1060 * When we need a pointer to the current domain many times in a method, we
1061 * call mono_domain_get() once and we store the result in a local variable.
1062 * This function returns the variable that represents the MonoDomain*.
1064 inline static MonoInst *
1065 mono_get_domainvar (MonoCompile *cfg)
1067 if (!cfg->domainvar)
1068 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1069 return cfg->domainvar;
/* Lazily create the local holding the GOT address for AOT compilation.
 * NOTE(review): the non-MONO_ARCH_NEED_GOT_VAR #else arm and the early
 * `return NULL` when not compiling AOT are missing from this capture. */
1073 * The got_var contains the address of the Global Offset Table when AOT
1077 mono_get_got_var (MonoCompile *cfg)
1079 #ifdef MONO_ARCH_NEED_GOT_VAR
1080 if (!cfg->compile_aot)
1082 if (!cfg->got_var) {
1083 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1085 return cfg->got_var;
1092 mono_get_vtable_var (MonoCompile *cfg)
1094 g_assert (cfg->generic_sharing_context);
1096 if (!cfg->rgctx_var) {
1097 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1098 /* force the var to be stack allocated */
1099 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1102 return cfg->rgctx_var;
/*
 * type_from_stack_type:
 *
 *   Map an eval-stack category (ins->type) back to a MonoType suitable
 * for declaring a temp/local for the value (e.g. STACK_I4 -> int32).
 */
1106 type_from_stack_type (MonoInst *ins) {
1107 switch (ins->type) {
1108 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1109 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1110 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1111 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1113 return &ins->klass->this_arg;
1114 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1115 case STACK_VTYPE: return &ins->klass->byval_arg;
1117 g_error ("stack type %d to monotype not handled\n", ins->type);
/* Inverse of type_from_stack_type (): classify a MonoType into a STACK_*
 * category after stripping enum wrappers via
 * mono_type_get_underlying_type ().
 * NOTE(review): the `return STACK_*` lines between the case labels are
 * missing from this capture; code kept byte-identical. */
1122 static G_GNUC_UNUSED int
1123 type_to_stack_type (MonoType *t)
1125 t = mono_type_get_underlying_type (t);
1129 case MONO_TYPE_BOOLEAN:
1132 case MONO_TYPE_CHAR:
1139 case MONO_TYPE_FNPTR:
1141 case MONO_TYPE_CLASS:
1142 case MONO_TYPE_STRING:
1143 case MONO_TYPE_OBJECT:
1144 case MONO_TYPE_SZARRAY:
1145 case MONO_TYPE_ARRAY:
1153 case MONO_TYPE_VALUETYPE:
1154 case MONO_TYPE_TYPEDBYREF:
1156 case MONO_TYPE_GENERICINST:
1157 if (mono_type_generic_inst_is_valuetype (t))
1163 g_assert_not_reached ();
/* Map a CIL ldelem.*/stelem.* opcode to the element MonoClass it
 * accesses. NOTE(review): most `case CEE_*:` labels are missing from this
 * capture — each visible `return` belongs to a different opcode arm. */
1170 array_access_to_klass (int opcode)
1174 return mono_defaults.byte_class;
1176 return mono_defaults.uint16_class;
1179 return mono_defaults.int_class;
1182 return mono_defaults.sbyte_class;
1185 return mono_defaults.int16_class;
1188 return mono_defaults.int32_class;
1190 return mono_defaults.uint32_class;
1193 return mono_defaults.int64_class;
1196 return mono_defaults.single_class;
1199 return mono_defaults.double_class;
1200 case CEE_LDELEM_REF:
1201 case CEE_STELEM_REF:
1202 return mono_defaults.object_class;
1204 g_assert_not_reached ();
/* Return a local for stack SLOT holding a value of INS's stack type,
 * reusing a previously created var for the same (slot, type) pair via
 * cfg->intvars when possible; slots beyond max_stack (from inlining)
 * always get a fresh var. */
1210 * We try to share variables when possible
1213 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1218 /* inlining can result in deeper stacks */
1219 if (slot >= cfg->header->max_stack)
1220 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* index the (slot, stack-type) pair into the flat intvars cache */
1222 pos = ins->type - 1 + slot * STACK_MAX;
1224 switch (ins->type) {
1231 if ((vnum = cfg->intvars [pos]))
1232 return cfg->varinfo [vnum];
1233 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1234 cfg->intvars [pos] = res->inst_c0;
1237 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 *
 *   For AOT compilation, remember the (image, token) pair that produced
 * KEY in cfg->token_info_hash so the AOT compiler can re-resolve it.
 */
1243 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1246 * Don't use this if a generic_context is set, since that means AOT can't
1247 * look up the method using just the image+token.
1248 * table == 0 means this is a reference made from a wrapper.
1250 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1251 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1252 jump_info_token->image = image;
1253 jump_info_token->token = token;
1254 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
/* NOTE(review): several lines of handle_stack_args () (continue statements,
 * closing braces, bindex bookkeeping) are missing from this capture; code
 * kept byte-identical. */
1259 * This function is called to handle items that are left on the evaluation stack
1260 * at basic block boundaries. What happens is that we save the values to local variables
1261 * and we reload them later when first entering the target basic block (with the
1262 * handle_loaded_temps () function).
1263 * A single join point will use the same variables (stored in the array bb->out_stack or
1264 * bb->in_stack, if the basic block is before or after the join point).
1266 * This function needs to be called _before_ emitting the last instruction of
1267 * the bb (i.e. before emitting a branch).
1268 * If the stack merge fails at a join point, cfg->unverifiable is set.
1271 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1274 MonoBasicBlock *bb = cfg->cbb;
1275 MonoBasicBlock *outb;
1276 MonoInst *inst, **locals;
1281 if (cfg->verbose_level > 3)
1282 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
1283 if (!bb->out_scount) {
1284 bb->out_scount = count;
1285 //printf ("bblock %d has out:", bb->block_num);
1287 for (i = 0; i < bb->out_count; ++i) {
1288 outb = bb->out_bb [i];
1289 /* exception handlers are linked, but they should not be considered for stack args */
1290 if (outb->flags & BB_EXCEPTION_HANDLER)
1292 //printf (" %d", outb->block_num);
1293 if (outb->in_stack) {
1295 bb->out_stack = outb->in_stack;
1301 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1302 for (i = 0; i < count; ++i) {
1304 * try to reuse temps already allocated for this purpose, if they occupy the same
1305 * stack slot and if they are of the same type.
1306 * This won't cause conflicts since if 'local' is used to
1307 * store one of the values in the in_stack of a bblock, then
1308 * the same variable will be used for the same outgoing stack
1310 * This doesn't work when inlining methods, since the bblocks
1311 * in the inlined methods do not inherit their in_stack from
1312 * the bblock they are inlined to. See bug #58863 for an
1315 if (cfg->inlined_method)
1316 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1318 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
1323 for (i = 0; i < bb->out_count; ++i) {
1324 outb = bb->out_bb [i];
1325 /* exception handlers are linked, but they should not be considered for stack args */
1326 if (outb->flags & BB_EXCEPTION_HANDLER)
1328 if (outb->in_scount) {
1329 if (outb->in_scount != bb->out_scount) {
1330 cfg->unverifiable = TRUE;
1333 continue; /* check they are the same locals */
1335 outb->in_scount = count;
1336 outb->in_stack = bb->out_stack;
1339 locals = bb->out_stack;
1341 for (i = 0; i < count; ++i) {
1342 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1343 inst->cil_code = sp [i]->cil_code;
1344 sp [i] = locals [i];
1345 if (cfg->verbose_level > 3)
1346 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1350 * It is possible that the out bblocks already have in_stack assigned, and
1351 * the in_stacks differ. In this case, we will store to all the different
1358 /* Find a bblock which has a different in_stack */
1360 while (bindex < bb->out_count) {
1361 outb = bb->out_bb [bindex];
1362 /* exception handlers are linked, but they should not be considered for stack args */
1363 if (outb->flags & BB_EXCEPTION_HANDLER) {
1367 if (outb->in_stack != locals) {
1368 for (i = 0; i < count; ++i) {
1369 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1370 inst->cil_code = sp [i]->cil_code;
1371 sp [i] = locals [i];
1372 if (cfg->verbose_level > 3)
1373 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1375 locals = outb->in_stack;
1384 /* Emit code which loads interface_offsets [klass->interface_id]
1385 * The array is stored in memory before vtable.
1388 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1390 if (cfg->compile_aot) {
/* AOT: interface_id is not known at compile time, so fetch it through an
 * ADJUSTED_IID patch and index the table (located before the vtable) in code. */
1391 int ioffset_reg = alloc_preg (cfg);
1392 int iid_reg = alloc_preg (cfg);
1394 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1395 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1396 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT: the iid is a constant, so a fixed negative offset from the vtable works. */
1399 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/* Load into intf_bit_reg a nonzero value iff bit 'klass->interface_id' is set
 * in the interface bitmap found at base_reg + offset. */
1404 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1406 int ibitmap_reg = alloc_preg (cfg);
1407 #ifdef COMPRESSED_INTERFACE_BITMAP
/* Compressed bitmap: membership cannot be tested inline; call the
 * mono_class_interface_match icall with the bitmap pointer and the iid. */
1409 MonoInst *res, *ins;
1410 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1411 MONO_ADD_INS (cfg->cbb, ins);
1413 if (cfg->compile_aot)
1414 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1416 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1417 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1418 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1420 int ibitmap_byte_reg = alloc_preg (cfg);
1422 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1424 if (cfg->compile_aot) {
/* AOT: iid unknown at compile time; compute the byte index (iid >> 3)
 * and the bit mask (1 << (iid & 7)) entirely in registers. */
1425 int iid_reg = alloc_preg (cfg);
1426 int shifted_iid_reg = alloc_preg (cfg);
1427 int ibitmap_byte_address_reg = alloc_preg (cfg);
1428 int masked_iid_reg = alloc_preg (cfg);
1429 int iid_one_bit_reg = alloc_preg (cfg);
1430 int iid_bit_reg = alloc_preg (cfg);
1431 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1432 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1433 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1434 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1435 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1436 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1437 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1438 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: iid is a constant, so the byte offset and bit mask fold into immediates. */
1440 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1441 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1447 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1448 * stored in "klass_reg" implements the interface "klass".
1451 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
/* Thin wrapper: the bitmap lives at MonoClass::interface_bitmap. */
1453 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1457 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1458 * stored in "vtable_reg" implements the interface "klass".
1461 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
/* Thin wrapper: the bitmap lives at MonoVTable::interface_bitmap. */
1463 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1467 * Emit code which checks whether the interface id of @klass is smaller than
1468 * the value given by max_iid_reg.
1471 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1472 MonoBasicBlock *false_target)
1474 if (cfg->compile_aot) {
/* AOT: load the iid via a patch so it can be compared at run time. */
1475 int iid_reg = alloc_preg (cfg);
1476 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1477 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1480 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
/* With a false_target we branch on failure; otherwise throw InvalidCastException. */
1482 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1484 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1487 /* Same as above, but obtains max_iid from a vtable */
1489 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1490 MonoBasicBlock *false_target)
1492 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is a 16-bit field, hence the unsigned 2-byte load. */
1494 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1495 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1498 /* Same as above, but obtains max_iid from a klass */
1500 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1501 MonoBasicBlock *false_target)
1503 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is a 16-bit field, hence the unsigned 2-byte load. */
1505 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1506 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/* Emit an isinst-style subtype test: walk the supertype table of the class in
 * klass_reg and branch to true_target on a match. 'klass' may alternatively be
 * given dynamically via klass_ins (its dreg), e.g. for generic sharing. */
1510 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1512 int idepth_reg = alloc_preg (cfg);
1513 int stypes_reg = alloc_preg (cfg);
1514 int stype = alloc_preg (cfg);
/* Only classes deeper than the default supertable need an explicit depth check. */
1516 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1517 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1518 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1519 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
/* Load supertypes [idepth - 1] and compare it against the target class. */
1521 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1522 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1524 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1525 } else if (cfg->compile_aot) {
1526 int const_reg = alloc_preg (cfg);
1527 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1528 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
/* JIT, static class: the MonoClass pointer can be embedded as an immediate. */
1530 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1532 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper for the common case of a statically known target class. */
1536 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1538 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/* Emit an interface cast check against the vtable in vtable_reg: first bound
 * the iid against max_interface_id, then test the interface bitmap bit. */
1542 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1544 int intf_reg = alloc_preg (cfg);
1546 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1547 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1548 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
/* With a true_target we branch on success; otherwise failure throws. */
1550 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1552 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1556 * Variant of the above that takes a register to the class, not the vtable.
1559 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1561 int intf_bit_reg = alloc_preg (cfg);
1563 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1564 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1565 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
/* With a true_target we branch on success; otherwise failure throws. */
1567 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1569 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Emit an exact class-equality check: compare klass_reg against 'klass'
 * (or against the dynamic class in klass_inst->dreg) and throw
 * InvalidCastException on mismatch. */
1573 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1576 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1577 } else if (cfg->compile_aot) {
/* AOT cannot embed raw MonoClass pointers; go through a CLASSCONST patch. */
1578 int const_reg = alloc_preg (cfg);
1579 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1580 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1582 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1584 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact class check against a statically known class. */
1588 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1590 return mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/* Compare the class in klass_reg against 'klass' and branch to 'target'
 * using the given branch opcode (e.g. OP_PBEQ / OP_PBNE_UN). */
1594 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1596 if (cfg->compile_aot) {
/* AOT cannot embed raw MonoClass pointers; go through a CLASSCONST patch. */
1597 int const_reg = alloc_preg (cfg);
1598 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1599 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1601 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1603 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/* Forward declaration: mini_emit_castclass_inst below recurses into this. */
1607 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/* Emit a castclass check for the class in klass_reg against 'klass' (or the
 * dynamic class in klass_inst). Throws InvalidCastException on failure.
 * obj_reg == -1 skips the vector check for nested array element checks. */
1610 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1613 int rank_reg = alloc_preg (cfg);
1614 int eclass_reg = alloc_preg (cfg);
/* Array path (the enclosing condition is elided in this extraction):
 * check rank equality, then the element class. */
1616 g_assert (!klass_inst);
1617 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1618 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1619 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1620 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1621 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Special-case element types with non-trivial array covariance rules
 * (object, enum base types, enums, interfaces). */
1622 if (klass->cast_class == mono_defaults.object_class) {
1623 int parent_reg = alloc_preg (cfg);
1624 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1625 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1626 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1627 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1628 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1629 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1630 } else if (klass->cast_class == mono_defaults.enum_class) {
1631 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1632 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1633 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1635 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1636 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1639 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1640 /* Check that the object is a vector too */
1641 int bounds_reg = alloc_preg (cfg);
1642 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1643 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1644 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: walk the supertype table, same scheme as
 * mini_emit_isninst_cast_inst but throwing instead of branching. */
1647 int idepth_reg = alloc_preg (cfg);
1648 int stypes_reg = alloc_preg (cfg);
1649 int stype = alloc_preg (cfg);
1651 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1652 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1653 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1654 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1656 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1657 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1658 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass against a statically known class. */
1663 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1665 return mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/* Emit inline code which sets 'size' bytes at destreg + offset to 'val'
 * (only val == 0 is supported), honoring the given alignment. */
1669 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1673 g_assert (val == 0);
/* Small, suitably aligned blocks become a single store-immediate. */
1678 if ((size <= 4) && (size <= align)) {
1681 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1684 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1687 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1689 #if SIZEOF_REGISTER == 8
1691 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* Otherwise materialize the value in a register and emit a run of stores. */
1697 val_reg = alloc_preg (cfg);
1699 if (SIZEOF_REGISTER == 8)
1700 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1702 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned destination: fall back to byte stores. */
1705 /* This could be optimized further if necessary */
1707 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1714 #if !NO_UNALIGNED_ACCESS
/* Widest stores first (8-byte on 64-bit), then narrower tails. */
1715 if (SIZEOF_REGISTER == 8) {
1717 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1722 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1730 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1735 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1740 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/* Emit inline code copying 'size' bytes from srcreg + soffset to
 * destreg + doffset, honoring the given alignment. */
1747 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1754 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1755 g_assert (size < 10000);
/* Unaligned copy: fall back to a byte-by-byte load/store loop. */
1758 /* This could be optimized further if necessary */
1760 cur_reg = alloc_preg (cfg);
1761 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1762 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1769 #if !NO_UNALIGNED_ACCESS
/* Aligned copy: widest transfers first (8-byte on 64-bit), narrower tails after. */
1770 if (SIZEOF_REGISTER == 8) {
1772 cur_reg = alloc_preg (cfg);
1773 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1774 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1783 cur_reg = alloc_preg (cfg);
1784 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1785 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1791 cur_reg = alloc_preg (cfg);
1792 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1793 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1799 cur_reg = alloc_preg (cfg);
1800 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1801 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/* Map a managed return type to the matching call opcode family:
 * indirect (calli), virtual (virt) or direct; with VOID/CALL/LCALL/FCALL/
 * VCALL variants depending on the return kind. */
1809 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
1812 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
/* Resolve shared generic parameters to their concrete basic type first. */
1815 type = mini_get_basic_type_from_generic (gsctx, type);
1816 switch (type->type) {
1817 case MONO_TYPE_VOID:
1818 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1821 case MONO_TYPE_BOOLEAN:
1824 case MONO_TYPE_CHAR:
1827 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1831 case MONO_TYPE_FNPTR:
1832 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1833 case MONO_TYPE_CLASS:
1834 case MONO_TYPE_STRING:
1835 case MONO_TYPE_OBJECT:
1836 case MONO_TYPE_SZARRAY:
1837 case MONO_TYPE_ARRAY:
1838 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1841 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1844 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1845 case MONO_TYPE_VALUETYPE:
/* Enums dispatch on their underlying type (re-enter the switch). */
1846 if (type->data.klass->enumtype) {
1847 type = mono_class_enum_basetype (type->data.klass);
1850 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1851 case MONO_TYPE_TYPEDBYREF:
1852 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1853 case MONO_TYPE_GENERICINST:
/* Re-dispatch on the generic type definition. */
1854 type = &type->data.generic_class->container_class->byval_arg;
1857 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1863 * target_type_is_incompatible:
1864 * @cfg: MonoCompile context
1866 * Check that the item @arg on the evaluation stack can be stored
1867 * in the target type (can be a local, or field, etc).
1868 * The cfg arg can be used to check if we need verification or just
1871 * Returns: non-0 value if arg can't be stored on a target.
1874 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1876 MonoType *simple_type;
1879 if (target->byref) {
1880 /* FIXME: check that the pointed to types match */
1881 if (arg->type == STACK_MP)
1882 return arg->klass != mono_class_from_mono_type (target);
1883 if (arg->type == STACK_PTR)
/* Non-byref: compare the stack slot kind against the underlying type. */
1888 simple_type = mono_type_get_underlying_type (target);
1889 switch (simple_type->type) {
1890 case MONO_TYPE_VOID:
1894 case MONO_TYPE_BOOLEAN:
1897 case MONO_TYPE_CHAR:
1900 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1904 /* STACK_MP is needed when setting pinned locals */
1905 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1910 case MONO_TYPE_FNPTR:
1911 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1914 case MONO_TYPE_CLASS:
1915 case MONO_TYPE_STRING:
1916 case MONO_TYPE_OBJECT:
1917 case MONO_TYPE_SZARRAY:
1918 case MONO_TYPE_ARRAY:
1919 if (arg->type != STACK_OBJ)
1921 /* FIXME: check type compatibility */
1925 if (arg->type != STACK_I8)
1930 if (arg->type != STACK_R8)
1933 case MONO_TYPE_VALUETYPE:
/* Value types must match exactly: same slot kind and same class. */
1934 if (arg->type != STACK_VTYPE)
1936 klass = mono_class_from_mono_type (simple_type);
1937 if (klass != arg->klass)
1940 case MONO_TYPE_TYPEDBYREF:
1941 if (arg->type != STACK_VTYPE)
1943 klass = mono_class_from_mono_type (simple_type);
1944 if (klass != arg->klass)
1947 case MONO_TYPE_GENERICINST:
/* Generic instantiations: value types need an exact class match,
 * reference types only need an object slot. */
1948 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1949 if (arg->type != STACK_VTYPE)
1951 klass = mono_class_from_mono_type (simple_type);
1952 if (klass != arg->klass)
1956 if (arg->type != STACK_OBJ)
1958 /* FIXME: check type compatibility */
1962 case MONO_TYPE_MVAR:
1963 /* FIXME: all the arguments must be references for now,
1964 * later look inside cfg and see if the arg num is
1965 * really a reference
1967 g_assert (cfg->generic_sharing_context);
1968 if (arg->type != STACK_OBJ)
1972 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1978 * Prepare arguments for passing to a function call.
1979 * Return a non-zero value if the arguments can't be passed to the given
1981 * The type checks are not yet complete and some conversions may need
1982 * casts on 32 or 64 bit architectures.
1984 * FIXME: implement this using target_type_is_incompatible ()
1987 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1989 MonoType *simple_type;
/* The 'this' argument (args [0]) must be an object, managed pointer or
 * native pointer (the hasthis condition is elided in this extraction). */
1993 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1997 for (i = 0; i < sig->param_count; ++i) {
1998 if (sig->params [i]->byref) {
1999 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2003 simple_type = sig->params [i];
/* Resolve shared generic parameters before inspecting the type code. */
2004 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
2006 switch (simple_type->type) {
2007 case MONO_TYPE_VOID:
2012 case MONO_TYPE_BOOLEAN:
2015 case MONO_TYPE_CHAR:
2018 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2024 case MONO_TYPE_FNPTR:
2025 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2028 case MONO_TYPE_CLASS:
2029 case MONO_TYPE_STRING:
2030 case MONO_TYPE_OBJECT:
2031 case MONO_TYPE_SZARRAY:
2032 case MONO_TYPE_ARRAY:
2033 if (args [i]->type != STACK_OBJ)
2038 if (args [i]->type != STACK_I8)
2043 if (args [i]->type != STACK_R8)
2046 case MONO_TYPE_VALUETYPE:
/* Enums re-dispatch on their underlying type. */
2047 if (simple_type->data.klass->enumtype) {
2048 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2051 if (args [i]->type != STACK_VTYPE)
2054 case MONO_TYPE_TYPEDBYREF:
2055 if (args [i]->type != STACK_VTYPE)
2058 case MONO_TYPE_GENERICINST:
2059 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2063 g_error ("unknown type 0x%02x in check_call_signature",
/* Map an OP_*CALLVIRT opcode to the corresponding direct OP_*CALL opcode. */
2071 callvirt_to_call (int opcode)
2076 case OP_VOIDCALLVIRT:
/* Unhandled opcode: caller passed something that is not a callvirt. */
2085 g_assert_not_reached ();
/* Map an OP_*CALLVIRT opcode to the OP_*CALL_MEMBASE variant which calls
 * through an address loaded from memory (vtable/IMT slot). */
2092 callvirt_to_call_membase (int opcode)
2096 return OP_CALL_MEMBASE;
2097 case OP_VOIDCALLVIRT:
2098 return OP_VOIDCALL_MEMBASE;
2100 return OP_FCALL_MEMBASE;
2102 return OP_LCALL_MEMBASE;
2104 return OP_VCALL_MEMBASE;
2106 g_assert_not_reached ();
2112 #ifdef MONO_ARCH_HAVE_IMT
/* Pass the IMT argument (the method, or the supplied imt_arg) to 'call'.
 * The LLVM backend and the native backend wire it up differently. */
2114 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2118 if (COMPILE_LLVM (cfg)) {
2119 method_reg = alloc_preg (cfg);
2122 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2123 } else if (cfg->compile_aot) {
2124 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
/* JIT: embed the MonoMethod pointer directly. */
2127 MONO_INST_NEW (cfg, ins, OP_PCONST);
2128 ins->inst_p0 = call->method;
2129 ins->dreg = method_reg;
2130 MONO_ADD_INS (cfg->cbb, ins);
2134 call->imt_arg_reg = method_reg;
2136 #ifdef MONO_ARCH_IMT_REG
2137 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2139 /* Need this to keep the IMT arg alive */
2140 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
/* Non-LLVM path: same value selection, but the arch decides placement. */
2145 #ifdef MONO_ARCH_IMT_REG
2146 method_reg = alloc_preg (cfg)
2149 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2150 } else if (cfg->compile_aot) {
2151 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2154 MONO_INST_NEW (cfg, ins, OP_PCONST);
2155 ins->inst_p0 = call->method;
2156 ins->dreg = method_reg;
2157 MONO_ADD_INS (cfg->cbb, ins);
2160 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* Architectures without a dedicated IMT register emit it their own way. */
2162 mono_arch_emit_imt_argument (cfg, call, imt_arg);
2167 static MonoJumpInfo *
/* Allocate a MonoJumpInfo from 'mp' describing a patch of the given type
 * at IL offset 'ip' targeting 'target'. */
2168 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2170 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2174 ji->data.target = target;
2179 inline static MonoCallInst *
/* Build a MonoCallInst for a call with signature 'sig' and arguments 'args'.
 * 'calli'/'virtual'/'tail'/'rgctx' select the opcode and call flavor.
 * Handles vtype returns and the soft-float r4 argument conversion. */
2180 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2181 MonoInst **args, int calli, int virtual, int tail, int rgctx)
2184 #ifdef MONO_ARCH_SOFT_FLOAT
2189 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2191 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2194 call->signature = sig;
2195 call->rgctx_reg = rgctx;
2197 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
/* Vtype return, first branch (condition elided here; presumably the
 * tail-call/vret_addr path — TODO confirm against full source). */
2200 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2201 call->vret_var = cfg->vret_addr;
2202 //g_assert_not_reached ();
2204 } else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
/* Vtype return: allocate a temp to receive the value. */
2205 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2208 temp->backend.is_pinvoke = sig->pinvoke;
2211 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2212 * address of return value to increase optimization opportunities.
2213 * Before vtype decomposition, the dreg of the call ins itself represents the
2214 * fact the call modifies the return value. After decomposition, the call will
2215 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2216 * will be transformed into an LDADDR.
2218 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2219 loada->dreg = alloc_preg (cfg);
2220 loada->inst_p0 = temp;
2221 /* We reference the call too since call->dreg could change during optimization */
2222 loada->inst_p1 = call;
2223 MONO_ADD_INS (cfg->cbb, loada);
2225 call->inst.dreg = temp->dreg;
2227 call->vret_var = loada;
2228 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2229 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2231 #ifdef MONO_ARCH_SOFT_FLOAT
2232 if (COMPILE_SOFT_FLOAT (cfg)) {
2234 * If the call has a float argument, we would need to do an r8->r4 conversion using
2235 * an icall, but that cannot be done during the call sequence since it would clobber
2236 * the call registers + the stack. So we do it before emitting the call.
2238 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2240 MonoInst *in = call->args [i];
/* args [0] is 'this' when hasthis; parameter types start after it. */
2242 if (i >= sig->hasthis)
2243 t = sig->params [i - sig->hasthis];
2245 t = &mono_defaults.int_class->byval_arg;
2246 t = mono_type_get_underlying_type (t);
2248 if (!t->byref && t->type == MONO_TYPE_R4) {
2249 MonoInst *iargs [1];
2253 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2255 /* The result will be in an int vreg */
2256 call->args [i] = conv;
/* Let the active backend lower the outgoing arguments. */
2263 if (COMPILE_LLVM (cfg))
2264 mono_llvm_emit_call (cfg, call);
2266 mono_arch_emit_call (cfg, call);
2268 mono_arch_emit_call (cfg, call);
/* Track the largest outgoing-parameter area and flag the method as calling. */
2271 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2272 cfg->flags |= MONO_CFG_HAS_CALLS;
/* Attach the rgctx argument to 'call': via the dedicated MONO_ARCH_RGCTX_REG
 * when the architecture has one, otherwise by recording the vreg. */
2278 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2280 #ifdef MONO_ARCH_RGCTX_REG
2281 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2282 cfg->uses_rgctx_reg = TRUE;
2283 call->rgctx_reg = TRUE;
2285 call->rgctx_arg_reg = rgctx_reg;
2292 inline static MonoInst*
/* Emit an indirect call through 'addr', optionally passing an rgctx argument. */
2293 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
/* Copy the rgctx value into a fresh vreg so it survives until the call. */
2299 rgctx_reg = mono_alloc_preg (cfg);
2300 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2306 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE);
2308 call->inst.sreg1 = addr->dreg;
2310 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2313 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2315 return (MonoInst*)call;
/* Forward declarations for the rgctx fetch helpers used by the call emitters below. */
2319 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
2321 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, int rgctx_type);
/* Emit a managed call to 'method'. Chooses between direct, virtual (vtable),
 * interface (IMT), delegate-invoke and remoting-wrapper dispatch, and wires
 * up the optional imt_arg / rgctx_arg. Returns the call instruction. */
2324 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2325 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2327 gboolean might_be_remote;
2328 gboolean virtual = this != NULL;
2329 gboolean enable_for_aot = TRUE;
/* Preserve the rgctx value in a fresh vreg until the call is emitted. */
2335 rgctx_reg = mono_alloc_preg (cfg);
2336 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2339 if (method->string_ctor) {
2340 /* Create the real signature */
2341 /* FIXME: Cache these */
2342 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2343 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
/* Non-virtual calls on MarshalByRef (or object) instances may need the
 * remoting-invoke-with-check wrapper, since 'this' could be a proxy. */
2348 might_be_remote = this && sig->hasthis &&
2349 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2350 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this);
2352 context_used = mono_method_check_context_used (method);
2353 if (might_be_remote && context_used) {
/* Shared generic code can't use the wrapper directly: fetch the wrapper
 * address from the rgctx and call indirectly. */
2356 g_assert (cfg->generic_sharing_context);
2358 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2360 return mono_emit_calli (cfg, sig, args, addr, NULL);
2363 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE, rgctx_arg ? TRUE : FALSE);
2365 if (might_be_remote)
2366 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2368 call->method = method;
2369 call->inst.flags |= MONO_INST_HAS_METHOD;
2370 call->inst.inst_left = this;
2373 int vtable_reg, slot_reg, this_reg;
2375 this_reg = this->dreg;
2377 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
/* Delegate Invoke: dispatch through delegate->invoke_impl directly. */
2378 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2379 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2381 /* Make a call to delegate->invoke_impl */
2382 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2383 call->inst.inst_basereg = this_reg;
2384 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2385 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2387 return (MonoInst*)call;
2391 if ((!cfg->compile_aot || enable_for_aot) &&
2392 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2393 (MONO_METHOD_IS_FINAL (method) &&
2394 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2395 !(method->klass->marshalbyref && context_used)) {
2397 * the method is not virtual, we just need to ensure this is not null
2398 * and then we can call the method directly.
2400 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2402 * The check above ensures method is not gshared, this is needed since
2403 * gshared methods can't have wrappers.
2405 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2408 if (!method->string_ctor)
2409 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2411 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2413 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2415 return (MonoInst*)call;
2418 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2420 * the method is virtual, but we can statically dispatch since either
2421 * it's class or the method itself are sealed.
2422 * But first we need to ensure it's not a null reference.
2424 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2426 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2427 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2429 return (MonoInst*)call;
/* True virtual dispatch: call through a slot loaded from the vtable. */
2432 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2434 vtable_reg = alloc_preg (cfg);
2435 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2436 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2438 #ifdef MONO_ARCH_HAVE_IMT
/* Interface call via IMT: slots sit at negative offsets before the vtable. */
2440 guint32 imt_slot = mono_method_get_imt_slot (method);
2441 emit_imt_argument (cfg, call, imt_arg);
2442 slot_reg = vtable_reg;
2443 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
/* No IMT: load the per-interface table pointer and index by vtable slot. */
2446 if (slot_reg == -1) {
2447 slot_reg = alloc_preg (cfg);
2448 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2449 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
/* Class virtual call: index into the vtable's method table. */
2452 slot_reg = vtable_reg;
2453 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2454 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2455 #ifdef MONO_ARCH_HAVE_IMT
/* A non-interface imt_arg only makes sense for generic virtual methods. */
2457 g_assert (mono_method_signature (method)->generic_param_count);
2458 emit_imt_argument (cfg, call, imt_arg);
2463 call->inst.sreg1 = slot_reg;
2464 call->virtual = TRUE;
2467 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2470 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2472 return (MonoInst*)call;
/* Convenience wrapper: emit a call with the method's own signature and no
 * IMT or rgctx argument. */
2476 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2478 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL, NULL);
/* Emit a direct call to the native function 'func' with signature 'sig'. */
2482 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2489 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE);
2492 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2494 return (MonoInst*)call;
2498 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2500 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2504 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2508 * mono_emit_abs_call:
2510 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2512 inline static MonoInst*
2513 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2514 MonoMethodSignature *sig, MonoInst **args)
2516 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2520 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2523 if (cfg->abs_patches == NULL)
2524 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2525 g_hash_table_insert (cfg->abs_patches, ji, ji);
2526 ins = mono_emit_native_call (cfg, ji, sig, args);
2527 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
2532 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2534 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2535 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2539 * Native code might return non register sized integers
2540 * without initializing the upper bits.
2542 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2543 case OP_LOADI1_MEMBASE:
2544 widen_op = OP_ICONV_TO_I1;
2546 case OP_LOADU1_MEMBASE:
2547 widen_op = OP_ICONV_TO_U1;
2549 case OP_LOADI2_MEMBASE:
2550 widen_op = OP_ICONV_TO_I2;
2552 case OP_LOADU2_MEMBASE:
2553 widen_op = OP_ICONV_TO_U2;
2559 if (widen_op != -1) {
2560 int dreg = alloc_preg (cfg);
2563 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2564 widen->type = ins->type;
2574 get_memcpy_method (void)
2576 static MonoMethod *memcpy_method = NULL;
2577 if (!memcpy_method) {
2578 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2580 g_error ("Old corlib found. Install a new one");
2582 return memcpy_method;
2586 create_write_barrier_bitmap (MonoClass *klass, unsigned *wb_bitmap, int offset)
2588 MonoClassField *field;
2589 gpointer iter = NULL;
2591 while ((field = mono_class_get_fields (klass, &iter))) {
2594 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
2596 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2597 if (mono_type_is_reference (field->type)) {
2598 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2599 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2601 /*FIXME support nested value types so this works for: struct X { Y y; int z;} struct Y { object a,b; }*/
2602 MonoClass *field_class = mono_class_from_mono_type (field->type);
2603 if (field_class->has_references)
2604 create_write_barrier_bitmap (field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *
 *   Emit the GC write barrier required after storing an object reference
 * through PTR.  The stored value is either the instruction VALUE or, when
 * VALUE is NULL, the register VALUE_REG.  Three strategies are visible
 * below: a backend-provided OP_CARD_TABLE_WBARRIER opcode, inline card-table
 * marking, and a call to the generic GC write barrier method.
 * NOTE(review): several lines of this extract are elided; confirm the exact
 * branch structure against the full source.
 */
2610 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value, int value_reg)
2612 int card_table_shift_bits;
2613 gpointer card_table_mask;
2615 MonoInst *dummy_use;
2616 int nursery_shift_bits;
2617 size_t nursery_size;
2618 gboolean has_card_table_wb = FALSE;
/* Nothing to emit unless this method needs write barriers at all. */
2620 if (!cfg->gen_write_barriers)
2623 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
2625 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
2627 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
2628 has_card_table_wb = TRUE;
/* Fast path: the backend implements the card table barrier as one opcode. */
2631 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0) {
2634 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2635 wbarrier->sreg1 = ptr->dreg;
2637 wbarrier->sreg2 = value->dreg;
2639 wbarrier->sreg2 = value_reg;
2640 MONO_ADD_INS (cfg->cbb, wbarrier);
/* Inline card marking: card index = address >> shift, then store 1 into the card byte. */
2641 } else if (card_table) {
2642 int offset_reg = alloc_preg (cfg);
2643 int card_reg = alloc_preg (cfg);
2646 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
2647 if (card_table_mask)
2648 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
2650 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
2651 * IMM's larger than 32bits.
2653 if (cfg->compile_aot) {
2654 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
2656 MONO_INST_NEW (cfg, ins, OP_PCONST);
2657 ins->inst_p0 = card_table;
2658 ins->dreg = card_reg;
2659 MONO_ADD_INS (cfg->cbb, ins);
2662 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
2663 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* Slow path: call the GC-provided managed write barrier method. */
2665 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2666 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep the stored value alive until after the barrier executes. */
2670 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
2672 MONO_INST_NEW (cfg, dummy_use, OP_DUMMY_USE);
2673 dummy_use->sreg1 = value_reg;
2674 MONO_ADD_INS (cfg->cbb, dummy_use);
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Try to emit an unrolled, write-barrier-aware copy of a valuetype of type
 * KLASS from iargs[1] to iargs[0] (SIZE bytes, ALIGN alignment).  Emits a
 * write barrier after each pointer-sized store whose slot is marked in the
 * reference bitmap.  Large copies are handed to the
 * mono_gc_wbarrier_value_copy_bitmap icall instead of being unrolled.
 * NOTE(review): lines are elided in this extract (return statements and some
 * braces are not visible); confirm control flow against the full source.
 */
2679 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
2681 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
2682 unsigned need_wb = 0;
2687 /*types with references can't have alignment smaller than sizeof(void*) */
2688 if (align < SIZEOF_VOID_P)
2691 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
2692 if (size > 32 * SIZEOF_VOID_P)
/* One bit per pointer-sized slot that holds a reference. */
2695 create_write_barrier_bitmap (klass, &need_wb, 0);
2697 /* We don't unroll more than 5 stores to avoid code bloat. */
2698 if (size > 5 * SIZEOF_VOID_P) {
2699 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
2700 size += (SIZEOF_VOID_P - 1);
2701 size &= ~(SIZEOF_VOID_P - 1);
2703 EMIT_NEW_ICONST (cfg, iargs [2], size);
2704 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
2705 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
2709 destreg = iargs [0]->dreg;
2710 srcreg = iargs [1]->dreg;
2713 dest_ptr_reg = alloc_preg (cfg);
2714 tmp_reg = alloc_preg (cfg);
/* Running destination pointer, advanced one word per iteration. */
2717 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
2719 while (size >= SIZEOF_VOID_P) {
2720 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, tmp_reg, srcreg, offset);
2721 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
/* Barrier only for slots the bitmap marks as references (guard elided in this extract). */
2724 emit_write_barrier (cfg, iargs [0], NULL, tmp_reg);
2726 offset += SIZEOF_VOID_P;
2727 size -= SIZEOF_VOID_P;
2730 /*tmp += sizeof (void*)*/
2731 if (size >= SIZEOF_VOID_P) {
2732 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
2733 MONO_ADD_INS (cfg->cbb, iargs [0]);
2737 /* Those cannot be references since size < sizeof (void*) */
2739 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
2740 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
2746 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
2747 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
2753 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
2754 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
2763 * Emit code to copy a valuetype of type @klass whose address is stored in
2764 * @src->dreg to memory whose address is stored at @dest->dreg.
/*
 * When @native is TRUE the native (marshalled) size is used and the struct
 * is assumed to contain no references, so no write barriers are emitted.
 */
2767 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2769 MonoInst *iargs [4];
2772 MonoMethod *memcpy_method;
2776 * This check breaks with spilled vars... need to handle it during verification anyway.
2777 * g_assert (klass && klass == src->klass && klass == dest->klass);
2781 n = mono_class_native_size (klass, &align);
2783 n = mono_class_value_size (klass, &align);
2785 /* if native is true there should be no references in the struct */
2786 if (cfg->gen_write_barriers && klass->has_references && !native) {
2787 /* Avoid barriers when storing to the stack */
2788 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2789 (dest->opcode == OP_LDADDR))) {
2790 int context_used = 0;
2795 if (cfg->generic_sharing_context)
2796 context_used = mono_class_check_context_used (klass);
2798 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
2799 if ((cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
/* The klass argument for the mono_value_copy icall: from the rgctx under
 * sharing, otherwise a class constant. */
2801 } else if (context_used) {
2802 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2804 if (cfg->compile_aot) {
2805 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2807 EMIT_NEW_PCONST (cfg, iargs [2], klass);
2808 mono_class_compute_gc_descriptor (klass);
2812 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No references (or stack destination): plain memcpy, inlined when small. */
2817 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2818 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2819 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2823 EMIT_NEW_ICONST (cfg, iargs [2], n);
2825 memcpy_method = get_memcpy_method ();
2826 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
2831 get_memset_method (void)
2833 static MonoMethod *memset_method = NULL;
2834 if (!memset_method) {
2835 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2837 g_error ("Old corlib found. Install a new one");
2839 return memset_method;
2843 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2845 MonoInst *iargs [3];
2848 MonoMethod *memset_method;
2850 /* FIXME: Optimize this for the case when dest is an LDADDR */
2852 mono_class_init (klass);
2853 n = mono_class_value_size (klass, &align);
2855 if (n <= sizeof (gpointer) * 5) {
2856 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2859 memset_method = get_memset_method ();
2861 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2862 EMIT_NEW_ICONST (cfg, iargs [2], n);
2863 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR which loads the runtime generic context for METHOD.  Depending on
 * how the method is shared, the rgctx comes from the MRGCTX variable, the
 * vtable variable, or is reached through `this`'s vtable.  Only valid when
 * compiling shared code (asserted below).
 * NOTE(review): several lines (returns, braces) are elided in this extract;
 * confirm the branch boundaries against the full source.
 */
2868 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2870 MonoInst *this = NULL;
2872 g_assert (cfg->generic_sharing_context);
/* Non-static reference-type method not using method-level context: the
 * context is reachable through `this`. */
2874 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2875 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2876 !method->klass->valuetype)
2877 EMIT_NEW_ARGLOAD (cfg, this, 0);
/* Method-level context: the MRGCTX is stored in the vtable variable. */
2879 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2880 MonoInst *mrgctx_loc, *mrgctx_var;
2883 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2885 mrgctx_loc = mono_get_vtable_var (cfg);
2886 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
/* Static or valuetype method: the vtable variable holds the vtable (or an
 * MRGCTX from which the vtable is loaded). */
2889 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2890 MonoInst *vtable_loc, *vtable_var;
2894 vtable_loc = mono_get_vtable_var (cfg);
2895 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2897 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2898 MonoInst *mrgctx_var = vtable_var;
2901 vtable_reg = alloc_preg (cfg);
2902 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2903 vtable_var->type = STACK_PTR;
/* Instance method: load the vtable from `this`. */
2909 int vtable_reg, res_reg;
2911 vtable_reg = alloc_preg (cfg);
2912 res_reg = alloc_preg (cfg);
2913 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
2918 static MonoJumpInfoRgctxEntry *
2919 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2921 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2922 res->method = method;
2923 res->in_mrgctx = in_mrgctx;
2924 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2925 res->data->type = patch_type;
2926 res->data->data.target = patch_data;
2927 res->info_type = info_type;
2932 static inline MonoInst*
2933 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2935 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
2939 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2940 MonoClass *klass, int rgctx_type)
2942 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2943 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2945 return emit_rgctx_fetch (cfg, rgctx, entry);
2949 * emit_get_rgctx_method:
2951 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2952 * normal constants, else emit a load from the rgctx.
2955 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2956 MonoMethod *cmethod, int rgctx_type)
2958 if (!context_used) {
2961 switch (rgctx_type) {
2962 case MONO_RGCTX_INFO_METHOD:
2963 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
2965 case MONO_RGCTX_INFO_METHOD_RGCTX:
2966 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
2969 g_assert_not_reached ();
2972 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2973 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2975 return emit_rgctx_fetch (cfg, rgctx, entry);
2980 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2981 MonoClassField *field, int rgctx_type)
2983 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2984 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2986 return emit_rgctx_fetch (cfg, rgctx, entry);
2990 * On return the caller must check @klass for load errors.
/*
 * emit_generic_class_init:
 *
 *   Emit a call to the generic class-init trampoline for KLASS.  The vtable
 * argument comes from the rgctx under generic sharing, otherwise from a
 * vtable constant.  NOTE(review): the error-handling lines and some #else
 * branches are elided in this extract.
 */
2993 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
2995 MonoInst *vtable_arg;
2997 int context_used = 0;
2999 if (cfg->generic_sharing_context)
3000 context_used = mono_class_check_context_used (klass);
3003 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3004 klass, MONO_RGCTX_INFO_VTABLE);
3006 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3010 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* LLVM needs a different trampoline signature. */
3013 if (COMPILE_LLVM (cfg))
3014 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3016 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3017 #ifdef MONO_ARCH_VTABLE_REG
/* Pass the vtable in the dedicated architecture register. */
3018 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3019 cfg->uses_vtable_reg = TRUE;
/*
 * save_cast_details:
 *
 *   When --debug=casts is active, record OBJ_REG's dynamic class and the
 * target KLASS into the JIT TLS area (class_cast_from/class_cast_to) so a
 * failed cast can produce a detailed message.  No-op otherwise.
 */
3026 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
3028 if (mini_get_debug_options ()->better_cast_details) {
3029 int to_klass_reg = alloc_preg (cfg);
3030 int vtable_reg = alloc_preg (cfg);
3031 int klass_reg = alloc_preg (cfg);
3032 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
/* Platforms without a TLS intrinsic cannot support this debug option. */
3035 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3039 MONO_ADD_INS (cfg->cbb, tls_get);
/* obj->vtable->klass is the source class of the cast. */
3040 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3041 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3043 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3044 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3045 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3050 reset_cast_details (MonoCompile *cfg)
3052 /* Reset the variables holding the cast details */
3053 if (mini_get_debug_options ()->better_cast_details) {
3054 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3056 MONO_ADD_INS (cfg->cbb, tls_get);
3057 /* It is enough to reset the from field */
3058 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3063 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit a runtime check that OBJ is an array of exactly ARRAY_CLASS,
 * throwing ArrayTypeMismatchException otherwise (used by stelem).  The
 * comparison strategy depends on the compilation mode (shared, gshared,
 * AOT, JIT); NOTE(review): some braces/#else lines are elided here.
 */
3066 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3068 int vtable_reg = alloc_preg (cfg);
3069 int context_used = 0;
3071 if (cfg->generic_sharing_context)
3072 context_used = mono_class_check_context_used (array_class);
3074 save_cast_details (cfg, array_class, obj->dreg);
/* Faulting load: also acts as the null check on obj. */
3076 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Domain-shared code: compare the klass, since vtables are per-domain. */
3078 if (cfg->opt & MONO_OPT_SHARED) {
3079 int class_reg = alloc_preg (cfg);
3080 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3081 if (cfg->compile_aot) {
3082 int klass_reg = alloc_preg (cfg);
3083 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3084 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3086 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
/* Generic sharing: fetch the expected vtable from the rgctx. */
3088 } else if (context_used) {
3089 MonoInst *vtable_ins;
3091 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3092 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3094 if (cfg->compile_aot) {
3098 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3100 vt_reg = alloc_preg (cfg);
3101 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3102 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3105 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3107 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3111 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3113 reset_cast_details (cfg);
3117 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3118 * generic code is generated.
3121 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3123 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3126 MonoInst *rgctx, *addr;
3128 /* FIXME: What if the class is shared? We might not
3129 have to get the address of the method from the
3131 addr = emit_get_rgctx_method (cfg, context_used, method,
3132 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3134 rgctx = emit_get_rgctx (cfg, method, context_used);
3136 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3138 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *
 *   Emit an unbox of the object in sp[0] to the valuetype KLASS: check the
 * dynamic type (rank 0 and matching element class), then compute the
 * address of the payload just past the MonoObject header.  NOTE(review):
 * some braces/else lines are elided in this extract.
 */
3143 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3147 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3148 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3149 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3150 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3152 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check. */
3153 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3154 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3156 /* FIXME: generics */
3157 g_assert (klass->rank == 0);
/* An array can never unbox to a valuetype. */
3160 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3161 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3163 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3164 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
/* Shared code: fetch the expected element class from the rgctx. */
3167 MonoInst *element_class;
3169 /* This assertion is from the unboxcast insn */
3170 g_assert (klass->rank == 0);
3172 element_class = emit_get_rgctx_klass (cfg, context_used,
3173 klass->element_class, MONO_RGCTX_INFO_KLASS);
3175 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3176 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3178 save_cast_details (cfg, klass->element_class, obj_reg);
3179 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3180 reset_cast_details (cfg);
/* Result: pointer to the value stored after the object header. */
3183 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
3184 MONO_ADD_INS (cfg->cbb, add);
3185 add->type = STACK_MP;
3192 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit the allocation of an object of type KLASS (FOR_BOX is TRUE when the
 * allocation implements a box).  Picks between domain-shared allocation,
 * rgctx-based allocation for gshared code, a specialized mscorlib helper for
 * out-of-line AOT code, the GC's managed allocator, and the generic
 * allocation function.  NOTE(review): several lines (returns, #endif, else
 * branches) are elided in this extract.
 */
3195 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3197 MonoInst *iargs [2];
/* Generic-sharing path: the klass/vtable argument comes from the rgctx. */
3203 MonoInst *iargs [2];
3206 FIXME: we cannot get managed_alloc here because we can't get
3207 the class's vtable (because it's not a closed class)
3209 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3210 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3213 if (cfg->opt & MONO_OPT_SHARED)
3214 rgctx_info = MONO_RGCTX_INFO_KLASS;
3216 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3217 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3219 if (cfg->opt & MONO_OPT_SHARED) {
3220 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3222 alloc_ftn = mono_object_new;
3225 alloc_ftn = mono_object_new_specific;
3228 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Domain-shared code: allocate through mono_object_new (domain, klass). */
3231 if (cfg->opt & MONO_OPT_SHARED) {
3232 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3233 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3235 alloc_ftn = mono_object_new;
3236 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3237 /* This happens often in argument checking code, eg. throw new FooException... */
3238 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3239 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3240 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3242 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3243 MonoMethod *managed_alloc = NULL;
/* Vtable creation failed: report a type-load error to the caller. */
3247 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3248 cfg->exception_ptr = klass;
3252 #ifndef MONO_CROSS_COMPILE
3253 managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3256 if (managed_alloc) {
3257 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3258 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3260 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocators want the instance size (in words) as first argument. */
3262 guint32 lw = vtable->klass->instance_size;
3263 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3264 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3265 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3268 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3272 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3276 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit the boxing of the valuetype VAL of type KLASS: Nullable<T> goes
 * through the corlib Box method (via the rgctx in shared code), other types
 * allocate an object and store the value past the header.
 */
3279 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
3281 MonoInst *alloc, *ins;
3283 if (mono_class_is_nullable (klass)) {
3284 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3287 /* FIXME: What if the class is shared? We might not
3288 have to get the method address from the RGCTX. */
3289 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3290 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3291 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3293 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3295 return mono_emit_method_call (cfg, method, &val, NULL);
3299 alloc = handle_alloc (cfg, klass, TRUE, context_used);
/* Store the value into the freshly allocated object, after the header. */
3303 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return whether KLASS instantiates a (co/contra)variant type parameter
 * with a reference type (or, in shared code, with an open VAR/MVAR which
 * could be one at runtime).  Used to decide when isinst/castclass need the
 * variance-aware cache helpers.  NOTE(review): the return statements are
 * elided in this extract.
 */
3310 mini_class_has_reference_variant_generic_argument (MonoClass *klass, int context_used)
3313 MonoGenericContainer *container;
3314 MonoGenericInst *ginst;
3316 if (klass->generic_class) {
3317 container = klass->generic_class->container_class->generic_container;
3318 ginst = klass->generic_class->context.class_inst;
3319 } else if (klass->generic_container && context_used) {
3320 container = klass->generic_container;
3321 ginst = container->context.class_inst;
/* Inspect each type argument of a variant parameter. */
3326 for (i = 0; i < container->type_argc; ++i) {
3328 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
3330 type = ginst->type_argv [i];
3331 if (MONO_TYPE_IS_REFERENCE (type))
/* Under sharing an open parameter may be instantiated with a reference type. */
3334 if (context_used && (type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR))
3340 // FIXME: This doesn't work yet (class libs tests fail?)
/* Always TRUE for now: the leading TRUE short-circuits the rest, so every
 * isinst/castclass takes the icall path.  The remaining tests describe the
 * cases that would genuinely need it once the FIXME above is resolved. */
3341 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
3344 * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 *
 *   Emit a castclass of SRC to KLASS, throwing InvalidCastException on
 * mismatch.  Variant generic interfaces go through the cache-backed marshal
 * helper; otherwise either an icall (complex cases) or inline vtable/klass
 * comparisons are used.  NOTE(review): some lines (args[0] setup, braces)
 * are elided in this extract.
 */
3347 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3349 MonoBasicBlock *is_null_bb;
3350 int obj_reg = src->dreg;
3351 int vtable_reg = alloc_preg (cfg);
3352 MonoInst *klass_inst = NULL;
/* Variant interface: delegate to the castclass-with-cache helper. */
3357 if(mini_class_has_reference_variant_generic_argument (klass, context_used)) {
3358 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
3359 MonoInst *cache_ins;
3361 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3366 /* klass - it's the second element of the cache entry*/
3367 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3370 args [2] = cache_ins;
3372 return mono_emit_method_call (cfg, mono_castclass, args, NULL);
3375 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3377 if (is_complex_isinst (klass)) {
3378 /* Complex case, handle by an icall */
3384 args [1] = klass_inst;
3386 return mono_emit_jit_icall (cfg, mono_object_castclass, args);
3388 /* Simple case, handled by the code below */
/* Inline path: null always passes the cast. */
3392 NEW_BBLOCK (cfg, is_null_bb);
3394 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3395 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3397 save_cast_details (cfg, klass, obj_reg);
3399 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3400 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3401 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3403 int klass_reg = alloc_preg (cfg);
3405 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array class: a single klass (or vtable) equality check suffices. */
3407 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3408 /* the remoting code is broken, access the class for now */
3409 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3410 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3412 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3413 cfg->exception_ptr = klass;
3416 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3418 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3419 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3421 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3423 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3424 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3428 MONO_START_BB (cfg, is_null_bb);
3430 reset_cast_details (cfg);
3436 * Returns NULL and set the cfg exception on error.
/*
 * handle_isinst:
 *
 *   Emit an isinst test of SRC against KLASS.  The result register holds the
 * object on success and NULL on failure.  Variant generic interfaces use the
 * cache-backed marshal helper, complex cases use the mono_object_isinst
 * icall, and the remaining cases are expanded inline as vtable/klass
 * comparisons branching to false_bb / is_null_bb.  NOTE(review): several
 * lines (args[0] setup, braces, returns) are elided in this extract.
 */
3439 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3442 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3443 int obj_reg = src->dreg;
3444 int vtable_reg = alloc_preg (cfg);
3445 int res_reg = alloc_preg (cfg);
3446 MonoInst *klass_inst = NULL;
/* Variant interface: delegate to the isinst-with-cache helper. */
3451 if(mini_class_has_reference_variant_generic_argument (klass, context_used)) {
3452 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
3453 MonoInst *cache_ins;
3455 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3460 /* klass - it's the second element of the cache entry*/
3461 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3464 args [2] = cache_ins;
3466 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
3469 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3471 if (is_complex_isinst (klass)) {
3472 /* Complex case, handle by an icall */
3478 args [1] = klass_inst;
3480 return mono_emit_jit_icall (cfg, mono_object_isinst, args);
3482 /* Simple case, the code below can handle it */
3486 NEW_BBLOCK (cfg, is_null_bb);
3487 NEW_BBLOCK (cfg, false_bb);
3488 NEW_BBLOCK (cfg, end_bb);
3490 /* Do the assignment at the beginning, so the other assignment can be if converted */
3491 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3492 ins->type = STACK_OBJ;
/* A null reference is never an instance failure: it falls through unchanged. */
3495 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3496 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3498 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3500 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3501 g_assert (!context_used);
3502 /* the is_null_bb target simply copies the input register to the output */
3503 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3505 int klass_reg = alloc_preg (cfg);
/* Array case: compare rank, then the element (cast) class. */
3508 int rank_reg = alloc_preg (cfg);
3509 int eclass_reg = alloc_preg (cfg);
3511 g_assert (!context_used);
3512 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3513 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3514 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3515 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3516 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Special-case element types with non-trivial assignment rules (enums etc.). */
3517 if (klass->cast_class == mono_defaults.object_class) {
3518 int parent_reg = alloc_preg (cfg);
3519 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3520 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3521 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3522 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3523 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3524 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3525 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3526 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3527 } else if (klass->cast_class == mono_defaults.enum_class) {
3528 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3529 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3530 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3531 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3533 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3534 /* Check that the object is a vector too */
3535 int bounds_reg = alloc_preg (cfg);
3536 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3537 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3538 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3541 /* the is_null_bb target simply copies the input register to the output */
3542 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3544 } else if (mono_class_is_nullable (klass)) {
3545 g_assert (!context_used);
3546 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3547 /* the is_null_bb target simply copies the input register to the output */
3548 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed class: a single klass (or vtable) equality check suffices. */
3550 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3551 g_assert (!context_used);
3552 /* the remoting code is broken, access the class for now */
3553 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3554 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3556 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3557 cfg->exception_ptr = klass;
3560 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3562 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3563 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3565 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3566 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3568 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3569 /* the is_null_bb target simply copies the input register to the output */
3570 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* Failure: result is NULL; success paths fall through with the object. */
3575 MONO_START_BB (cfg, false_bb);
3577 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3578 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3580 MONO_START_BB (cfg, is_null_bb);
3582 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit the IR implementing the "checked isinst" operation: test whether the
 * object reference in SRC is an instance of KLASS, producing an integer result
 * (encoding below). The extra '2' result exists so callers can fall back to a
 * runtime check for transparent proxies (remoting) whose type cannot be
 * decided at JIT time.
 */
handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
	/* This opcode takes as input an object reference and a class, and returns:
	0) if the object is an instance of the class,
	1) if the object is not instance of the class,
	2) if the object is a proxy whose type cannot be determined */

	MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
	int obj_reg = src->dreg;
	int dreg = alloc_ireg (cfg);
	int klass_reg = alloc_preg (cfg);

	NEW_BBLOCK (cfg, true_bb);
	NEW_BBLOCK (cfg, false_bb);
	NEW_BBLOCK (cfg, false2_bb);
	NEW_BBLOCK (cfg, end_bb);
	NEW_BBLOCK (cfg, no_proxy_bb);

	/* A null reference is not an instance of anything -> result 1 */
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);

	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
		NEW_BBLOCK (cfg, interface_fail_bb);

		/* Fast path: interface check against the object's vtable */
		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
		MONO_START_BB (cfg, interface_fail_bb);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));

		/* Interface check failed: only a transparent proxy can still succeed */
		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);

		/* Proxy with custom type info -> result 2 (needs a runtime check) */
		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);

	/* Non-interface case: inspect the object's class */
	tmp_reg = alloc_preg (cfg);
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));

	/* Ordinary objects take the plain subclass check below */
	mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
	/* Transparent proxy: test against the class being proxied */
	tmp_reg = alloc_preg (cfg);
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));

	/* Proxies without custom type info behave like ordinary objects */
	tmp_reg = alloc_preg (cfg);
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);

	/* On failure the proxy result is 2, not 1 */
	mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);

	MONO_START_BB (cfg, no_proxy_bb);

	mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);

	MONO_START_BB (cfg, false_bb);

	/* Result 1: not an instance */
	MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

	MONO_START_BB (cfg, false2_bb);

	/* Result 2: proxy whose type can't be decided here */
	MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

	MONO_START_BB (cfg, true_bb);

	/* Result 0: is an instance */
	MONO_EMIT_NEW_ICONST (cfg, dreg, 0);

	MONO_START_BB (cfg, end_bb);

	/* Materialize the result as an I4-typed instruction for the caller */
	MONO_INST_NEW (cfg, ins, OP_ICONST);
	ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit the IR implementing the "checked castclass" operation for KLASS
 * applied to the object reference in SRC (result encoding in the comment
 * below). Unlike handle_cisinst (), failure throws instead of returning a
 * failure code; result 1 is reserved for transparent proxies that need a
 * runtime check.
 */
handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
	/* This opcode takes as input an object reference and a class, and returns:
	0) if the object is an instance of the class,
	1) if the object is a proxy whose type cannot be determined
	an InvalidCastException exception is thrown otherwhise*/

	MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
	int obj_reg = src->dreg;
	int dreg = alloc_ireg (cfg);
	int tmp_reg = alloc_preg (cfg);
	int klass_reg = alloc_preg (cfg);

	NEW_BBLOCK (cfg, end_bb);
	NEW_BBLOCK (cfg, ok_result_bb);

	/* Casting a null reference always succeeds */
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);

	/* Record class/object info used to build the exception message on failure */
	save_cast_details (cfg, klass, obj_reg);

	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
		NEW_BBLOCK (cfg, interface_fail_bb);

		/* Fast path: interface check against the object's vtable */
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
		MONO_START_BB (cfg, interface_fail_bb);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));

		/* Throws unless the object is a transparent proxy */
		mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);

		/* Proxy without custom type info can't possibly match -> throw */
		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
		MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");

		/* Result 1: let the caller do a runtime check on the proxy */
		MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

		/* Non-interface case: inspect the object's class */
		NEW_BBLOCK (cfg, no_proxy_bb);

		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);

		/* Transparent proxy: test against the class being proxied */
		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));

		/* Proxies without custom type info behave like ordinary objects */
		tmp_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);

		NEW_BBLOCK (cfg, fail_1_bb);

		/* Proxy subclass check: failure falls back to the runtime (result 1) */
		mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);

		MONO_START_BB (cfg, fail_1_bb);

		MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);

		MONO_START_BB (cfg, no_proxy_bb);

		/* Plain castclass: throws InvalidCastException on mismatch */
		mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);

	MONO_START_BB (cfg, ok_result_bb);

	/* Result 0: cast succeeded */
	MONO_EMIT_NEW_ICONST (cfg, dreg, 0);

	MONO_START_BB (cfg, end_bb);

	/* Materialize the result as an I4-typed instruction for the caller */
	MONO_INST_NEW (cfg, ins, OP_ICONST);
	ins->type = STACK_I4;
/*
 * handle_delegate_ctor:
 *
 *   Emit inline IR doing the work of mono_delegate_ctor (): allocate the
 * delegate object of class KLASS and initialize its target, method,
 * method_code and invoke_impl fields. TARGET is the 'this' of the delegate
 * (a NULL constant store is optimized away); METHOD is the method the
 * delegate will invoke.
 * Returns NULL and set the cfg exception on error.
 */
static G_GNUC_UNUSED MonoInst*
handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)

	gpointer *trampoline;
	MonoInst *obj, *method_ins, *tramp_ins;

	obj = handle_alloc (cfg, klass, FALSE, 0);

	/* Inline the contents of mono_delegate_ctor */

	/* Set target field */
	/* Optimize away setting of NULL target */
	if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
		if (cfg->gen_write_barriers) {
			/* GC write barrier for the reference store just emitted */
			dreg = alloc_preg (cfg);
			EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target));
			emit_write_barrier (cfg, ptr, target, 0);

	/* Set method field */
	method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
	if (cfg->gen_write_barriers) {
		/* GC write barrier for the method store */
		dreg = alloc_preg (cfg);
		EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method));
		emit_write_barrier (cfg, ptr, method_ins, 0);
	/*
	 * To avoid looking up the compiled code belonging to the target method
	 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
	 * store it, and we fill it after the method has been compiled.
	 */
	if (!cfg->compile_aot && !method->dynamic) {
		MonoInst *code_slot_ins;

		code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);

		/* Look up (creating lazily) the per-domain code slot for METHOD */
		domain = mono_domain_get ();
		mono_domain_lock (domain);
		if (!domain_jit_info (domain)->method_code_hash)
			domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
		code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
			code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
			g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
		mono_domain_unlock (domain);

		EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);

		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);

	/* Set invoke_impl field */
	if (cfg->compile_aot) {
		/* AOT: the trampoline address is patched in at load time */
		EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
		trampoline = mono_create_delegate_trampoline (klass);
		EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);

	/* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call to the rank-specific mono_array_new_va () icall wrapper to
 * allocate a multi-dimensional array. SP holds the CIL stack arguments for
 * the vararg call.
 */
handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)

	MonoJitICallInfo *info;

	/* Need to register the icall so it gets an icall wrapper */
	info = mono_get_array_new_va_icall (rank);

	cfg->flags |= MONO_CFG_HAS_VARARGS;

	/* mono_array_new_va () needs a vararg calling convention */
	cfg->disable_llvm = TRUE;

	/* FIXME: This uses info->sig, but it should use the signature of the wrapper */
	return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Emit an OP_LOAD_GOTADDR at the start of the entry basic block to
 * initialize cfg->got_var, plus a dummy use in the exit block so the
 * variable stays live for the whole method. Does nothing when there is no
 * got_var or it has already been allocated.
 */
mono_emit_load_got_addr (MonoCompile *cfg)

	MonoInst *getaddr, *dummy_use;

	/* Nothing to do without a GOT var, or if it was set up already */
	if (!cfg->got_var || cfg->got_var_allocated)

	MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
	getaddr->dreg = cfg->got_var->dreg;

	/* Add it to the start of the first bblock */
	if (cfg->bb_entry->code) {
		/* Prepend by linking in front of the existing first instruction */
		getaddr->next = cfg->bb_entry->code;
		cfg->bb_entry->code = getaddr;
		MONO_ADD_INS (cfg->bb_entry, getaddr);

	cfg->got_var_allocated = TRUE;

	/*
	 * Add a dummy use to keep the got_var alive, since real uses might
	 * only be generated by the back ends.
	 * Add it to end_bblock, so the variable's lifetime covers the whole
	 * method.
	 * It would be better to make the usage of the got var explicit in all
	 * cases when the backend needs it (i.e. calls, throw etc.), so this
	 * wouldn't be needed.
	 */
	NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
	MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Maximum IL code size (bytes) a method may have and still be considered for
 * inlining; initialized lazily in mono_method_check_inlining () from the
 * MONO_INLINELIMIT environment variable, falling back to INLINE_LENGTH_LIMIT. */
static int inline_limit;
static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD is eligible for inlining into the method being
 * compiled by CFG. Rejects methods that are too large, marked noinlining or
 * synchronized, belong to MarshalByRef classes, carry declarative security,
 * or whose class initializer cannot be dealt with eagerly.
 */
mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)

	MonoMethodHeaderSummary header;

#ifdef MONO_ARCH_SOFT_FLOAT
	MonoMethodSignature *sig = mono_method_signature (method);

	if (cfg->generic_sharing_context)

	/* Limit the depth of nested inlining */
	if (cfg->inline_depth > 10)

#ifdef MONO_ARCH_HAVE_LMF_OPS
	/* NOTE(review): this reads 'signature->ret' while the soft-float local above
	 * is named 'sig' — confirm which identifier is in scope on LMF_OPS targets. */
	if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
		 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
	    !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))

	if (!mono_method_get_header_summary (method, &header))

	/*runtime, icall and pinvoke are checked by summary call*/
	if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
	    (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
	    (method->klass->marshalbyref) ||

	/* also consider num_locals? */
	/* Do the size check early to avoid creating vtables */
	if (!inline_limit_inited) {
		/* MONO_INLINELIMIT overrides the compiled-in INLINE_LENGTH_LIMIT */
		if (getenv ("MONO_INLINELIMIT"))
			inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
			inline_limit = INLINE_LENGTH_LIMIT;
		inline_limit_inited = TRUE;
	if (header.code_size >= inline_limit)

	/*
	 * if we can initialize the class of the method right away, we do,
	 * otherwise we don't allow inlining if the class needs initialization,
	 * since it would mean inserting a call to mono_runtime_class_init()
	 * inside the inlined code
	 */
	if (!(cfg->opt & MONO_OPT_SHARED)) {
		if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
			if (cfg->run_cctors && method->klass->has_cctor) {
				/*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
				if (!method->klass->runtime_info)
					/* No vtable created yet */
				vtable = mono_class_vtable (cfg->domain, method->klass);
				/* This makes so that inline cannot trigger */
				/* .cctors: too many apps depend on them */
				/* running with a specific order... */
				if (! vtable->initialized)
				mono_runtime_class_init (vtable);
		} else if (mono_class_needs_cctor_run (method->klass, NULL)) {
			if (!method->klass->runtime_info)
				/* No vtable created yet */
			vtable = mono_class_vtable (cfg->domain, method->klass);
			if (!vtable->initialized)
	/*
	 * If we're compiling for shared code
	 * the cctor will need to be run at aot method load time, for example,
	 * or at the end of the compilation of the inlining method.
	 */
	if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))

	/*
	 * CAS - do not inline methods with declarative security
	 * Note: this has to be before any possible return TRUE;
	 */
	if (mono_method_has_declsec (method))

#ifdef MONO_ARCH_SOFT_FLOAT
	/* Soft-float targets: reject methods that take or return R4 values */
	if (sig->ret && sig->ret->type == MONO_TYPE_R4)
	for (i = 0; i < sig->param_count; ++i)
		if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Decide whether a static field access compiled into METHOD needs to run
 * the class constructor of VTABLE's class first. AOT compilation cannot rely
 * on the runtime 'initialized' state, hence the extra check.
 */
mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)

	/* Already initialized at JIT time (not usable for AOT output) */
	if (vtable->initialized && !cfg->compile_aot)

	if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)

	if (!mono_class_needs_cctor_run (vtable->klass, method))

	if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
		/* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR that computes the address of element INDEX of the
 * one-dimensional array ARR whose elements have type KLASS; BCHECK selects
 * bounds checking. On x86/amd64 a single LEA covers power-of-two element
 * sizes; other cases use an explicit multiply and add.
 */
mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)

	int mult_reg, add_reg, array_reg, index_reg, index2_reg;

	mono_class_init (klass);
	size = mono_class_array_element_size (klass);

	mult_reg = alloc_preg (cfg);
	array_reg = arr->dreg;
	index_reg = index->dreg;

#if SIZEOF_REGISTER == 8
	/* The array reg is 64 bits but the index reg is only 32 */
	if (COMPILE_LLVM (cfg)) {
		/* LLVM performs the widening itself */
		index2_reg = index_reg;
		index2_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
	/* 32-bit registers: narrow a 64-bit index down to 32 bits */
	if (index->type == STACK_I8) {
		index2_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
		index2_reg = index_reg;

	MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);

#if defined(TARGET_X86) || defined(TARGET_AMD64)
	if (size == 1 || size == 2 || size == 4 || size == 8) {
		/* Maps element size -> shift amount (1/2/4/8 -> 0/1/2/3) */
		static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };

		/* Single LEA: array + (index << log2(size)) + offsetof(vector) */
		EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
		ins->type = STACK_PTR;

	/* Generic path: addr = array + index * size + offsetof(vector) */
	add_reg = alloc_preg (cfg);

	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
	NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
	ins->type = STACK_PTR;
	MONO_ADD_INS (cfg->cbb, ins);
#ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [INDEX_INS1, INDEX_INS2] of the
 * two-dimensional array ARR whose elements have type KLASS, with a
 * lower-bound adjusted range check on each dimension. Only built on targets
 * with native multiply support (it relies on OP_PMUL).
 */
mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)

	int bounds_reg = alloc_preg (cfg);
	int add_reg = alloc_preg (cfg);
	int mult_reg = alloc_preg (cfg);
	int mult2_reg = alloc_preg (cfg);
	int low1_reg = alloc_preg (cfg);
	int low2_reg = alloc_preg (cfg);
	int high1_reg = alloc_preg (cfg);
	int high2_reg = alloc_preg (cfg);
	int realidx1_reg = alloc_preg (cfg);
	int realidx2_reg = alloc_preg (cfg);
	int sum_reg = alloc_preg (cfg);

	mono_class_init (klass);
	size = mono_class_array_element_size (klass);

	index1 = index_ins1->dreg;
	index2 = index_ins2->dreg;

	/* range checking */
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
				       arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));

	/* Dimension 0: realidx = index - lower_bound, then unsigned-compare to length */
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
				       bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
	MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
				       bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
	MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
	MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");

	/* Dimension 1: the second MonoArrayBounds entry sits right after the first */
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
				       bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
	MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
				       bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
	MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
	MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");

	/* addr = arr + (realidx1 * dim1_length + realidx2) * size + offsetof(vector) */
	MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
	NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));

	ins->type = STACK_MP;

	MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit IR for the Address () method of the array class CMETHOD belongs to.
 * Rank 1 uses the inline single-dimension path; rank 2 uses the inline
 * two-dimension path (when intrinsics are enabled and OP_LMUL is available);
 * higher ranks call the generated array-address marshalling wrapper.
 */
mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)

	MonoMethod *addr_method;

	/* For setters the last parameter is the value, not an index */
	rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);

		return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);

#ifndef MONO_ARCH_EMULATE_MUL_DIV
	/* emit_ldelema_2 depends on OP_LMUL */
	if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
		return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);

	/* General case: call the rank/element-size specific address wrapper */
	element_size = mono_class_array_element_size (cmethod->klass->element_class);
	addr_method = mono_marshal_get_array_address (rank, element_size);
	addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint (see mono_set_break_policy ()). */
static MonoBreakPolicy
always_insert_breakpoint (MonoMethod *method)
	return MONO_BREAK_POLICY_ALWAYS;

/* Callback consulted whenever a breakpoint is about to be inserted;
 * replaceable by embedders through mono_set_break_policy (). */
static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4170 * mono_set_break_policy:
4171 * policy_callback: the new callback function
 * Allow embedders to decide whether to actually obey breakpoint instructions
4174 * (both break IL instructions and Debugger.Break () method calls), for example
4175 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4176 * untrusted or semi-trusted code.
4178 * @policy_callback will be called every time a break point instruction needs to
4179 * be inserted with the method argument being the method that calls Debugger.Break()
4180 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4181 * if it wants the breakpoint to not be effective in the given method.
4182 * #MONO_BREAK_POLICY_ALWAYS is the default.
mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
	/* A NULL callback restores the default always-break policy */
	if (policy_callback)
		break_policy_func = policy_callback;
		break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint: (the misspelling is part of the symbol name,
 * kept for existing callers)
 *
 *   Ask the installed break policy callback whether a breakpoint in METHOD
 * should actually be inserted.
 */
should_insert_brekpoint (MonoMethod *method) {
	switch (break_policy_func (method)) {
	case MONO_BREAK_POLICY_ALWAYS:
	case MONO_BREAK_POLICY_NEVER:
	case MONO_BREAK_POLICY_ON_DBG:
		/* Breakpoints are only effective when the Mono debugger is attached */
		return mono_debug_using_mono_debugger ();
		g_warning ("Incorrect value returned from break policy callback");
/* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Emit an inline element load or store for the generic value icalls:
 * args [0] is the array, args [1] the index, args [2] the address of the
 * value. The first load/store pair copies value -> element (set); the second
 * copies element -> value (get); presumably selected by IS_SET via the
 * elided condition.
 */
emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)

	MonoInst *addr, *store, *load;
	MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);

	/* the bounds check is already done by the callers */
	addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to emit intrinsic IR in place of a call to the constructor CMETHOD;
 * in the visible code only SIMD ctor intrinsics are attempted, and only when
 * MONO_OPT_SIMD is enabled on a SIMD-capable architecture.
 */
mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)

	MonoInst *ins = NULL;
#ifdef MONO_ARCH_SIMD_INTRINSICS
	if (cfg->opt & MONO_OPT_SIMD) {
		ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
4243 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4245 MonoInst *ins = NULL;
4247 static MonoClass *runtime_helpers_class = NULL;
4248 if (! runtime_helpers_class)
4249 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
4250 "System.Runtime.CompilerServices", "RuntimeHelpers");
4252 if (cmethod->klass == mono_defaults.string_class) {
4253 if (strcmp (cmethod->name, "get_Chars") == 0) {
4254 int dreg = alloc_ireg (cfg);
4255 int index_reg = alloc_preg (cfg);
4256 int mult_reg = alloc_preg (cfg);
4257 int add_reg = alloc_preg (cfg);
4259 #if SIZEOF_REGISTER == 8
4260 /* The array reg is 64 bits but the index reg is only 32 */
4261 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
4263 index_reg = args [1]->dreg;
4265 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
4267 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4268 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
4269 add_reg = ins->dreg;
4270 /* Avoid a warning */
4272 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4275 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
4276 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4277 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4278 add_reg, G_STRUCT_OFFSET (MonoString, chars));
4280 type_from_op (ins, NULL, NULL);
4282 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4283 int dreg = alloc_ireg (cfg);
4284 /* Decompose later to allow more optimizations */
4285 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
4286 ins->type = STACK_I4;
4287 ins->flags |= MONO_INST_FAULT;
4288 cfg->cbb->has_array_access = TRUE;
4289 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
4292 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
4293 int mult_reg = alloc_preg (cfg);
4294 int add_reg = alloc_preg (cfg);
4296 /* The corlib functions check for oob already. */
4297 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
4298 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4299 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
4300 return cfg->cbb->last_ins;
4303 } else if (cmethod->klass == mono_defaults.object_class) {
4305 if (strcmp (cmethod->name, "GetType") == 0) {
4306 int dreg = alloc_preg (cfg);
4307 int vt_reg = alloc_preg (cfg);
4308 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4309 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
4310 type_from_op (ins, NULL, NULL);
4313 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
4314 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
4315 int dreg = alloc_ireg (cfg);
4316 int t1 = alloc_ireg (cfg);
4318 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
4319 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
4320 ins->type = STACK_I4;
4324 } else if (strcmp (cmethod->name, ".ctor") == 0) {
4325 MONO_INST_NEW (cfg, ins, OP_NOP);
4326 MONO_ADD_INS (cfg->cbb, ins);
4330 } else if (cmethod->klass == mono_defaults.array_class) {
4331 if (strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
4332 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
4334 #ifndef MONO_BIG_ARRAYS
4336 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
4339 if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
4340 int dreg = alloc_ireg (cfg);
4341 int bounds_reg = alloc_ireg (cfg);
4342 MonoBasicBlock *end_bb, *szarray_bb;
4343 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
4345 NEW_BBLOCK (cfg, end_bb);
4346 NEW_BBLOCK (cfg, szarray_bb);
4348 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
4349 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
4350 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4351 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
4352 /* Non-szarray case */
4354 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4355 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4357 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4358 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4359 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4360 MONO_START_BB (cfg, szarray_bb);
4363 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4364 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4366 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4367 MONO_START_BB (cfg, end_bb);
4369 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
4370 ins->type = STACK_I4;
4376 if (cmethod->name [0] != 'g')
4379 if (strcmp (cmethod->name, "get_Rank") == 0) {
4380 int dreg = alloc_ireg (cfg);
4381 int vtable_reg = alloc_preg (cfg);
4382 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
4383 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4384 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
4385 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4386 type_from_op (ins, NULL, NULL);
4389 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4390 int dreg = alloc_ireg (cfg);
4392 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4393 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4394 type_from_op (ins, NULL, NULL);
4399 } else if (cmethod->klass == runtime_helpers_class) {
4401 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
4402 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
4406 } else if (cmethod->klass == mono_defaults.thread_class) {
4407 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
4408 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
4409 MONO_ADD_INS (cfg->cbb, ins);
4411 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
4412 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4413 MONO_ADD_INS (cfg->cbb, ins);
4416 } else if (cmethod->klass == mono_defaults.monitor_class) {
4417 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
4418 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
4421 if (COMPILE_LLVM (cfg)) {
4423 * Pass the argument normally, the LLVM backend will handle the
4424 * calling convention problems.
4426 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4428 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
4429 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4430 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4431 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4434 return (MonoInst*)call;
4435 } else if (strcmp (cmethod->name, "Exit") == 0) {
4438 if (COMPILE_LLVM (cfg)) {
4439 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4441 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
4442 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4443 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4444 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4447 return (MonoInst*)call;
4449 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4450 MonoMethod *fast_method = NULL;
4452 /* Avoid infinite recursion */
4453 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
4454 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
4455 strcmp (cfg->method->name, "FastMonitorExit") == 0))
4458 if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) ||
4459 strcmp (cmethod->name, "Exit") == 0)
4460 fast_method = mono_monitor_get_fast_path (cmethod);
4464 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
4466 } else if (cmethod->klass->image == mono_defaults.corlib &&
4467 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
4468 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
4471 #if SIZEOF_REGISTER == 8
4472 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
4473 /* 64 bit reads are already atomic */
4474 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
4475 ins->dreg = mono_alloc_preg (cfg);
4476 ins->inst_basereg = args [0]->dreg;
4477 ins->inst_offset = 0;
4478 MONO_ADD_INS (cfg->cbb, ins);
4482 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
4483 if (strcmp (cmethod->name, "Increment") == 0) {
4484 MonoInst *ins_iconst;
4487 if (fsig->params [0]->type == MONO_TYPE_I4)
4488 opcode = OP_ATOMIC_ADD_NEW_I4;
4489 #if SIZEOF_REGISTER == 8
4490 else if (fsig->params [0]->type == MONO_TYPE_I8)
4491 opcode = OP_ATOMIC_ADD_NEW_I8;
4494 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4495 ins_iconst->inst_c0 = 1;
4496 ins_iconst->dreg = mono_alloc_ireg (cfg);
4497 MONO_ADD_INS (cfg->cbb, ins_iconst);
4499 MONO_INST_NEW (cfg, ins, opcode);
4500 ins->dreg = mono_alloc_ireg (cfg);
4501 ins->inst_basereg = args [0]->dreg;
4502 ins->inst_offset = 0;
4503 ins->sreg2 = ins_iconst->dreg;
4504 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4505 MONO_ADD_INS (cfg->cbb, ins);
4507 } else if (strcmp (cmethod->name, "Decrement") == 0) {
4508 MonoInst *ins_iconst;
4511 if (fsig->params [0]->type == MONO_TYPE_I4)
4512 opcode = OP_ATOMIC_ADD_NEW_I4;
4513 #if SIZEOF_REGISTER == 8
4514 else if (fsig->params [0]->type == MONO_TYPE_I8)
4515 opcode = OP_ATOMIC_ADD_NEW_I8;
4518 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4519 ins_iconst->inst_c0 = -1;
4520 ins_iconst->dreg = mono_alloc_ireg (cfg);
4521 MONO_ADD_INS (cfg->cbb, ins_iconst);
4523 MONO_INST_NEW (cfg, ins, opcode);
4524 ins->dreg = mono_alloc_ireg (cfg);
4525 ins->inst_basereg = args [0]->dreg;
4526 ins->inst_offset = 0;
4527 ins->sreg2 = ins_iconst->dreg;
4528 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4529 MONO_ADD_INS (cfg->cbb, ins);
4531 } else if (strcmp (cmethod->name, "Add") == 0) {
4534 if (fsig->params [0]->type == MONO_TYPE_I4)
4535 opcode = OP_ATOMIC_ADD_NEW_I4;
4536 #if SIZEOF_REGISTER == 8
4537 else if (fsig->params [0]->type == MONO_TYPE_I8)
4538 opcode = OP_ATOMIC_ADD_NEW_I8;
4542 MONO_INST_NEW (cfg, ins, opcode);
4543 ins->dreg = mono_alloc_ireg (cfg);
4544 ins->inst_basereg = args [0]->dreg;
4545 ins->inst_offset = 0;
4546 ins->sreg2 = args [1]->dreg;
4547 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4548 MONO_ADD_INS (cfg->cbb, ins);
4551 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
4553 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
4554 if (strcmp (cmethod->name, "Exchange") == 0) {
4556 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
4558 if (fsig->params [0]->type == MONO_TYPE_I4)
4559 opcode = OP_ATOMIC_EXCHANGE_I4;
4560 #if SIZEOF_REGISTER == 8
4561 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
4562 (fsig->params [0]->type == MONO_TYPE_I))
4563 opcode = OP_ATOMIC_EXCHANGE_I8;
4565 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
4566 opcode = OP_ATOMIC_EXCHANGE_I4;
4571 MONO_INST_NEW (cfg, ins, opcode);
4572 ins->dreg = mono_alloc_ireg (cfg);
4573 ins->inst_basereg = args [0]->dreg;
4574 ins->inst_offset = 0;
4575 ins->sreg2 = args [1]->dreg;
4576 MONO_ADD_INS (cfg->cbb, ins);
4578 switch (fsig->params [0]->type) {
4580 ins->type = STACK_I4;
4584 ins->type = STACK_I8;
4586 case MONO_TYPE_OBJECT:
4587 ins->type = STACK_OBJ;
4590 g_assert_not_reached ();
4593 if (cfg->gen_write_barriers && is_ref)
4594 emit_write_barrier (cfg, args [0], args [1], -1);
4596 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4598 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
4599 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
4601 gboolean is_ref = MONO_TYPE_IS_REFERENCE (fsig->params [1]);
4602 if (fsig->params [1]->type == MONO_TYPE_I4)
4604 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
4605 size = sizeof (gpointer);
4606 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
4609 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
4610 ins->dreg = alloc_ireg (cfg);
4611 ins->sreg1 = args [0]->dreg;
4612 ins->sreg2 = args [1]->dreg;
4613 ins->sreg3 = args [2]->dreg;
4614 ins->type = STACK_I4;
4615 MONO_ADD_INS (cfg->cbb, ins);
4616 } else if (size == 8) {
4617 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
4618 ins->dreg = alloc_ireg (cfg);
4619 ins->sreg1 = args [0]->dreg;
4620 ins->sreg2 = args [1]->dreg;
4621 ins->sreg3 = args [2]->dreg;
4622 ins->type = STACK_I8;
4623 MONO_ADD_INS (cfg->cbb, ins);
4625 /* g_assert_not_reached (); */
4627 if (cfg->gen_write_barriers && is_ref)
4628 emit_write_barrier (cfg, args [0], args [1], -1);
4630 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
4634 } else if (cmethod->klass->image == mono_defaults.corlib) {
4635 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
4636 && strcmp (cmethod->klass->name, "Debugger") == 0) {
4637 if (should_insert_brekpoint (cfg->method))
4638 MONO_INST_NEW (cfg, ins, OP_BREAK);
4640 MONO_INST_NEW (cfg, ins, OP_NOP);
4641 MONO_ADD_INS (cfg->cbb, ins);
4644 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
4645 && strcmp (cmethod->klass->name, "Environment") == 0) {
4647 EMIT_NEW_ICONST (cfg, ins, 1);
4649 EMIT_NEW_ICONST (cfg, ins, 0);
4653 } else if (cmethod->klass == mono_defaults.math_class) {
4655 * There is general branches code for Min/Max, but it does not work for
4657 * http://everything2.com/?node_id=1051618
4661 #ifdef MONO_ARCH_SIMD_INTRINSICS
4662 if (cfg->opt & MONO_OPT_SIMD) {
4663 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
4669 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
4673 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect selected runtime-internal calls to specialized implementations.
 * The only case visible here: String.InternalAllocateStr is rewritten to a
 * direct call of the GC's managed allocator when allocation profiling and
 * MONO_OPT_SHARED are both off.
 * NOTE(review): this extract is missing lines (e.g. the fall-through
 * "return NULL" path and closing braces); comments cover only visible logic.
 */
4676 inline static MonoInst*
4677 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4678 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4680 if (method->klass == mono_defaults.string_class) {
4681 /* managed string allocation support */
4682 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
4683 MonoInst *iargs [2];
4684 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4685 MonoMethod *managed_alloc = NULL;
4687 g_assert (vtable); /*Should not fail since it System.String*/
/* Cross compiles cannot resolve the target GC's managed allocator. */
4688 #ifndef MONO_CROSS_COMPILE
4689 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
/* Call managed_alloc (vtable, length) in place of the original icall. */
4693 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4694 iargs [1] = args [0];
4695 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   For an inlined call, create an OP_LOCAL variable for each argument
 * (including the implicit 'this' at index 0 when sig->hasthis) and emit a
 * store of the corresponding stack slot into it; the vars are published in
 * cfg->args [] so the inlined body addresses them like normal arguments.
 * NOTE(review): the return type, opening brace and the 'int i'/'sp'
 * advancement lines are not visible in this extract.
 */
4702 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4704 MonoInst *store, *temp;
4707 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* Slot 0 is 'this' when present; its static type is recovered from the stack. */
4708 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4711 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4712 * would be different than the MonoInst's used to represent arguments, and
4713 * the ldelema implementation can't deal with that.
4714 * Solution: When ldelema is used on an inline argument, create a var for
4715 * it, emit ldelema on that var, and emit the saving code below in
4716 * inline_method () if needed.
4718 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4719 cfg->args [i] = temp;
4720 /* This uses cfg->args [i] which is set by the preceeding line */
4721 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4722 store->cil_code = sp [0]->cil_code;
/*
 * Debug knobs: when non-zero, inlining is filtered by the environment
 * variables read below. NOTE(review): upstream ships these as 0; leaving
 * them at 1 keeps the getenv-based filters compiled in — confirm intended.
 */
4727 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4728 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4730 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debug helper: only allow inlining of callees whose full name starts
 * with the prefix in MONO_INLINE_CALLED_METHOD_NAME_LIMIT. The env value is
 * cached in a function-local static on first use; an unset variable caches
 * "" which (per the limit[0] test) skips the comparison entirely.
 */
4732 check_inline_called_method_name_limit (MonoMethod *called_method)
4735 static char *limit = NULL;
4737 if (limit == NULL) {
4738 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4740 if (limit_string != NULL)
4741 limit = limit_string;
4743 limit = (char *) "";
4746 if (limit [0] != '\0') {
4747 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* Prefix match: TRUE only when the callee name begins with 'limit'. */
4749 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4750 g_free (called_method_name);
4752 //return (strncmp_result <= 0);
4753 return (strncmp_result == 0);
4760 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Mirror of check_inline_called_method_name_limit, keyed on the CALLER:
 * inlining is permitted only when the caller's full name starts with the
 * prefix in MONO_INLINE_CALLER_METHOD_NAME_LIMIT (cached on first use;
 * unset caches "" which disables the filter).
 */
4762 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4765 static char *limit = NULL;
4767 if (limit == NULL) {
4768 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4769 if (limit_string != NULL) {
4770 limit = limit_string;
4772 limit = (char *) "";
4776 if (limit [0] != '\0') {
4777 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
/* Prefix match against the configured limit string. */
4779 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4780 g_free (caller_method_name);
4782 //return (strncmp_result <= 0);
4783 return (strncmp_result == 0);
/*
 * inline_method:
 *
 *   Try to inline CMETHOD at the current emission point of CFG.
 * Strategy: save every piece of per-method state in cfg (locals, args,
 * cil_offset_to_bb map, current_method, generic_context, ...) into prev_*
 * locals, recursively run mono_method_to_ir () on the callee between a
 * fresh start/end bblock pair, then restore the saved state.  On success
 * (costs < 60 or inline_always) the new bblocks are linked/merged into the
 * caller's graph and the return value is loaded from 'rvar'; on failure the
 * newly created bblocks are abandoned by resetting cfg->cbb.
 * NOTE(review): this extract is missing lines (declarations of 'costs'/'i',
 * several braces/returns); the code below is annotated as-is, not rewritten.
 */
4791 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4792 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_always)
4794 MonoInst *ins, *rvar = NULL;
4795 MonoMethodHeader *cheader;
4796 MonoBasicBlock *ebblock, *sbblock;
/* prev_* mirror the cfg fields clobbered while compiling the callee. */
4798 MonoMethod *prev_inlined_method;
4799 MonoInst **prev_locals, **prev_args;
4800 MonoType **prev_arg_types;
4801 guint prev_real_offset;
4802 GHashTable *prev_cbb_hash;
4803 MonoBasicBlock **prev_cil_offset_to_bb;
4804 MonoBasicBlock *prev_cbb;
4805 unsigned char* prev_cil_start;
4806 guint32 prev_cil_offset_to_bb_len;
4807 MonoMethod *prev_current_method;
4808 MonoGenericContext *prev_generic_context;
4809 gboolean ret_var_set, prev_ret_var_set;
4811 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional env-var driven debugging filters (see the checkers above). */
4813 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4814 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
4817 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4818 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
4822 if (cfg->verbose_level > 2)
4823 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
/* Count each distinct method as inlineable only once. */
4825 if (!cmethod->inline_info) {
4826 mono_jit_stats.inlineable_methods++;
4827 cmethod->inline_info = 1;
4830 /* allocate local variables */
4831 cheader = mono_method_get_header (cmethod);
4833 if (cheader == NULL || mono_loader_get_last_error ()) {
4834 MonoLoaderError *error = mono_loader_get_last_error ();
4837 mono_metadata_free_mh (cheader);
/* Only surface the loader error when the inline is mandatory. */
4838 if (inline_always && error)
4839 mono_cfg_set_exception (cfg, error->exception_type);
4841 mono_loader_clear_error ();
4845 /*Must verify before creating locals as it can cause the JIT to assert.*/
4846 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
4847 mono_metadata_free_mh (cheader);
4851 /* allocate space to store the return value */
4852 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4853 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* Swap in a fresh locals array sized for the callee's header. */
4857 prev_locals = cfg->locals;
4858 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4859 for (i = 0; i < cheader->num_locals; ++i)
4860 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4862 /* allocate start and end blocks */
4863 /* This is needed so if the inline is aborted, we can clean up */
4864 NEW_BBLOCK (cfg, sbblock);
4865 sbblock->real_offset = real_offset;
4867 NEW_BBLOCK (cfg, ebblock);
4868 ebblock->block_num = cfg->num_bblocks++;
4869 ebblock->real_offset = real_offset;
/* Save the remaining per-method cfg state before recursing. */
4871 prev_args = cfg->args;
4872 prev_arg_types = cfg->arg_types;
4873 prev_inlined_method = cfg->inlined_method;
4874 cfg->inlined_method = cmethod;
4875 cfg->ret_var_set = FALSE;
4876 cfg->inline_depth ++;
4877 prev_real_offset = cfg->real_offset;
4878 prev_cbb_hash = cfg->cbb_hash;
4879 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4880 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4881 prev_cil_start = cfg->cil_start;
4882 prev_cbb = cfg->cbb;
4883 prev_current_method = cfg->current_method;
4884 prev_generic_context = cfg->generic_context;
4885 prev_ret_var_set = cfg->ret_var_set;
/* Recursively translate the callee; costs < 0 signals an aborted inline. */
4887 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4889 ret_var_set = cfg->ret_var_set;
/* Restore every saved field, in the same order they were saved. */
4891 cfg->inlined_method = prev_inlined_method;
4892 cfg->real_offset = prev_real_offset;
4893 cfg->cbb_hash = prev_cbb_hash;
4894 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4895 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4896 cfg->cil_start = prev_cil_start;
4897 cfg->locals = prev_locals;
4898 cfg->args = prev_args;
4899 cfg->arg_types = prev_arg_types;
4900 cfg->current_method = prev_current_method;
4901 cfg->generic_context = prev_generic_context;
4902 cfg->ret_var_set = prev_ret_var_set;
4903 cfg->inline_depth --;
/* Success: cheap enough (magic cost threshold 60) or forced. */
4905 if ((costs >= 0 && costs < 60) || inline_always) {
4906 if (cfg->verbose_level > 2)
4907 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4909 mono_jit_stats.inlined_methods++;
4911 /* always add some code to avoid block split failures */
4912 MONO_INST_NEW (cfg, ins, OP_NOP);
4913 MONO_ADD_INS (prev_cbb, ins);
4915 prev_cbb->next_bb = sbblock;
4916 link_bblock (cfg, prev_cbb, sbblock);
4919 * Get rid of the begin and end bblocks if possible to aid local
4922 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4924 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4925 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4927 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4928 MonoBasicBlock *prev = ebblock->in_bb [0];
4929 mono_merge_basic_blocks (cfg, prev, ebblock);
4931 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4932 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4933 cfg->cbb = prev_cbb;
4941 * If the inlined method contains only a throw, then the ret var is not
4942 * set, so set it to a dummy value.
/* r8_0 must outlive the JIT pass; OP_R8CONST stores a pointer to it. */
4945 static double r8_0 = 0.0;
4947 switch (rvar->type) {
4949 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4952 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4957 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4960 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4961 ins->type = STACK_R8;
4962 ins->inst_p0 = (void*)&r8_0;
4963 ins->dreg = rvar->dreg;
4964 MONO_ADD_INS (cfg->cbb, ins);
4967 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4970 g_assert_not_reached ();
/* Push the callee's return value for the caller's evaluation stack. */
4974 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
4977 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Failure path: forget the partial translation and any pending error. */
4980 if (cfg->verbose_level > 2)
4981 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4982 cfg->exception_type = MONO_EXCEPTION_NONE;
4983 mono_loader_clear_error ();
4985 /* This gets rid of the newly added bblocks */
4986 cfg->cbb = prev_cbb;
4988 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
4993 * Some of these comments may well be out-of-date.
4994 * Design decisions: we do a single pass over the IL code (and we do bblock
4995 * splitting/merging in the few cases when it's required: a back jump to an IL
4996 * address that was not already seen as bblock starting point).
4997 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4998 * Complex operations are decomposed in simpler ones right away. We need to let the
4999 * arch-specific code peek and poke inside this process somehow (except when the
5000 * optimizations can take advantage of the full semantic info of coarse opcodes).
5001 * All the opcodes of the form opcode.s are 'normalized' to opcode.
5002 * MonoInst->opcode initially is the IL opcode or some simplification of that
5003 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
5004 * opcode with value bigger than OP_LAST.
5005 * At this point the IR can be handed over to an interpreter, a dumb code generator
5006 * or to the optimizing code generator that will translate it to SSA form.
5008 * Profiling directed optimizations.
5009 * We may compile by default with few or no optimizations and instrument the code
5010 * or the user may indicate what methods to optimize the most either in a config file
5011 * or through repeated runs where the compiler applies offline the optimizations to
5012 * each method and then decides if it was worth it.
/*
 * Inline-verification helpers used throughout mono_method_to_ir ().
 * Each expands to a guard that jumps to the UNVERIFIED / LOAD_ERROR label
 * (defined in the big translation function) when the IL being decoded is
 * malformed: bad stack depth, out-of-range arg/local index, opcode operand
 * running past the method end, or a failed type load.
 */
5015 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
5016 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
5017 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
5018 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
5019 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
5020 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
5021 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
5022 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; LOAD_ERROR;}
5024 /* offset from br.s -> br like opcodes */
5025 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return TRUE when the IL address IP still belongs to basic block BB,
 * i.e. no other bblock has been registered at that offset in the
 * cil_offset_to_bb map (NULL means "no block starts here").
 */
5028 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
5030 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
5032 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Pre-pass over the IL in [start, end): decode each opcode with
 * mono_opcode_value () and register a basic block (GET_BBLOCK) at every
 * branch target and at the instruction following a branch/switch, so the
 * main translation loop knows where blocks begin.  Also marks the block
 * containing a CEE_THROW as out_of_line.
 * NOTE(review): several case bodies (ip advancement, error path via *pos)
 * are missing from this extract; only the visible decoding is annotated.
 */
5036 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
5038 unsigned char *ip = start;
5039 unsigned char *target;
5042 MonoBasicBlock *bblock;
5043 const MonoOpcode *opcode;
5046 cli_addr = ip - start;
5047 i = mono_opcode_value ((const guint8 **)&ip, end);
5050 opcode = &mono_opcodes [i];
/* Advance past the operand according to the opcode's encoding class. */
5051 switch (opcode->argument) {
5052 case MonoInlineNone:
5055 case MonoInlineString:
5056 case MonoInlineType:
5057 case MonoInlineField:
5058 case MonoInlineMethod:
5061 case MonoShortInlineR:
5068 case MonoShortInlineVar:
5069 case MonoShortInlineI:
5072 case MonoShortInlineBrTarget:
/* 1-byte signed displacement, relative to the end of the 2-byte opcode. */
5073 target = start + cli_addr + 2 + (signed char)ip [1];
5074 GET_BBLOCK (cfg, bblock, target);
/* Fall-through successor also starts a block. */
5077 GET_BBLOCK (cfg, bblock, ip);
5079 case MonoInlineBrTarget:
/* 4-byte displacement, relative to the end of the 5-byte opcode. */
5080 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
5081 GET_BBLOCK (cfg, bblock, target);
5084 GET_BBLOCK (cfg, bblock, ip);
5086 case MonoInlineSwitch: {
5087 guint32 n = read32 (ip + 1);
/* Switch targets are relative to the end of the whole jump table. */
5090 cli_addr += 5 + 4 * n;
5091 target = start + cli_addr;
5092 GET_BBLOCK (cfg, bblock, target);
5094 for (j = 0; j < n; ++j) {
5095 target = start + cli_addr + (gint32)read32 (ip);
5096 GET_BBLOCK (cfg, bblock, target);
5106 g_assert_not_reached ();
5109 if (i == CEE_THROW) {
5110 unsigned char *bb_start = ip - 1;
5112 /* Find the start of the bblock containing the throw */
5114 while ((bb_start >= start) && !bblock) {
5115 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* Throwing blocks are laid out out-of-line to keep hot paths compact. */
5119 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN to a MonoMethod in the context of M.  For wrapper
 * methods the token indexes the wrapper's own data table instead of
 * image metadata.  "allow_open" = open constructed types are accepted
 * (contrast mini_get_method () below, which rejects them).
 */
5128 static inline MonoMethod *
5129 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5133 if (m->wrapper_type != MONO_WRAPPER_NONE)
5134 return mono_method_get_wrapper_data (m, token);
5136 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when no generic-sharing
 * context is active a method on an open constructed type is rejected
 * (the result is discarded — the replacement value is outside this
 * extract; presumably NULL or an error marker — TODO confirm).
 */
5141 static inline MonoMethod *
5142 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5144 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
5146 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass in the context of METHOD, honoring the
 * wrapper-data indirection like mini_get_method_allow_open (), and
 * initialize the class before returning it.
 */
5152 static inline MonoClass*
5153 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
5157 if (method->wrapper_type != MONO_WRAPPER_NONE)
5158 klass = mono_method_get_wrapper_data (method, token);
5160 klass = mono_class_get_full (method->klass->image, token, context);
5162 mono_class_init (klass);
5167 * Returns TRUE if the JIT should abort inlining because "callee"
5168 * is influenced by security attributes.
/*
 * check_linkdemand:
 *
 *   Evaluate CAS (Code Access Security) LinkDemand declarations on CALLEE
 * when compiling a call from CALLER.  For the ECMA "public key" demand an
 * inline SecurityException throw is emitted before the call site; any other
 * failed demand is recorded on the cfg as a pending
 * MONO_EXCEPTION_SECURITY_LINKDEMAND (unless an earlier error is pending).
 */
5171 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
/* Inlined bodies get a stricter check only when declsec metadata exists. */
5175 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
5179 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
5180 if (result == MONO_JIT_SECURITY_OK)
5183 if (result == MONO_JIT_LINKDEMAND_ECMA) {
5184 /* Generate code to throw a SecurityException before the actual call/link */
5185 MonoSecurityManager *secman = mono_security_manager_get_methods ();
/* args: (4 = link-demand kind? — TODO confirm constant meaning, caller) */
5188 NEW_ICONST (cfg, args [0], 4);
5189 NEW_METHODCONST (cfg, args [1], caller);
5190 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
5191 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
5192 /* don't hide previous results */
5193 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
5194 cfg->exception_data = result;
/*
 * throw_exception:
 *
 *   Return (and lazily cache in a function-local static) the managed
 * SecurityManager.ThrowException(Exception) method used by
 * emit_throw_exception () below.
 */
5202 throw_exception (void)
5204 static MonoMethod *method = NULL;
5207 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5208 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit IR that throws the pre-constructed exception object EX at the
 * current point, by calling SecurityManager.ThrowException with EX as a
 * pointer constant.
 */
5215 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
5217 MonoMethod *thrower = throw_exception ();
5220 EMIT_NEW_PCONST (cfg, args [0], ex);
5221 mono_emit_method_call (cfg, thrower, args, NULL);
5225 * Return the original method is a wrapper is specified. We can only access
5226 * the custom attributes from the original method.
/*
 * get_original_method:
 *
 *   Unwrap a marshalling wrapper so security checks can read the custom
 * attributes of the underlying user method.  Non-wrappers are returned
 * unchanged; NATIVE_TO_MANAGED wrappers are special-cased (native callers
 * are treated like Critical code — see comment below).
 */
5229 get_original_method (MonoMethod *method)
5231 if (method->wrapper_type == MONO_WRAPPER_NONE)
5234 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
5235 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
5238 /* in other cases we need to find the original method */
5239 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security: if CALLER (unwrapped via get_original_method) may not
 * access FIELD, emit code that throws the returned security exception at
 * this point instead of performing the access.
 */
5243 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
5244 MonoBasicBlock *bblock, unsigned char *ip)
5246 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5247 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
5249 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security counterpart of the field check above: if CALLER may
 * not call CALLEE, emit a throw of the returned security exception.
 */
5253 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
5254 MonoBasicBlock *bblock, unsigned char *ip)
5256 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5257 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
5259 emit_throw_exception (cfg, ex);
5263 * Check that the IL instructions at ip are the array initialization
5264 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *
 *   Pattern-match the canonical "dup; ldtoken <field>; call
 * RuntimeHelpers::InitializeArray" sequence that follows a newarr, and if
 * it matches return a pointer to the field's static data blob (or, for AOT,
 * the RVA wrapped with GUINT_TO_POINTER) so the runtime call can be
 * replaced by a direct memory copy.  *out_size / *out_field_token receive
 * the element size and the ldtoken operand.
 * NOTE(review): the per-type size assignments and several early-return
 * lines are missing from this extract.
 */
5267 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
5270 * newarr[System.Int32]
5272 * ldtoken field valuetype ...
5273 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip[5] == 0x4 checks the ldtoken operand's token-table byte (0x04 = Field). */
5275 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
5276 guint32 token = read32 (ip + 7);
5277 guint32 field_token = read32 (ip + 2);
5278 guint32 field_index = field_token & 0xffffff;
5280 const char *data_ptr;
5282 MonoMethod *cmethod;
5283 MonoClass *dummy_class;
5284 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
5290 *out_field_token = field_token;
5292 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Only the corlib RuntimeHelpers.InitializeArray qualifies. */
5295 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
5297 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
5298 case MONO_TYPE_BOOLEAN:
5302 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
5303 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
5304 case MONO_TYPE_CHAR:
5314 return NULL; /* stupid ARM FP swapped format */
/* Refuse if the computed array payload exceeds the field's declared size. */
5324 if (size > mono_type_size (field->type, &dummy_align))
5327 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
5328 if (!method->klass->image->dynamic) {
5329 field_index = read32 (ip + 2) & 0xffffff;
5330 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
5331 data_ptr = mono_image_rva_map (method->klass->image, rva);
5332 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
5333 /* for aot code we do the lookup on load */
5334 if (aot && data_ptr)
5335 return GUINT_TO_POINTER (rva);
5337 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* Reflection.Emit (dynamic) images: read the data directly off the field. */
5339 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on the cfg, with a message naming the
 * offending method and disassembling the instruction at IP (or noting an
 * empty body).  The header is queued on headers_to_free rather than freed
 * here.
 */
5347 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
5349 char *method_fname = mono_method_full_name (method, TRUE);
5351 MonoMethodHeader *header = mono_method_get_header (method);
5353 if (header->code_size == 0)
5354 method_code = g_strdup ("method body is empty.");
5356 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
5357 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
5358 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
5359 g_free (method_fname);
5360 g_free (method_code);
5361 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *
 *   Record a pre-built managed exception object on the cfg.  The pointer
 * slot is registered as a GC root first so the object survives until the
 * compile aborts.
 */
5365 set_exception_object (MonoCompile *cfg, MonoException *exception)
5367 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
5368 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
5369 cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 *
 *   Return whether KLASS is a reference type, resolving generic type
 * parameters through the active generic-sharing context first so shared
 * code gets the answer for the concrete instantiation.
 */
5373 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5377 if (cfg->generic_sharing_context)
5378 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
5380 type = &klass->byval_arg;
5381 return MONO_TYPE_IS_REFERENCE (type);
/*
 * emit_stloc_ir:
 *
 *   Emit a store of the top-of-stack value *sp into local N.  As a peephole,
 * when the store would be a plain reg-reg move and the value on top is the
 * immediately preceding ICONST/I8CONST instruction, simply retarget that
 * constant's dreg to the local's register instead of emitting a move.
 */
5385 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5388 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5389 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5390 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5391 /* Optimize reg-reg moves away */
5393 * Can't optimize other opcodes, since sp[0] might point to
5394 * the last ins of a decomposed opcode.
5396 sp [0]->dreg = (cfg)->locals [n]->dreg;
5398 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5403 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *
 *   Peephole for "ldloca N; initobj <T>": instead of taking the local's
 * address (which pins it to memory), emit the initialization directly —
 * a NULL pconst for reference types, VZERO for value types — and return
 * the IP past the consumed sequence (return paths are outside this
 * extract — TODO confirm).  SIZE selects the ldloca/ldloca.s operand width.
 */
5406 static inline unsigned char *
5407 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
5416 local = read16 (ip + 2);
/* Only fold when the following initobj is in the same basic block. */
5420 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5421 gboolean skip = FALSE;
5423 /* From the INITOBJ case */
5424 token = read32 (ip + 2);
5425 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5426 CHECK_TYPELOAD (klass);
5427 if (generic_class_is_reference_type (cfg, klass)) {
5428 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5429 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5430 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5431 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5432 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/*
 * is_exception_class:
 *
 *   Walk the parent chain of CLASS and return whether it derives from
 * (or is) System.Exception.
 */
5445 is_exception_class (MonoClass *class)
5448 if (class == mono_defaults.exception_class)
5450 class = class->parent;
5456 * is_jit_optimizer_disabled:
5458 * Determine whenever M's assembly has a DebuggableAttribute with the
5459 * IsJITOptimizerDisabled flag set.
5462 is_jit_optimizer_disabled (MonoMethod *m)
5464 MonoAssembly *ass = m->klass->image->assembly;
5465 MonoCustomAttrInfo* attrs;
5466 static MonoClass *klass;
5468 gboolean val = FALSE;
/* Per-assembly cache; jit_optimizer_disabled_inited is published after
 * the value with a memory barrier (see bottom) for lock-free readers. */
5471 if (ass->jit_optimizer_disabled_inited)
5472 return ass->jit_optimizer_disabled;
5474 klass = mono_class_from_name_cached (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
5476 attrs = mono_custom_attrs_from_assembly (ass);
5478 for (i = 0; i < attrs->num_attrs; ++i) {
5479 MonoCustomAttrEntry *attr = &attrs->attrs [i];
5482 MonoMethodSignature *sig;
5484 if (!attr->ctor || attr->ctor->klass != klass)
5486 /* Decode the attribute. See reflection.c */
5487 len = attr->data_size;
5488 p = (const char*)attr->data;
/* Custom-attribute blobs start with the 0x0001 prolog (ECMA-335 II.23.3). */
5489 g_assert (read16 (p) == 0x0001);
5492 // FIXME: Support named parameters
/* Only the DebuggableAttribute(bool, bool) ctor overload is decoded. */
5493 sig = mono_method_signature (attr->ctor);
5494 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
5496 /* Two boolean arguments */
5500 mono_custom_attrs_free (attrs);
5503 ass->jit_optimizer_disabled = val;
5504 mono_memory_barrier ();
5505 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Decide whether a CIL tail. call from METHOD to CMETHOD can actually be
 * compiled as a tail call.  The baseline check is either the arch-specific
 * MONO_ARCH_USE_OP_TAIL_CALL predicate or exact signature equality (with a
 * non-struct return); then any condition that could leave a callee pointer
 * into the caller's soon-to-be-freed frame vetoes it.
 */
5511 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig)
5513 gboolean supported_tail_call;
5516 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
5517 supported_tail_call = MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
5519 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
5522 for (i = 0; i < fsig->param_count; ++i) {
5523 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
5524 /* These can point to the current method's stack */
5525 supported_tail_call = FALSE;
/* Valuetype 'this' is passed by reference into the caller's frame. */
5527 if (fsig->hasthis && cmethod->klass->valuetype)
5528 /* this might point to the current method's stack */
5529 supported_tail_call = FALSE;
5530 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
5531 supported_tail_call = FALSE;
/* save_lmf methods must unwind through their own epilogue. */
5532 if (cfg->method->save_lmf)
5533 supported_tail_call = FALSE;
5534 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
5535 supported_tail_call = FALSE;
5537 /* Debugging support */
/* Bisecting aid: COUNT env var prints/limits which tail calls survive. */
5539 if (supported_tail_call) {
5540 static int count = 0;
5542 if (getenv ("COUNT")) {
5543 if (count == atoi (getenv ("COUNT")))
5544 printf ("LAST: %s\n", mono_method_full_name (cmethod, TRUE));
5545 if (count > atoi (getenv ("COUNT")))
5546 supported_tail_call = FALSE;
5551 return supported_tail_call;
5555 * mono_method_to_ir:
5557 * Translate the .net IL into linear IR.
5560 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5561 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5562 guint inline_offset, gboolean is_virtual_call)
5565 MonoInst *ins, **sp, **stack_start;
5566 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5567 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
5568 MonoMethod *cmethod, *method_definition;
5569 MonoInst **arg_array;
5570 MonoMethodHeader *header;
5572 guint32 token, ins_flag;
5574 MonoClass *constrained_call = NULL;
5575 unsigned char *ip, *end, *target, *err_pos;
5576 static double r8_0 = 0.0;
5577 MonoMethodSignature *sig;
5578 MonoGenericContext *generic_context = NULL;
5579 MonoGenericContainer *generic_container = NULL;
5580 MonoType **param_types;
5581 int i, n, start_new_bblock, dreg;
5582 int num_calls = 0, inline_costs = 0;
5583 int breakpoint_id = 0;
5585 MonoBoolean security, pinvoke;
5586 MonoSecurityManager* secman = NULL;
5587 MonoDeclSecurityActions actions;
5588 GSList *class_inits = NULL;
5589 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5591 gboolean init_locals, seq_points, skip_dead_blocks;
5592 gboolean disable_inline;
5594 disable_inline = is_jit_optimizer_disabled (method);
5596 /* serialization and xdomain stuff may need access to private fields and methods */
5597 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5598 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5599 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5600 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5601 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5602 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5604 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5606 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5607 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5608 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5609 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5611 image = method->klass->image;
5612 header = mono_method_get_header (method);
5614 MonoLoaderError *error;
5616 if ((error = mono_loader_get_last_error ())) {
5617 mono_cfg_set_exception (cfg, error->exception_type);
5619 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
5620 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
5622 goto exception_exit;
5624 generic_container = mono_method_get_generic_container (method);
5625 sig = mono_method_signature (method);
5626 num_args = sig->hasthis + sig->param_count;
5627 ip = (unsigned char*)header->code;
5628 cfg->cil_start = ip;
5629 end = ip + header->code_size;
5630 mono_jit_stats.cil_code_size += header->code_size;
5631 init_locals = header->init_locals;
5633 seq_points = cfg->gen_seq_points && cfg->method == method;
5636 * Methods without init_locals set could cause asserts in various passes
5641 method_definition = method;
5642 while (method_definition->is_inflated) {
5643 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5644 method_definition = imethod->declaring;
5647 /* SkipVerification is not allowed if core-clr is enabled */
5648 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5650 dont_verify_stloc = TRUE;
5653 if (mono_debug_using_mono_debugger ())
5654 cfg->keep_cil_nops = TRUE;
5656 if (sig->is_inflated)
5657 generic_context = mono_method_get_context (method);
5658 else if (generic_container)
5659 generic_context = &generic_container->context;
5660 cfg->generic_context = generic_context;
5662 if (!cfg->generic_sharing_context)
5663 g_assert (!sig->has_type_parameters);
5665 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5666 g_assert (method->is_inflated);
5667 g_assert (mono_method_get_context (method)->method_inst);
5669 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5670 g_assert (sig->generic_param_count);
5672 if (cfg->method == method) {
5673 cfg->real_offset = 0;
5675 cfg->real_offset = inline_offset;
5678 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5679 cfg->cil_offset_to_bb_len = header->code_size;
5681 cfg->current_method = method;
5683 if (cfg->verbose_level > 2)
5684 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5686 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5688 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5689 for (n = 0; n < sig->param_count; ++n)
5690 param_types [n + sig->hasthis] = sig->params [n];
5691 cfg->arg_types = param_types;
5693 dont_inline = g_list_prepend (dont_inline, method);
5694 if (cfg->method == method) {
5696 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5697 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5700 NEW_BBLOCK (cfg, start_bblock);
5701 cfg->bb_entry = start_bblock;
5702 start_bblock->cil_code = NULL;
5703 start_bblock->cil_length = 0;
5706 NEW_BBLOCK (cfg, end_bblock);
5707 cfg->bb_exit = end_bblock;
5708 end_bblock->cil_code = NULL;
5709 end_bblock->cil_length = 0;
5710 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
5711 g_assert (cfg->num_bblocks == 2);
5713 arg_array = cfg->args;
5715 if (header->num_clauses) {
5716 cfg->spvars = g_hash_table_new (NULL, NULL);
5717 cfg->exvars = g_hash_table_new (NULL, NULL);
5719 /* handle exception clauses */
5720 for (i = 0; i < header->num_clauses; ++i) {
5721 MonoBasicBlock *try_bb;
5722 MonoExceptionClause *clause = &header->clauses [i];
5723 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5724 try_bb->real_offset = clause->try_offset;
5725 try_bb->try_start = TRUE;
5726 try_bb->region = ((i + 1) << 8) | clause->flags;
5727 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5728 tblock->real_offset = clause->handler_offset;
5729 tblock->flags |= BB_EXCEPTION_HANDLER;
5731 link_bblock (cfg, try_bb, tblock);
5733 if (*(ip + clause->handler_offset) == CEE_POP)
5734 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5736 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5737 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5738 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5739 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5740 MONO_ADD_INS (tblock, ins);
5743 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
5744 MONO_ADD_INS (tblock, ins);
5747 /* todo: is a fault block unsafe to optimize? */
5748 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5749 tblock->flags |= BB_EXCEPTION_UNSAFE;
5753 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5755 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5757 /* catch and filter blocks get the exception object on the stack */
5758 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5759 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5760 MonoInst *dummy_use;
5762 /* mostly like handle_stack_args (), but just sets the input args */
5763 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5764 tblock->in_scount = 1;
5765 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5766 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5769 * Add a dummy use for the exvar so its liveness info will be
5773 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5775 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5776 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5777 tblock->flags |= BB_EXCEPTION_HANDLER;
5778 tblock->real_offset = clause->data.filter_offset;
5779 tblock->in_scount = 1;
5780 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5781 /* The filter block shares the exvar with the handler block */
5782 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5783 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5784 MONO_ADD_INS (tblock, ins);
5788 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5789 clause->data.catch_class &&
5790 cfg->generic_sharing_context &&
5791 mono_class_check_context_used (clause->data.catch_class)) {
5793 * In shared generic code with catch
5794 * clauses containing type variables
5795 * the exception handling code has to
5796 * be able to get to the rgctx.
5797 * Therefore we have to make sure that
5798 * the vtable/mrgctx argument (for
5799 * static or generic methods) or the
5800 * "this" argument (for non-static
5801 * methods) are live.
5803 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5804 mini_method_get_context (method)->method_inst ||
5805 method->klass->valuetype) {
5806 mono_get_vtable_var (cfg);
5808 MonoInst *dummy_use;
5810 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5815 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5816 cfg->cbb = start_bblock;
5817 cfg->args = arg_array;
5818 mono_save_args (cfg, sig, inline_args);
5821 /* FIRST CODE BLOCK */
5822 NEW_BBLOCK (cfg, bblock);
5823 bblock->cil_code = ip;
5827 ADD_BBLOCK (cfg, bblock);
5829 if (cfg->method == method) {
5830 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5831 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5832 MONO_INST_NEW (cfg, ins, OP_BREAK);
5833 MONO_ADD_INS (bblock, ins);
5837 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5838 secman = mono_security_manager_get_methods ();
5840 security = (secman && mono_method_has_declsec (method));
5841 /* at this point having security doesn't mean we have any code to generate */
5842 if (security && (cfg->method == method)) {
5843 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5844 * And we do not want to enter the next section (with allocation) if we
5845 * have nothing to generate */
5846 security = mono_declsec_get_demands (method, &actions);
5849 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5850 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5852 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5853 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5854 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5856 /* unless the method or its class has the [SuppressUnmanagedCodeSecurity] attribute */
5857 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5861 mono_custom_attrs_free (custom);
5864 custom = mono_custom_attrs_from_class (wrapped->klass);
5865 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5869 mono_custom_attrs_free (custom);
5872 /* not a P/Invoke after all */
5877 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5878 /* we use a separate basic block for the initialization code */
5879 NEW_BBLOCK (cfg, init_localsbb);
5880 cfg->bb_init = init_localsbb;
5881 init_localsbb->real_offset = cfg->real_offset;
5882 start_bblock->next_bb = init_localsbb;
5883 init_localsbb->next_bb = bblock;
5884 link_bblock (cfg, start_bblock, init_localsbb);
5885 link_bblock (cfg, init_localsbb, bblock);
5887 cfg->cbb = init_localsbb;
5889 start_bblock->next_bb = bblock;
5890 link_bblock (cfg, start_bblock, bblock);
5893 /* at this point we know, if security is TRUE, that some code needs to be generated */
5894 if (security && (cfg->method == method)) {
5897 mono_jit_stats.cas_demand_generation++;
5899 if (actions.demand.blob) {
5900 /* Add code for SecurityAction.Demand */
5901 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5902 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5903 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5904 mono_emit_method_call (cfg, secman->demand, args, NULL);
5906 if (actions.noncasdemand.blob) {
5907 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5908 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5909 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5910 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5911 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5912 mono_emit_method_call (cfg, secman->demand, args, NULL);
5914 if (actions.demandchoice.blob) {
5915 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5916 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5917 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5918 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5919 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5923 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5925 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5928 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5929 /* check if this is native code, e.g. an icall or a p/invoke */
5930 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5931 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5933 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
5934 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
5936 /* if this is a native call then it can only be JITted from platform code */
5937 if ((icall || pinvk) && method->klass && method->klass->image) {
5938 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
5939 MonoException *ex = icall ? mono_get_exception_security () :
5940 mono_get_exception_method_access ();
5941 emit_throw_exception (cfg, ex);
5948 if (header->code_size == 0)
5951 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5956 if (cfg->method == method)
5957 mono_debug_init_method (cfg, bblock, breakpoint_id);
5959 for (n = 0; n < header->num_locals; ++n) {
5960 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5965 /* We force the vtable variable here for all shared methods
5966 for the possibility that they might show up in a stack
5967 trace where their exact instantiation is needed. */
5968 if (cfg->generic_sharing_context && method == cfg->method) {
5969 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5970 mini_method_get_context (method)->method_inst ||
5971 method->klass->valuetype) {
5972 mono_get_vtable_var (cfg);
5974 /* FIXME: Is there a better way to do this?
5975 We need the variable live for the duration
5976 of the whole method. */
5977 cfg->args [0]->flags |= MONO_INST_INDIRECT;
5981 /* add a check for this != NULL to inlined methods */
5982 if (is_virtual_call) {
5985 NEW_ARGLOAD (cfg, arg_ins, 0);
5986 MONO_ADD_INS (cfg->cbb, arg_ins);
5987 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
5990 skip_dead_blocks = !dont_verify;
5991 if (skip_dead_blocks) {
5992 original_bb = bb = mono_basic_block_split (method, &error);
5993 if (!mono_error_ok (&error)) {
5994 mono_error_cleanup (&error);
6000 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
6001 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
6004 start_new_bblock = 0;
6007 if (cfg->method == method)
6008 cfg->real_offset = ip - header->code;
6010 cfg->real_offset = inline_offset;
6015 if (start_new_bblock) {
6016 bblock->cil_length = ip - bblock->cil_code;
6017 if (start_new_bblock == 2) {
6018 g_assert (ip == tblock->cil_code);
6020 GET_BBLOCK (cfg, tblock, ip);
6022 bblock->next_bb = tblock;
6025 start_new_bblock = 0;
6026 for (i = 0; i < bblock->in_scount; ++i) {
6027 if (cfg->verbose_level > 3)
6028 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
6029 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
6033 g_slist_free (class_inits);
6036 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
6037 link_bblock (cfg, bblock, tblock);
6038 if (sp != stack_start) {
6039 handle_stack_args (cfg, stack_start, sp - stack_start);
6041 CHECK_UNVERIFIABLE (cfg);
6043 bblock->next_bb = tblock;
6046 for (i = 0; i < bblock->in_scount; ++i) {
6047 if (cfg->verbose_level > 3)
6048 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
6049 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
6052 g_slist_free (class_inits);
6057 if (skip_dead_blocks) {
6058 int ip_offset = ip - header->code;
6060 if (ip_offset == bb->end)
6064 int op_size = mono_opcode_size (ip, end);
6065 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
6067 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
6069 if (ip_offset + op_size == bb->end) {
6070 MONO_INST_NEW (cfg, ins, OP_NOP);
6071 MONO_ADD_INS (bblock, ins);
6072 start_new_bblock = 1;
6080 * Sequence points are points where the debugger can place a breakpoint.
6081 * Currently, we generate these automatically at points where the IL
6084 if (seq_points && sp == stack_start) {
6085 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
6086 MONO_ADD_INS (cfg->cbb, ins);
6089 bblock->real_offset = cfg->real_offset;
6091 if ((cfg->method == method) && cfg->coverage_info) {
6092 guint32 cil_offset = ip - header->code;
6093 cfg->coverage_info->data [cil_offset].cil_code = ip;
6095 /* TODO: Use an increment here */
6096 #if defined(TARGET_X86)
6097 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
6098 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
6100 MONO_ADD_INS (cfg->cbb, ins);
6102 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
6103 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
6107 if (cfg->verbose_level > 3)
6108 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
6112 if (cfg->keep_cil_nops)
6113 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
6115 MONO_INST_NEW (cfg, ins, OP_NOP);
6117 MONO_ADD_INS (bblock, ins);
6120 if (should_insert_brekpoint (cfg->method))
6121 MONO_INST_NEW (cfg, ins, OP_BREAK);
6123 MONO_INST_NEW (cfg, ins, OP_NOP);
6125 MONO_ADD_INS (bblock, ins);
6131 CHECK_STACK_OVF (1);
6132 n = (*ip)-CEE_LDARG_0;
6134 EMIT_NEW_ARGLOAD (cfg, ins, n);
6142 CHECK_STACK_OVF (1);
6143 n = (*ip)-CEE_LDLOC_0;
6145 EMIT_NEW_LOCLOAD (cfg, ins, n);
6154 n = (*ip)-CEE_STLOC_0;
6157 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
6159 emit_stloc_ir (cfg, sp, header, n);
6166 CHECK_STACK_OVF (1);
6169 EMIT_NEW_ARGLOAD (cfg, ins, n);
6175 CHECK_STACK_OVF (1);
6178 NEW_ARGLOADA (cfg, ins, n);
6179 MONO_ADD_INS (cfg->cbb, ins);
6189 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
6191 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
6196 CHECK_STACK_OVF (1);
6199 EMIT_NEW_LOCLOAD (cfg, ins, n);
6203 case CEE_LDLOCA_S: {
6204 unsigned char *tmp_ip;
6206 CHECK_STACK_OVF (1);
6207 CHECK_LOCAL (ip [1]);
6209 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
6215 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
6224 CHECK_LOCAL (ip [1]);
6225 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
6227 emit_stloc_ir (cfg, sp, header, ip [1]);
6232 CHECK_STACK_OVF (1);
6233 EMIT_NEW_PCONST (cfg, ins, NULL);
6234 ins->type = STACK_OBJ;
6239 CHECK_STACK_OVF (1);
6240 EMIT_NEW_ICONST (cfg, ins, -1);
6253 CHECK_STACK_OVF (1);
6254 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
6260 CHECK_STACK_OVF (1);
6262 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
6268 CHECK_STACK_OVF (1);
6269 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
6275 CHECK_STACK_OVF (1);
6276 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6277 ins->type = STACK_I8;
6278 ins->dreg = alloc_dreg (cfg, STACK_I8);
6280 ins->inst_l = (gint64)read64 (ip);
6281 MONO_ADD_INS (bblock, ins);
6287 gboolean use_aotconst = FALSE;
6289 #ifdef TARGET_POWERPC
6290 /* FIXME: Clean this up */
6291 if (cfg->compile_aot)
6292 use_aotconst = TRUE;
6295 /* FIXME: we should really allocate this only late in the compilation process */
6296 f = mono_domain_alloc (cfg->domain, sizeof (float));
6298 CHECK_STACK_OVF (1);
6304 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
6306 dreg = alloc_freg (cfg);
6307 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
6308 ins->type = STACK_R8;
6310 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6311 ins->type = STACK_R8;
6312 ins->dreg = alloc_dreg (cfg, STACK_R8);
6314 MONO_ADD_INS (bblock, ins);
6324 gboolean use_aotconst = FALSE;
6326 #ifdef TARGET_POWERPC
6327 /* FIXME: Clean this up */
6328 if (cfg->compile_aot)
6329 use_aotconst = TRUE;
6332 /* FIXME: we should really allocate this only late in the compilation process */
6333 d = mono_domain_alloc (cfg->domain, sizeof (double));
6335 CHECK_STACK_OVF (1);
6341 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
6343 dreg = alloc_freg (cfg);
6344 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
6345 ins->type = STACK_R8;
6347 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6348 ins->type = STACK_R8;
6349 ins->dreg = alloc_dreg (cfg, STACK_R8);
6351 MONO_ADD_INS (bblock, ins);
6360 MonoInst *temp, *store;
6362 CHECK_STACK_OVF (1);
6366 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
6367 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
6369 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6372 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6385 if (sp [0]->type == STACK_R8)
6386 /* we need to pop the value from the x86 FP stack */
6387 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
6396 if (stack_start != sp)
6398 token = read32 (ip + 1);
6399 /* FIXME: check the signature matches */
6400 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6402 if (!cmethod || mono_loader_get_last_error ())
6405 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
6406 GENERIC_SHARING_FAILURE (CEE_JMP);
6408 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
6409 CHECK_CFG_EXCEPTION;
6411 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6413 MonoMethodSignature *fsig = mono_method_signature (cmethod);
6416 /* Handle tail calls similarly to calls */
6417 n = fsig->param_count + fsig->hasthis;
6419 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
6420 call->method = cmethod;
6421 call->tail_call = TRUE;
6422 call->signature = mono_method_signature (cmethod);
6423 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6424 call->inst.inst_p0 = cmethod;
6425 for (i = 0; i < n; ++i)
6426 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
6428 mono_arch_emit_call (cfg, call);
6429 MONO_ADD_INS (bblock, (MonoInst*)call);
6432 for (i = 0; i < num_args; ++i)
6433 /* Prevent arguments from being optimized away */
6434 arg_array [i]->flags |= MONO_INST_VOLATILE;
6436 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6437 ins = (MonoInst*)call;
6438 ins->inst_p0 = cmethod;
6439 MONO_ADD_INS (bblock, ins);
6443 start_new_bblock = 1;
6448 case CEE_CALLVIRT: {
6449 MonoInst *addr = NULL;
6450 MonoMethodSignature *fsig = NULL;
6452 int virtual = *ip == CEE_CALLVIRT;
6453 int calli = *ip == CEE_CALLI;
6454 gboolean pass_imt_from_rgctx = FALSE;
6455 MonoInst *imt_arg = NULL;
6456 gboolean pass_vtable = FALSE;
6457 gboolean pass_mrgctx = FALSE;
6458 MonoInst *vtable_arg = NULL;
6459 gboolean check_this = FALSE;
6460 gboolean supported_tail_call = FALSE;
6463 token = read32 (ip + 1);
6470 if (method->wrapper_type != MONO_WRAPPER_NONE)
6471 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6473 fsig = mono_metadata_parse_signature (image, token);
6475 n = fsig->param_count + fsig->hasthis;
6477 if (method->dynamic && fsig->pinvoke) {
6481 * This is a call through a function pointer using a pinvoke
6482 * signature. Have to create a wrapper and call that instead.
6483 * FIXME: This is very slow, need to create a wrapper at JIT time
6484 * instead based on the signature.
6486 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
6487 EMIT_NEW_PCONST (cfg, args [1], fsig);
6489 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6492 MonoMethod *cil_method;
6494 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6495 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6496 cil_method = cmethod;
6497 } else if (constrained_call) {
6498 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6500 * This is needed since get_method_constrained can't find
6501 * the method in klass representing a type var.
6502 * The type var is guaranteed to be a reference type in this
6505 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6506 cil_method = cmethod;
6507 g_assert (!cmethod->klass->valuetype);
6509 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6512 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6513 cil_method = cmethod;
6516 if (!cmethod || mono_loader_get_last_error ())
6518 if (!dont_verify && !cfg->skip_visibility) {
6519 MonoMethod *target_method = cil_method;
6520 if (method->is_inflated) {
6521 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6523 if (!mono_method_can_access_method (method_definition, target_method) &&
6524 !mono_method_can_access_method (method, cil_method))
6525 METHOD_ACCESS_FAILURE;
6528 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6529 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6531 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6532 /* MS.NET seems to silently convert this to a callvirt */
6537 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
6538 * converts to a callvirt.
6540 * tests/bug-515884.il is an example of this behavior
6542 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
6543 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
6544 if (!virtual && cmethod->klass->marshalbyref && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
6548 if (!cmethod->klass->inited)
6549 if (!mono_class_init (cmethod->klass))
6552 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6553 mini_class_is_system_array (cmethod->klass)) {
6554 array_rank = cmethod->klass->rank;
6555 fsig = mono_method_signature (cmethod);
6557 fsig = mono_method_signature (cmethod);
6562 if (fsig->pinvoke) {
6563 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6564 check_for_pending_exc, FALSE);
6565 fsig = mono_method_signature (wrapper);
6566 } else if (constrained_call) {
6567 fsig = mono_method_signature (cmethod);
6569 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6573 mono_save_token_info (cfg, image, token, cil_method);
6575 n = fsig->param_count + fsig->hasthis;
6577 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6578 if (check_linkdemand (cfg, method, cmethod))
6580 CHECK_CFG_EXCEPTION;
6583 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6584 g_assert_not_reached ();
6587 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6590 if (!cfg->generic_sharing_context && cmethod)
6591 g_assert (!mono_method_check_context_used (cmethod));
6595 //g_assert (!virtual || fsig->hasthis);
6599 if (constrained_call) {
6601 * We have the `constrained.' prefix opcode.
6603 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6605 * The type parameter is instantiated as a valuetype,
6606 * but that type doesn't override the method we're
6607 * calling, so we need to box `this'.
6609 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6610 ins->klass = constrained_call;
6611 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
6612 CHECK_CFG_EXCEPTION;
6613 } else if (!constrained_call->valuetype) {
6614 int dreg = alloc_preg (cfg);
6617 * The type parameter is instantiated as a reference
6618 * type. We have a managed pointer on the stack, so
6619 * we need to dereference it here.
6621 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6622 ins->type = STACK_OBJ;
6624 } else if (cmethod->klass->valuetype)
6626 constrained_call = NULL;
6629 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6633 * If the callee is a shared method, then its static cctor
6634 * might not get called after the call was patched.
6636 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6637 emit_generic_class_init (cfg, cmethod->klass);
6638 CHECK_TYPELOAD (cmethod->klass);
6641 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6642 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6643 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6644 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6645 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6648 * Pass vtable iff target method might
6649 * be shared, which means that sharing
6650 * is enabled for its class and its
6651 * context is sharable (and it's not a
6654 if (sharing_enabled && context_sharable &&
6655 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6659 if (cmethod && mini_method_get_context (cmethod) &&
6660 mini_method_get_context (cmethod)->method_inst) {
6661 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6662 MonoGenericContext *context = mini_method_get_context (cmethod);
6663 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6665 g_assert (!pass_vtable);
6667 if (sharing_enabled && context_sharable)
6671 if (cfg->generic_sharing_context && cmethod) {
6672 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6674 context_used = mono_method_check_context_used (cmethod);
6676 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6677 /* Generic method interface
6678 calls are resolved via a
6679 helper function and don't
6681 if (!cmethod_context || !cmethod_context->method_inst)
6682 pass_imt_from_rgctx = TRUE;
6686 * If a shared method calls another
6687 * shared method then the caller must
6688 * have a generic sharing context
6689 * because the magic trampoline
6690 * requires it. FIXME: We shouldn't
6691 * have to force the vtable/mrgctx
6692 * variable here. Instead there
6693 * should be a flag in the cfg to
6694 * request a generic sharing context.
6697 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6698 mono_get_vtable_var (cfg);
6703 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6705 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6707 CHECK_TYPELOAD (cmethod->klass);
6708 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6713 g_assert (!vtable_arg);
6715 if (!cfg->compile_aot) {
6717 * emit_get_rgctx_method () calls mono_class_vtable () so check
6718 * for type load errors before.
6720 mono_class_setup_vtable (cmethod->klass);
6721 CHECK_TYPELOAD (cmethod->klass);
6724 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6726 /* !marshalbyref is needed to properly handle generic methods + remoting */
6727 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6728 MONO_METHOD_IS_FINAL (cmethod)) &&
6729 !cmethod->klass->marshalbyref) {
6736 if (pass_imt_from_rgctx) {
6737 g_assert (!pass_vtable);
6740 imt_arg = emit_get_rgctx_method (cfg, context_used,
6741 cmethod, MONO_RGCTX_INFO_METHOD);
6745 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
6747 /* Calling virtual generic methods */
6748 if (cmethod && virtual &&
6749 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6750 !(MONO_METHOD_IS_FINAL (cmethod) &&
6751 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6752 mono_method_signature (cmethod)->generic_param_count) {
6753 MonoInst *this_temp, *this_arg_temp, *store;
6754 MonoInst *iargs [4];
6756 g_assert (mono_method_signature (cmethod)->is_inflated);
6758 /* Prevent inlining of methods that contain indirect calls */
6761 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
6762 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt) {
6763 g_assert (!imt_arg);
6765 g_assert (cmethod->is_inflated);
6766 imt_arg = emit_get_rgctx_method (cfg, context_used,
6767 cmethod, MONO_RGCTX_INFO_METHOD);
6768 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg, NULL);
6772 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6773 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6774 MONO_ADD_INS (bblock, store);
6776 /* FIXME: This should be a managed pointer */
6777 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6779 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6780 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6781 cmethod, MONO_RGCTX_INFO_METHOD);
6782 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6783 addr = mono_emit_jit_icall (cfg,
6784 mono_helper_compile_generic_method, iargs);
6786 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6788 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL);
6791 if (!MONO_TYPE_IS_VOID (fsig->ret))
6792 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6794 CHECK_CFG_EXCEPTION;
6802 * Implement a workaround for the inherent races involved in locking:
6808 * If a thread abort happens between the call to Monitor.Enter () and the start of the
6809 * try block, the Exit () won't be executed, see:
6810 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
6811 * To work around this, we extend such try blocks to include the last x bytes
6812 * of the Monitor.Enter () call.
6814 if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
6815 MonoBasicBlock *tbb;
6817 GET_BBLOCK (cfg, tbb, ip + 5);
6819 * Only extend try blocks with a finally, to avoid catching exceptions thrown
6820 * from Monitor.Enter like ArgumentNullException.
6822 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
6823 /* Mark this bblock as needing to be extended */
6824 tbb->extend_try_block = TRUE;
6828 /* Conversion to a JIT intrinsic */
6829 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6831 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6832 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6837 CHECK_CFG_EXCEPTION;
6845 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6846 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6847 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
6848 !g_list_find (dont_inline, cmethod)) {
6850 gboolean always = FALSE;
6852 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6853 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6854 /* Prevent inlining of methods that call wrappers */
6856 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6860 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, always))) {
6862 cfg->real_offset += 5;
6865 if (!MONO_TYPE_IS_VOID (fsig->ret))
6866 /* *sp is already set by inline_method */
6869 inline_costs += costs;
6875 inline_costs += 10 * num_calls++;
6877 /* Tail recursion elimination */
6878 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6879 gboolean has_vtargs = FALSE;
6882 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6885 /* keep it simple */
6886 for (i = fsig->param_count - 1; i >= 0; i--) {
6887 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6892 for (i = 0; i < n; ++i)
6893 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6894 MONO_INST_NEW (cfg, ins, OP_BR);
6895 MONO_ADD_INS (bblock, ins);
6896 tblock = start_bblock->out_bb [0];
6897 link_bblock (cfg, bblock, tblock);
6898 ins->inst_target_bb = tblock;
6899 start_new_bblock = 1;
6901 /* skip the CEE_RET, too */
6902 if (ip_in_bb (cfg, bblock, ip + 5))
6912 /* Generic sharing */
6913 /* FIXME: only do this for generic methods if
6914 they are not shared! */
6915 if (context_used && !imt_arg && !array_rank &&
6916 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6917 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6918 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6919 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6922 g_assert (cfg->generic_sharing_context && cmethod);
6926 * We are compiling a call to a
6927 * generic method from shared code,
6928 * which means that we have to look up
6929 * the method in the rgctx and do an
6932 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6935 /* Indirect calls */
6937 g_assert (!imt_arg);
6939 if (*ip == CEE_CALL)
6940 g_assert (context_used);
6941 else if (*ip == CEE_CALLI)
6942 g_assert (!vtable_arg);
6944 /* FIXME: what the hell is this??? */
6945 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6946 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6948 /* Prevent inlining of methods with indirect calls */
6954 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, vtable_arg);
6955 call = (MonoCallInst*)ins;
6957 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6959 * Instead of emitting an indirect call, emit a direct call
6960 * with the contents of the aotconst as the patch info.
6962 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6964 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6965 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
6968 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL);
6971 if (!MONO_TYPE_IS_VOID (fsig->ret))
6972 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6974 CHECK_CFG_EXCEPTION;
6985 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6986 MonoInst *val = sp [fsig->param_count];
6988 if (val->type == STACK_OBJ) {
6989 MonoInst *iargs [2];
6994 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6997 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6998 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
6999 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
7000 emit_write_barrier (cfg, addr, val, 0);
7001 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
7002 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
7004 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
7007 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
7008 if (!cmethod->klass->element_class->valuetype && !readonly)
7009 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
7010 CHECK_TYPELOAD (cmethod->klass);
7013 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
7016 g_assert_not_reached ();
7019 CHECK_CFG_EXCEPTION;
7026 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
7028 if (!MONO_TYPE_IS_VOID (fsig->ret))
7029 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7031 CHECK_CFG_EXCEPTION;
7038 /* Tail prefix / tail call optimization */
7040 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
7041 /* FIXME: runtime generic context pointer for jumps? */
7042 /* FIXME: handle this for generic sharing eventually */
7043 supported_tail_call = cmethod &&
7044 ((((ins_flag & MONO_INST_TAILCALL) && (*ip == CEE_CALL))
7045 ))//|| ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && ip [5] == CEE_RET))
7046 && !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig);
7048 if (supported_tail_call) {
7051 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
7054 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
7056 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
7057 /* Handle tail calls similarly to calls */
7058 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE, FALSE);
7060 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
7061 call->tail_call = TRUE;
7062 call->method = cmethod;
7063 call->signature = mono_method_signature (cmethod);
7066 * We implement tail calls by storing the actual arguments into the
7067 * argument variables, then emitting a CEE_JMP.
7069 for (i = 0; i < n; ++i) {
7070 /* Prevent argument from being register allocated */
7071 arg_array [i]->flags |= MONO_INST_VOLATILE;
7072 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
7076 ins = (MonoInst*)call;
7077 ins->inst_p0 = cmethod;
7078 ins->inst_p1 = arg_array [0];
7079 MONO_ADD_INS (bblock, ins);
7080 link_bblock (cfg, bblock, end_bblock);
7081 start_new_bblock = 1;
7083 CHECK_CFG_EXCEPTION;
7088 // FIXME: Eliminate unreachable epilogs
7091 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
7092 * only reachable from this call.
7094 GET_BBLOCK (cfg, tblock, ip);
7095 if (tblock == bblock || tblock->in_count == 0)
7102 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
7103 imt_arg, vtable_arg);
7105 if (!MONO_TYPE_IS_VOID (fsig->ret))
7106 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7108 CHECK_CFG_EXCEPTION;
7115 if (cfg->method != method) {
7116 /* return from inlined method */
7118 * If in_count == 0, that means the ret is unreachable due to
7119 * being preceded by a throw. In that case, inline_method () will
7120 * handle setting the return value
7121 * (test case: test_0_inline_throw ()).
7123 if (return_var && cfg->cbb->in_count) {
7127 //g_assert (returnvar != -1);
7128 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
7129 cfg->ret_var_set = TRUE;
7133 MonoType *ret_type = mono_method_signature (method)->ret;
7137 * Place a seq point here too even though the IL stack is not
7138 * empty, so a step over on
7141 * will work correctly.
7143 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
7144 MONO_ADD_INS (cfg->cbb, ins);
7147 g_assert (!return_var);
7151 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
7154 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
7157 if (!cfg->vret_addr) {
7160 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
7162 EMIT_NEW_RETLOADA (cfg, ret_addr);
7164 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
7165 ins->klass = mono_class_from_mono_type (ret_type);
7168 #ifdef MONO_ARCH_SOFT_FLOAT
7169 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
7170 MonoInst *iargs [1];
7174 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
7175 mono_arch_emit_setret (cfg, method, conv);
7177 mono_arch_emit_setret (cfg, method, *sp);
7180 mono_arch_emit_setret (cfg, method, *sp);
7185 if (sp != stack_start)
7187 MONO_INST_NEW (cfg, ins, OP_BR);
7189 ins->inst_target_bb = end_bblock;
7190 MONO_ADD_INS (bblock, ins);
7191 link_bblock (cfg, bblock, end_bblock);
7192 start_new_bblock = 1;
7196 MONO_INST_NEW (cfg, ins, OP_BR);
7198 target = ip + 1 + (signed char)(*ip);
7200 GET_BBLOCK (cfg, tblock, target);
7201 link_bblock (cfg, bblock, tblock);
7202 ins->inst_target_bb = tblock;
7203 if (sp != stack_start) {
7204 handle_stack_args (cfg, stack_start, sp - stack_start);
7206 CHECK_UNVERIFIABLE (cfg);
7208 MONO_ADD_INS (bblock, ins);
7209 start_new_bblock = 1;
7210 inline_costs += BRANCH_COST;
7224 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
7226 target = ip + 1 + *(signed char*)ip;
7232 inline_costs += BRANCH_COST;
7236 MONO_INST_NEW (cfg, ins, OP_BR);
7239 target = ip + 4 + (gint32)read32(ip);
7241 GET_BBLOCK (cfg, tblock, target);
7242 link_bblock (cfg, bblock, tblock);
7243 ins->inst_target_bb = tblock;
7244 if (sp != stack_start) {
7245 handle_stack_args (cfg, stack_start, sp - stack_start);
7247 CHECK_UNVERIFIABLE (cfg);
7250 MONO_ADD_INS (bblock, ins);
7252 start_new_bblock = 1;
7253 inline_costs += BRANCH_COST;
7260 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
7261 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
7262 guint32 opsize = is_short ? 1 : 4;
7264 CHECK_OPSIZE (opsize);
7266 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
7269 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
7274 GET_BBLOCK (cfg, tblock, target);
7275 link_bblock (cfg, bblock, tblock);
7276 GET_BBLOCK (cfg, tblock, ip);
7277 link_bblock (cfg, bblock, tblock);
7279 if (sp != stack_start) {
7280 handle_stack_args (cfg, stack_start, sp - stack_start);
7281 CHECK_UNVERIFIABLE (cfg);
7284 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
7285 cmp->sreg1 = sp [0]->dreg;
7286 type_from_op (cmp, sp [0], NULL);
7289 #if SIZEOF_REGISTER == 4
7290 if (cmp->opcode == OP_LCOMPARE_IMM) {
7291 /* Convert it to OP_LCOMPARE */
7292 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7293 ins->type = STACK_I8;
7294 ins->dreg = alloc_dreg (cfg, STACK_I8);
7296 MONO_ADD_INS (bblock, ins);
7297 cmp->opcode = OP_LCOMPARE;
7298 cmp->sreg2 = ins->dreg;
7301 MONO_ADD_INS (bblock, cmp);
7303 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
7304 type_from_op (ins, sp [0], NULL);
7305 MONO_ADD_INS (bblock, ins);
7306 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
7307 GET_BBLOCK (cfg, tblock, target);
7308 ins->inst_true_bb = tblock;
7309 GET_BBLOCK (cfg, tblock, ip);
7310 ins->inst_false_bb = tblock;
7311 start_new_bblock = 2;
7314 inline_costs += BRANCH_COST;
7329 MONO_INST_NEW (cfg, ins, *ip);
7331 target = ip + 4 + (gint32)read32(ip);
7337 inline_costs += BRANCH_COST;
7341 MonoBasicBlock **targets;
7342 MonoBasicBlock *default_bblock;
7343 MonoJumpInfoBBTable *table;
7344 int offset_reg = alloc_preg (cfg);
7345 int target_reg = alloc_preg (cfg);
7346 int table_reg = alloc_preg (cfg);
7347 int sum_reg = alloc_preg (cfg);
7348 gboolean use_op_switch;
7352 n = read32 (ip + 1);
7355 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
7359 CHECK_OPSIZE (n * sizeof (guint32));
7360 target = ip + n * sizeof (guint32);
7362 GET_BBLOCK (cfg, default_bblock, target);
7363 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7365 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
7366 for (i = 0; i < n; ++i) {
7367 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
7368 targets [i] = tblock;
7369 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
7373 if (sp != stack_start) {
7375 * Link the current bb with the targets as well, so handle_stack_args
7376 * will set their in_stack correctly.
7378 link_bblock (cfg, bblock, default_bblock);
7379 for (i = 0; i < n; ++i)
7380 link_bblock (cfg, bblock, targets [i]);
7382 handle_stack_args (cfg, stack_start, sp - stack_start);
7384 CHECK_UNVERIFIABLE (cfg);
7387 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
7388 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
7391 for (i = 0; i < n; ++i)
7392 link_bblock (cfg, bblock, targets [i]);
7394 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
7395 table->table = targets;
7396 table->table_size = n;
7398 use_op_switch = FALSE;
7400 /* ARM implements SWITCH statements differently */
7401 /* FIXME: Make it use the generic implementation */
7402 if (!cfg->compile_aot)
7403 use_op_switch = TRUE;
7406 if (COMPILE_LLVM (cfg))
7407 use_op_switch = TRUE;
7409 cfg->cbb->has_jump_table = 1;
7411 if (use_op_switch) {
7412 MONO_INST_NEW (cfg, ins, OP_SWITCH);
7413 ins->sreg1 = src1->dreg;
7414 ins->inst_p0 = table;
7415 ins->inst_many_bb = targets;
7416 ins->klass = GUINT_TO_POINTER (n);
7417 MONO_ADD_INS (cfg->cbb, ins);
7419 if (sizeof (gpointer) == 8)
7420 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
7422 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
7424 #if SIZEOF_REGISTER == 8
7425 /* The upper word might not be zero, and we add it to a 64 bit address later */
7426 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
7429 if (cfg->compile_aot) {
7430 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
7432 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
7433 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
7434 ins->inst_p0 = table;
7435 ins->dreg = table_reg;
7436 MONO_ADD_INS (cfg->cbb, ins);
7439 /* FIXME: Use load_memindex */
7440 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
7441 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
7442 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
7444 start_new_bblock = 1;
7445 inline_costs += (BRANCH_COST * 2);
7465 dreg = alloc_freg (cfg);
7468 dreg = alloc_lreg (cfg);
7471 dreg = alloc_preg (cfg);
7474 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
7475 ins->type = ldind_type [*ip - CEE_LDIND_I1];
7476 ins->flags |= ins_flag;
7478 MONO_ADD_INS (bblock, ins);
7493 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
7494 ins->flags |= ins_flag;
7496 MONO_ADD_INS (bblock, ins);
7498 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
7499 emit_write_barrier (cfg, sp [0], sp [1], -1);
7508 MONO_INST_NEW (cfg, ins, (*ip));
7510 ins->sreg1 = sp [0]->dreg;
7511 ins->sreg2 = sp [1]->dreg;
7512 type_from_op (ins, sp [0], sp [1]);
7514 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7516 /* Use the immediate opcodes if possible */
7517 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
7518 int imm_opcode = mono_op_to_op_imm (ins->opcode);
7519 if (imm_opcode != -1) {
7520 ins->opcode = imm_opcode;
7521 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7524 sp [1]->opcode = OP_NOP;
7528 MONO_ADD_INS ((cfg)->cbb, (ins));
7530 *sp++ = mono_decompose_opcode (cfg, ins);
7547 MONO_INST_NEW (cfg, ins, (*ip));
7549 ins->sreg1 = sp [0]->dreg;
7550 ins->sreg2 = sp [1]->dreg;
7551 type_from_op (ins, sp [0], sp [1]);
7553 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7554 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7556 /* FIXME: Pass opcode to is_inst_imm */
7558 /* Use the immediate opcodes if possible */
7559 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7562 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7563 if (imm_opcode != -1) {
7564 ins->opcode = imm_opcode;
7565 if (sp [1]->opcode == OP_I8CONST) {
7566 #if SIZEOF_REGISTER == 8
7567 ins->inst_imm = sp [1]->inst_l;
7569 ins->inst_ls_word = sp [1]->inst_ls_word;
7570 ins->inst_ms_word = sp [1]->inst_ms_word;
7574 ins->inst_imm = (gssize)(sp [1]->inst_c0);
7577 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7578 if (sp [1]->next == NULL)
7579 sp [1]->opcode = OP_NOP;
7582 MONO_ADD_INS ((cfg)->cbb, (ins));
7584 *sp++ = mono_decompose_opcode (cfg, ins);
7597 case CEE_CONV_OVF_I8:
7598 case CEE_CONV_OVF_U8:
7602 /* Special case this earlier so we have long constants in the IR */
7603 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7604 int data = sp [-1]->inst_c0;
7605 sp [-1]->opcode = OP_I8CONST;
7606 sp [-1]->type = STACK_I8;
7607 #if SIZEOF_REGISTER == 8
7608 if ((*ip) == CEE_CONV_U8)
7609 sp [-1]->inst_c0 = (guint32)data;
7611 sp [-1]->inst_c0 = data;
7613 sp [-1]->inst_ls_word = data;
7614 if ((*ip) == CEE_CONV_U8)
7615 sp [-1]->inst_ms_word = 0;
7617 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7619 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7626 case CEE_CONV_OVF_I4:
7627 case CEE_CONV_OVF_I1:
7628 case CEE_CONV_OVF_I2:
7629 case CEE_CONV_OVF_I:
7630 case CEE_CONV_OVF_U:
7633 if (sp [-1]->type == STACK_R8) {
7634 ADD_UNOP (CEE_CONV_OVF_I8);
7641 case CEE_CONV_OVF_U1:
7642 case CEE_CONV_OVF_U2:
7643 case CEE_CONV_OVF_U4:
7646 if (sp [-1]->type == STACK_R8) {
7647 ADD_UNOP (CEE_CONV_OVF_U8);
7654 case CEE_CONV_OVF_I1_UN:
7655 case CEE_CONV_OVF_I2_UN:
7656 case CEE_CONV_OVF_I4_UN:
7657 case CEE_CONV_OVF_I8_UN:
7658 case CEE_CONV_OVF_U1_UN:
7659 case CEE_CONV_OVF_U2_UN:
7660 case CEE_CONV_OVF_U4_UN:
7661 case CEE_CONV_OVF_U8_UN:
7662 case CEE_CONV_OVF_I_UN:
7663 case CEE_CONV_OVF_U_UN:
7670 CHECK_CFG_EXCEPTION;
7674 case CEE_ADD_OVF_UN:
7676 case CEE_MUL_OVF_UN:
7678 case CEE_SUB_OVF_UN:
7686 token = read32 (ip + 1);
7687 klass = mini_get_class (method, token, generic_context);
7688 CHECK_TYPELOAD (klass);
7690 if (generic_class_is_reference_type (cfg, klass)) {
7691 MonoInst *store, *load;
7692 int dreg = alloc_preg (cfg);
7694 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7695 load->flags |= ins_flag;
7696 MONO_ADD_INS (cfg->cbb, load);
7698 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7699 store->flags |= ins_flag;
7700 MONO_ADD_INS (cfg->cbb, store);
7702 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
7703 emit_write_barrier (cfg, sp [0], sp [1], -1);
7705 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7717 token = read32 (ip + 1);
7718 klass = mini_get_class (method, token, generic_context);
7719 CHECK_TYPELOAD (klass);
7721 /* Optimize the common ldobj+stloc combination */
7731 loc_index = ip [5] - CEE_STLOC_0;
7738 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7739 CHECK_LOCAL (loc_index);
7741 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7742 ins->dreg = cfg->locals [loc_index]->dreg;
7748 /* Optimize the ldobj+stobj combination */
7749 /* The reference case ends up being a load+store anyway */
7750 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7755 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7762 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7771 CHECK_STACK_OVF (1);
7773 n = read32 (ip + 1);
7775 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7776 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7777 ins->type = STACK_OBJ;
7780 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7781 MonoInst *iargs [1];
7783 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7784 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7786 if (cfg->opt & MONO_OPT_SHARED) {
7787 MonoInst *iargs [3];
7789 if (cfg->compile_aot) {
7790 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7792 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7793 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7794 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7795 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7796 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7798 if (bblock->out_of_line) {
7799 MonoInst *iargs [2];
7801 if (image == mono_defaults.corlib) {
7803 * Avoid relocations in AOT and save some space by using a
7804 * version of helper_ldstr specialized to mscorlib.
7806 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7807 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7809 /* Avoid creating the string object */
7810 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7811 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7812 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7816 if (cfg->compile_aot) {
7817 NEW_LDSTRCONST (cfg, ins, image, n);
7819 MONO_ADD_INS (bblock, ins);
7822 NEW_PCONST (cfg, ins, NULL);
7823 ins->type = STACK_OBJ;
7824 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7826 OUT_OF_MEMORY_FAILURE;
7829 MONO_ADD_INS (bblock, ins);
7838 MonoInst *iargs [2];
7839 MonoMethodSignature *fsig;
7842 MonoInst *vtable_arg = NULL;
7845 token = read32 (ip + 1);
7846 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7847 if (!cmethod || mono_loader_get_last_error ())
7849 fsig = mono_method_get_signature (cmethod, image, token);
7853 mono_save_token_info (cfg, image, token, cmethod);
7855 if (!mono_class_init (cmethod->klass))
7858 if (cfg->generic_sharing_context)
7859 context_used = mono_method_check_context_used (cmethod);
7861 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7862 if (check_linkdemand (cfg, method, cmethod))
7864 CHECK_CFG_EXCEPTION;
7865 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7866 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7869 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
7870 emit_generic_class_init (cfg, cmethod->klass);
7871 CHECK_TYPELOAD (cmethod->klass);
7874 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7875 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7876 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7877 mono_class_vtable (cfg->domain, cmethod->klass);
7878 CHECK_TYPELOAD (cmethod->klass);
7880 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7881 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7884 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7885 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7887 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7889 CHECK_TYPELOAD (cmethod->klass);
7890 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7895 n = fsig->param_count;
7899 * Generate smaller code for the common newobj <exception> instruction in
7900 * argument checking code.
7902 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7903 is_exception_class (cmethod->klass) && n <= 2 &&
7904 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7905 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7906 MonoInst *iargs [3];
7908 g_assert (!vtable_arg);
7912 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7915 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7919 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7924 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7927 g_assert_not_reached ();
7935 /* move the args to allow room for 'this' in the first position */
7941 /* check_call_signature () requires sp[0] to be set */
7942 this_ins.type = STACK_OBJ;
7944 if (check_call_signature (cfg, fsig, sp))
7949 if (mini_class_is_system_array (cmethod->klass)) {
7950 g_assert (!vtable_arg);
7952 *sp = emit_get_rgctx_method (cfg, context_used,
7953 cmethod, MONO_RGCTX_INFO_METHOD);
7955 /* Avoid varargs in the common case */
7956 if (fsig->param_count == 1)
7957 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7958 else if (fsig->param_count == 2)
7959 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7960 else if (fsig->param_count == 3)
7961 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
7963 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7964 } else if (cmethod->string_ctor) {
7965 g_assert (!context_used);
7966 g_assert (!vtable_arg);
7967 /* we simply pass a null pointer */
7968 EMIT_NEW_PCONST (cfg, *sp, NULL);
7969 /* now call the string ctor */
7970 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL, NULL);
7972 MonoInst* callvirt_this_arg = NULL;
7974 if (cmethod->klass->valuetype) {
7975 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7976 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7977 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7982 * The code generated by mini_emit_virtual_call () expects
7983 * iargs [0] to be a boxed instance, but luckily the vcall
7984 * will be transformed into a normal call there.
7986 } else if (context_used) {
7987 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
7990 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7992 CHECK_TYPELOAD (cmethod->klass);
7995 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7996 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7997 * As a workaround, we call class cctors before allocating objects.
7999 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8000 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8001 if (cfg->verbose_level > 2)
8002 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
8003 class_inits = g_slist_prepend (class_inits, vtable);
8006 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
8009 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
8012 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
8014 /* Now call the actual ctor */
8015 /* Avoid virtual calls to ctors if possible */
8016 if (cmethod->klass->marshalbyref)
8017 callvirt_this_arg = sp [0];
8020 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
8021 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8022 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8027 CHECK_CFG_EXCEPTION;
8028 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
8029 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
8030 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
8031 !g_list_find (dont_inline, cmethod)) {
8034 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
8035 cfg->real_offset += 5;
8038 inline_costs += costs - 5;
8041 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL, NULL);
8043 } else if (context_used &&
8044 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
8045 !mono_class_generic_sharing_enabled (cmethod->klass))) {
8046 MonoInst *cmethod_addr;
8048 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
8049 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8051 mono_emit_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
8054 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp,
8055 callvirt_this_arg, NULL, vtable_arg);
8059 if (alloc == NULL) {
8061 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
8062 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
8076 token = read32 (ip + 1);
8077 klass = mini_get_class (method, token, generic_context);
8078 CHECK_TYPELOAD (klass);
8079 if (sp [0]->type != STACK_OBJ)
8082 if (cfg->generic_sharing_context)
8083 context_used = mono_class_check_context_used (klass);
8085 if (!context_used && mini_class_has_reference_variant_generic_argument (klass, context_used)) {
8086 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
8093 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8096 /*FIXME AOT support*/
8097 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8099 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
8100 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
8103 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8104 MonoMethod *mono_castclass;
8105 MonoInst *iargs [1];
8108 mono_castclass = mono_marshal_get_castclass (klass);
8111 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
8112 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8113 CHECK_CFG_EXCEPTION;
8114 g_assert (costs > 0);
8117 cfg->real_offset += 5;
8122 inline_costs += costs;
8125 ins = handle_castclass (cfg, klass, *sp, context_used);
8126 CHECK_CFG_EXCEPTION;
8136 token = read32 (ip + 1);
8137 klass = mini_get_class (method, token, generic_context);
8138 CHECK_TYPELOAD (klass);
8139 if (sp [0]->type != STACK_OBJ)
8142 if (cfg->generic_sharing_context)
8143 context_used = mono_class_check_context_used (klass);
8145 if (!context_used && mini_class_has_reference_variant_generic_argument (klass, context_used)) {
8146 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
8153 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8156 /*FIXME AOT support*/
8157 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8159 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
8162 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8163 MonoMethod *mono_isinst;
8164 MonoInst *iargs [1];
8167 mono_isinst = mono_marshal_get_isinst (klass);
8170 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
8171 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8172 CHECK_CFG_EXCEPTION;
8173 g_assert (costs > 0);
8176 cfg->real_offset += 5;
8181 inline_costs += costs;
8184 ins = handle_isinst (cfg, klass, *sp, context_used);
8185 CHECK_CFG_EXCEPTION;
8192 case CEE_UNBOX_ANY: {
8196 token = read32 (ip + 1);
8197 klass = mini_get_class (method, token, generic_context);
8198 CHECK_TYPELOAD (klass);
8200 mono_save_token_info (cfg, image, token, klass);
8202 if (cfg->generic_sharing_context)
8203 context_used = mono_class_check_context_used (klass);
8205 if (generic_class_is_reference_type (cfg, klass)) {
8206 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
8207 if (!context_used && mini_class_has_reference_variant_generic_argument (klass, context_used)) {
8208 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
8215 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8218 /*FIXME AOT support*/
8219 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8221 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
8222 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
8225 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8226 MonoMethod *mono_castclass;
8227 MonoInst *iargs [1];
8230 mono_castclass = mono_marshal_get_castclass (klass);
8233 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
8234 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8235 CHECK_CFG_EXCEPTION;
8236 g_assert (costs > 0);
8239 cfg->real_offset += 5;
8243 inline_costs += costs;
8245 ins = handle_castclass (cfg, klass, *sp, context_used);
8246 CHECK_CFG_EXCEPTION;
8254 if (mono_class_is_nullable (klass)) {
8255 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
8262 ins = handle_unbox (cfg, klass, sp, context_used);
8268 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
8281 token = read32 (ip + 1);
8282 klass = mini_get_class (method, token, generic_context);
8283 CHECK_TYPELOAD (klass);
8285 mono_save_token_info (cfg, image, token, klass);
8287 if (cfg->generic_sharing_context)
8288 context_used = mono_class_check_context_used (klass);
8290 if (generic_class_is_reference_type (cfg, klass)) {
8296 if (klass == mono_defaults.void_class)
8298 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
8300 /* frequent check in generic code: box (struct), brtrue */
8302 // FIXME: LLVM can't handle the inconsistent bb linking
8303 if (!mono_class_is_nullable (klass) &&
8304 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
8305 (ip [5] == CEE_BRTRUE ||
8306 ip [5] == CEE_BRTRUE_S ||
8307 ip [5] == CEE_BRFALSE ||
8308 ip [5] == CEE_BRFALSE_S)) {
8309 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
8311 MonoBasicBlock *true_bb, *false_bb;
8315 if (cfg->verbose_level > 3) {
8316 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8317 printf ("<box+brtrue opt>\n");
8325 target = ip + 1 + (signed char)(*ip);
8332 target = ip + 4 + (gint)(read32 (ip));
8336 g_assert_not_reached ();
8340 * We need to link both bblocks, since it is needed for handling stack
8341 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
8342 * Branching to only one of them would lead to inconsistencies, so
8343 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
8345 GET_BBLOCK (cfg, true_bb, target);
8346 GET_BBLOCK (cfg, false_bb, ip);
8348 mono_link_bblock (cfg, cfg->cbb, true_bb);
8349 mono_link_bblock (cfg, cfg->cbb, false_bb);
8351 if (sp != stack_start) {
8352 handle_stack_args (cfg, stack_start, sp - stack_start);
8354 CHECK_UNVERIFIABLE (cfg);
8357 if (COMPILE_LLVM (cfg)) {
8358 dreg = alloc_ireg (cfg);
8359 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
8360 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
8362 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
8364 /* The JIT can't eliminate the iconst+compare */
8365 MONO_INST_NEW (cfg, ins, OP_BR);
8366 ins->inst_target_bb = is_true ? true_bb : false_bb;
8367 MONO_ADD_INS (cfg->cbb, ins);
8370 start_new_bblock = 1;
8374 *sp++ = handle_box (cfg, val, klass, context_used);
8376 CHECK_CFG_EXCEPTION;
8385 token = read32 (ip + 1);
8386 klass = mini_get_class (method, token, generic_context);
8387 CHECK_TYPELOAD (klass);
8389 mono_save_token_info (cfg, image, token, klass);
8391 if (cfg->generic_sharing_context)
8392 context_used = mono_class_check_context_used (klass);
8394 if (mono_class_is_nullable (klass)) {
8397 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
8398 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
8402 ins = handle_unbox (cfg, klass, sp, context_used);
8412 MonoClassField *field;
8416 if (*ip == CEE_STFLD) {
8423 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
8425 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
8428 token = read32 (ip + 1);
8429 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8430 field = mono_method_get_wrapper_data (method, token);
8431 klass = field->parent;
8434 field = mono_field_from_token (image, token, &klass, generic_context);
8438 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8439 FIELD_ACCESS_FAILURE;
8440 mono_class_init (klass);
8442 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
8443 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
8444 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8445 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8448 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
8449 if (*ip == CEE_STFLD) {
8450 if (target_type_is_incompatible (cfg, field->type, sp [1]))
8452 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8453 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
8454 MonoInst *iargs [5];
8457 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8458 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8459 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
8463 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8464 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
8465 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8466 CHECK_CFG_EXCEPTION;
8467 g_assert (costs > 0);
8469 cfg->real_offset += 5;
8472 inline_costs += costs;
8474 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
8479 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8481 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
8482 if (sp [0]->opcode != OP_LDADDR)
8483 store->flags |= MONO_INST_FAULT;
8485 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
8486 /* insert call to write barrier */
8490 dreg = alloc_preg (cfg);
8491 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8492 emit_write_barrier (cfg, ptr, sp [1], -1);
8495 store->flags |= ins_flag;
8502 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8503 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
8504 MonoInst *iargs [4];
8507 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8508 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8509 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
8510 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8511 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
8512 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8513 CHECK_CFG_EXCEPTION;
8515 g_assert (costs > 0);
8517 cfg->real_offset += 5;
8521 inline_costs += costs;
8523 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
8527 if (sp [0]->type == STACK_VTYPE) {
8530 /* Have to compute the address of the variable */
8532 var = get_vreg_to_inst (cfg, sp [0]->dreg);
8534 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
8536 g_assert (var->klass == klass);
8538 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
8542 if (*ip == CEE_LDFLDA) {
8543 if (sp [0]->type == STACK_OBJ) {
8544 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
8545 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
8548 dreg = alloc_preg (cfg);
8550 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8551 ins->klass = mono_class_from_mono_type (field->type);
8552 ins->type = STACK_MP;
8557 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8559 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
8560 load->flags |= ins_flag;
8561 if (sp [0]->opcode != OP_LDADDR)
8562 load->flags |= MONO_INST_FAULT;
8573 MonoClassField *field;
8574 gpointer addr = NULL;
8575 gboolean is_special_static;
8579 token = read32 (ip + 1);
8581 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8582 field = mono_method_get_wrapper_data (method, token);
8583 klass = field->parent;
8586 field = mono_field_from_token (image, token, &klass, generic_context);
8589 mono_class_init (klass);
8590 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8591 FIELD_ACCESS_FAILURE;
8593 /* if the class is Critical then transparent code cannot access its fields */
8594 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8595 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8598 * We can only support shared generic static
8599 * field access on architectures where the
8600 * trampoline code has been extended to handle
8601 * the generic class init.
8603 #ifndef MONO_ARCH_VTABLE_REG
8604 GENERIC_SHARING_FAILURE (*ip);
8607 if (cfg->generic_sharing_context)
8608 context_used = mono_class_check_context_used (klass);
8610 ftype = mono_field_get_type (field);
8612 g_assert (!(ftype->attrs & FIELD_ATTRIBUTE_LITERAL));
8614 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8615 * to be called here.
8617 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8618 mono_class_vtable (cfg->domain, klass);
8619 CHECK_TYPELOAD (klass);
8621 mono_domain_lock (cfg->domain);
8622 if (cfg->domain->special_static_fields)
8623 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8624 mono_domain_unlock (cfg->domain);
8626 is_special_static = mono_class_field_is_special_static (field);
8628 /* Generate IR to compute the field address */
8629 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
8631 * Fast access to TLS data
8632 * Inline version of get_thread_static_data () in
8636 int idx, static_data_reg, array_reg, dreg;
8637 MonoInst *thread_ins;
8639 // offset &= 0x7fffffff;
8640 // idx = (offset >> 24) - 1;
8641 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
8643 thread_ins = mono_get_thread_intrinsic (cfg);
8644 MONO_ADD_INS (cfg->cbb, thread_ins);
8645 static_data_reg = alloc_ireg (cfg);
8646 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
8648 if (cfg->compile_aot) {
8649 int offset_reg, offset2_reg, idx_reg;
8651 /* For TLS variables, this will return the TLS offset */
8652 EMIT_NEW_SFLDACONST (cfg, ins, field);
8653 offset_reg = ins->dreg;
8654 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
8655 idx_reg = alloc_ireg (cfg);
8656 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
8657 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
8658 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
8659 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
8660 array_reg = alloc_ireg (cfg);
8661 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
8662 offset2_reg = alloc_ireg (cfg);
8663 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
8664 dreg = alloc_ireg (cfg);
8665 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
8667 offset = (gsize)addr & 0x7fffffff;
8668 idx = (offset >> 24) - 1;
8670 array_reg = alloc_ireg (cfg);
8671 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
8672 dreg = alloc_ireg (cfg);
8673 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
8675 } else if ((cfg->opt & MONO_OPT_SHARED) ||
8676 (cfg->compile_aot && is_special_static) ||
8677 (context_used && is_special_static)) {
8678 MonoInst *iargs [2];
8680 g_assert (field->parent);
8681 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8683 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8684 field, MONO_RGCTX_INFO_CLASS_FIELD);
8686 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8688 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8689 } else if (context_used) {
8690 MonoInst *static_data;
8693 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8694 method->klass->name_space, method->klass->name, method->name,
8695 depth, field->offset);
8698 if (mono_class_needs_cctor_run (klass, method))
8699 emit_generic_class_init (cfg, klass);
8702 * The pointer we're computing here is
8704 * super_info.static_data + field->offset
8706 static_data = emit_get_rgctx_klass (cfg, context_used,
8707 klass, MONO_RGCTX_INFO_STATIC_DATA);
8709 if (field->offset == 0) {
8712 int addr_reg = mono_alloc_preg (cfg);
8713 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8715 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8716 MonoInst *iargs [2];
8718 g_assert (field->parent);
8719 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8720 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8721 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8723 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8725 CHECK_TYPELOAD (klass);
8727 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8728 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8729 if (cfg->verbose_level > 2)
8730 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8731 class_inits = g_slist_prepend (class_inits, vtable);
8733 if (cfg->run_cctors) {
8735 /* This makes so that inline cannot trigger */
8736 /* .cctors: too many apps depend on them */
8737 /* running with a specific order... */
8738 if (! vtable->initialized)
8740 ex = mono_runtime_class_init_full (vtable, FALSE);
8742 set_exception_object (cfg, ex);
8743 goto exception_exit;
8747 addr = (char*)vtable->data + field->offset;
8749 if (cfg->compile_aot)
8750 EMIT_NEW_SFLDACONST (cfg, ins, field);
8752 EMIT_NEW_PCONST (cfg, ins, addr);
8754 MonoInst *iargs [1];
8755 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8756 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8760 /* Generate IR to do the actual load/store operation */
8762 if (*ip == CEE_LDSFLDA) {
8763 ins->klass = mono_class_from_mono_type (ftype);
8764 ins->type = STACK_PTR;
8766 } else if (*ip == CEE_STSFLD) {
8771 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, sp [0]->dreg);
8772 store->flags |= ins_flag;
8774 gboolean is_const = FALSE;
8775 MonoVTable *vtable = NULL;
8777 if (!context_used) {
8778 vtable = mono_class_vtable (cfg->domain, klass);
8779 CHECK_TYPELOAD (klass);
8781 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8782 vtable->initialized && (ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8783 gpointer addr = (char*)vtable->data + field->offset;
8784 int ro_type = ftype->type;
8785 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
8786 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
8788 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8791 case MONO_TYPE_BOOLEAN:
8793 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8797 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8800 case MONO_TYPE_CHAR:
8802 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8806 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8811 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8815 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8821 case MONO_TYPE_FNPTR:
8822 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8823 type_to_eval_stack_type ((cfg), field->type, *sp);
8826 case MONO_TYPE_STRING:
8827 case MONO_TYPE_OBJECT:
8828 case MONO_TYPE_CLASS:
8829 case MONO_TYPE_SZARRAY:
8830 case MONO_TYPE_ARRAY:
8831 if (!mono_gc_is_moving ()) {
8832 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8833 type_to_eval_stack_type ((cfg), field->type, *sp);
8841 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8846 case MONO_TYPE_VALUETYPE:
8856 CHECK_STACK_OVF (1);
8858 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8859 load->flags |= ins_flag;
8872 token = read32 (ip + 1);
8873 klass = mini_get_class (method, token, generic_context);
8874 CHECK_TYPELOAD (klass);
8875 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8876 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8877 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
8878 generic_class_is_reference_type (cfg, klass)) {
8879 /* insert call to write barrier */
8880 emit_write_barrier (cfg, sp [0], sp [1], -1);
8892 const char *data_ptr;
8894 guint32 field_token;
8900 token = read32 (ip + 1);
8902 klass = mini_get_class (method, token, generic_context);
8903 CHECK_TYPELOAD (klass);
8905 if (cfg->generic_sharing_context)
8906 context_used = mono_class_check_context_used (klass);
8908 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
8909 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
8910 ins->sreg1 = sp [0]->dreg;
8911 ins->type = STACK_I4;
8912 ins->dreg = alloc_ireg (cfg);
8913 MONO_ADD_INS (cfg->cbb, ins);
8914 *sp = mono_decompose_opcode (cfg, ins);
8919 MonoClass *array_class = mono_array_class_get (klass, 1);
8920 /* FIXME: we cannot get a managed
8921 allocator because we can't get the
8922 open generic class's vtable. We
8923 have the same problem in
8924 handle_alloc(). This
8925 needs to be solved so that we can
8926 have managed allocs of shared
8929 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
8930 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
8932 MonoMethod *managed_alloc = NULL;
8934 /* FIXME: Decompose later to help abcrem */
8937 args [0] = emit_get_rgctx_klass (cfg, context_used,
8938 array_class, MONO_RGCTX_INFO_VTABLE);
8943 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
8945 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8947 if (cfg->opt & MONO_OPT_SHARED) {
8948 /* Decompose now to avoid problems with references to the domainvar */
8949 MonoInst *iargs [3];
8951 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8952 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8955 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8957 /* Decompose later since it is needed by abcrem */
8958 MonoClass *array_type = mono_array_class_get (klass, 1);
8959 mono_class_vtable (cfg->domain, array_type);
8960 CHECK_TYPELOAD (array_type);
8962 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8963 ins->dreg = alloc_preg (cfg);
8964 ins->sreg1 = sp [0]->dreg;
8965 ins->inst_newa_class = klass;
8966 ins->type = STACK_OBJ;
8968 MONO_ADD_INS (cfg->cbb, ins);
8969 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8970 cfg->cbb->has_array_access = TRUE;
8972 /* Needed so mono_emit_load_get_addr () gets called */
8973 mono_get_got_var (cfg);
8983 * we inline/optimize the initialization sequence if possible.
8984 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8985 * for small sizes open code the memcpy
8986 * ensure the rva field is big enough
8988 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8989 MonoMethod *memcpy_method = get_memcpy_method ();
8990 MonoInst *iargs [3];
8991 int add_reg = alloc_preg (cfg);
8993 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8994 if (cfg->compile_aot) {
8995 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8997 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8999 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
9000 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9009 if (sp [0]->type != STACK_OBJ)
9012 dreg = alloc_preg (cfg);
9013 MONO_INST_NEW (cfg, ins, OP_LDLEN);
9014 ins->dreg = alloc_preg (cfg);
9015 ins->sreg1 = sp [0]->dreg;
9016 ins->type = STACK_I4;
9017 /* This flag will be inherited by the decomposition */
9018 ins->flags |= MONO_INST_FAULT;
9019 MONO_ADD_INS (cfg->cbb, ins);
9020 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
9021 cfg->cbb->has_array_access = TRUE;
9029 if (sp [0]->type != STACK_OBJ)
9032 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9034 klass = mini_get_class (method, read32 (ip + 1), generic_context);
9035 CHECK_TYPELOAD (klass);
9036 /* we need to make sure that this array is exactly the type it needs
9037 * to be for correctness. the wrappers are lax with their usage
9038 * so we need to ignore them here
9040 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
9041 MonoClass *array_class = mono_array_class_get (klass, 1);
9042 mini_emit_check_array_type (cfg, sp [0], array_class);
9043 CHECK_TYPELOAD (array_class);
9047 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9062 case CEE_LDELEM_REF: {
9068 if (*ip == CEE_LDELEM) {
9070 token = read32 (ip + 1);
9071 klass = mini_get_class (method, token, generic_context);
9072 CHECK_TYPELOAD (klass);
9073 mono_class_init (klass);
9076 klass = array_access_to_klass (*ip);
9078 if (sp [0]->type != STACK_OBJ)
9081 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9083 if (sp [1]->opcode == OP_ICONST) {
9084 int array_reg = sp [0]->dreg;
9085 int index_reg = sp [1]->dreg;
9086 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
9088 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
9089 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
9091 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9092 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
9095 if (*ip == CEE_LDELEM)
9108 case CEE_STELEM_REF:
9115 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9117 if (*ip == CEE_STELEM) {
9119 token = read32 (ip + 1);
9120 klass = mini_get_class (method, token, generic_context);
9121 CHECK_TYPELOAD (klass);
9122 mono_class_init (klass);
9125 klass = array_access_to_klass (*ip);
9127 if (sp [0]->type != STACK_OBJ)
9130 /* storing a NULL doesn't need any of the complex checks in stelemref */
9131 if (generic_class_is_reference_type (cfg, klass) &&
9132 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
9133 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
9134 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
9135 MonoInst *iargs [3];
9138 mono_class_setup_vtable (obj_array);
9139 g_assert (helper->slot);
9141 if (sp [0]->type != STACK_OBJ)
9143 if (sp [2]->type != STACK_OBJ)
9150 mono_emit_method_call (cfg, helper, iargs, sp [0]);
9152 if (sp [1]->opcode == OP_ICONST) {
9153 int array_reg = sp [0]->dreg;
9154 int index_reg = sp [1]->dreg;
9155 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
9157 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
9158 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
9160 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9161 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
9165 if (*ip == CEE_STELEM)
9172 case CEE_CKFINITE: {
9176 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
9177 ins->sreg1 = sp [0]->dreg;
9178 ins->dreg = alloc_freg (cfg);
9179 ins->type = STACK_R8;
9180 MONO_ADD_INS (bblock, ins);
9182 *sp++ = mono_decompose_opcode (cfg, ins);
9187 case CEE_REFANYVAL: {
9188 MonoInst *src_var, *src;
9190 int klass_reg = alloc_preg (cfg);
9191 int dreg = alloc_preg (cfg);
9194 MONO_INST_NEW (cfg, ins, *ip);
9197 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
9198 CHECK_TYPELOAD (klass);
9199 mono_class_init (klass);
9201 if (cfg->generic_sharing_context)
9202 context_used = mono_class_check_context_used (klass);
9205 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9207 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9208 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9209 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
9212 MonoInst *klass_ins;
9214 klass_ins = emit_get_rgctx_klass (cfg, context_used,
9215 klass, MONO_RGCTX_INFO_KLASS);
9218 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
9219 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
9221 mini_emit_class_check (cfg, klass_reg, klass);
9223 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
9224 ins->type = STACK_MP;
9229 case CEE_MKREFANY: {
9230 MonoInst *loc, *addr;
9233 MONO_INST_NEW (cfg, ins, *ip);
9236 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
9237 CHECK_TYPELOAD (klass);
9238 mono_class_init (klass);
9240 if (cfg->generic_sharing_context)
9241 context_used = mono_class_check_context_used (klass);
9243 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
9244 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
9247 MonoInst *const_ins;
9248 int type_reg = alloc_preg (cfg);
9250 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
9251 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
9252 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
9253 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
9254 } else if (cfg->compile_aot) {
9255 int const_reg = alloc_preg (cfg);
9256 int type_reg = alloc_preg (cfg);
9258 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
9259 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
9260 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
9261 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
9263 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
9264 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
9266 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
9268 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
9269 ins->type = STACK_VTYPE;
9270 ins->klass = mono_defaults.typed_reference_class;
9277 MonoClass *handle_class;
9279 CHECK_STACK_OVF (1);
9282 n = read32 (ip + 1);
9284 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
9285 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9286 handle = mono_method_get_wrapper_data (method, n);
9287 handle_class = mono_method_get_wrapper_data (method, n + 1);
9288 if (handle_class == mono_defaults.typehandle_class)
9289 handle = &((MonoClass*)handle)->byval_arg;
9292 handle = mono_ldtoken (image, n, &handle_class, generic_context);
9296 mono_class_init (handle_class);
9297 if (cfg->generic_sharing_context) {
9298 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
9299 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
9300 /* This case handles ldtoken
9301 of an open type, like for
9304 } else if (handle_class == mono_defaults.typehandle_class) {
9305 /* If we get a MONO_TYPE_CLASS
9306 then we need to provide the
9308 instantiation of it. */
9309 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
9312 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
9313 } else if (handle_class == mono_defaults.fieldhandle_class)
9314 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
9315 else if (handle_class == mono_defaults.methodhandle_class)
9316 context_used = mono_method_check_context_used (handle);
9318 g_assert_not_reached ();
9321 if ((cfg->opt & MONO_OPT_SHARED) &&
9322 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
9323 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
9324 MonoInst *addr, *vtvar, *iargs [3];
9325 int method_context_used;
9327 if (cfg->generic_sharing_context)
9328 method_context_used = mono_method_check_context_used (method);
9330 method_context_used = 0;
9332 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9334 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9335 EMIT_NEW_ICONST (cfg, iargs [1], n);
9336 if (method_context_used) {
9337 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
9338 method, MONO_RGCTX_INFO_METHOD);
9339 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
9341 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
9342 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
9344 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9346 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9348 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9350 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
9351 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
9352 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
9353 (cmethod->klass == mono_defaults.monotype_class->parent) &&
9354 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
9355 MonoClass *tclass = mono_class_from_mono_type (handle);
9357 mono_class_init (tclass);
9359 ins = emit_get_rgctx_klass (cfg, context_used,
9360 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
9361 } else if (cfg->compile_aot) {
9362 if (method->wrapper_type) {
9363 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
9364 /* Special case for static synchronized wrappers */
9365 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
9367 /* FIXME: n is not a normal token */
9368 cfg->disable_aot = TRUE;
9369 EMIT_NEW_PCONST (cfg, ins, NULL);
9372 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
9375 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
9377 ins->type = STACK_OBJ;
9378 ins->klass = cmethod->klass;
9381 MonoInst *addr, *vtvar;
9383 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9386 if (handle_class == mono_defaults.typehandle_class) {
9387 ins = emit_get_rgctx_klass (cfg, context_used,
9388 mono_class_from_mono_type (handle),
9389 MONO_RGCTX_INFO_TYPE);
9390 } else if (handle_class == mono_defaults.methodhandle_class) {
9391 ins = emit_get_rgctx_method (cfg, context_used,
9392 handle, MONO_RGCTX_INFO_METHOD);
9393 } else if (handle_class == mono_defaults.fieldhandle_class) {
9394 ins = emit_get_rgctx_field (cfg, context_used,
9395 handle, MONO_RGCTX_INFO_CLASS_FIELD);
9397 g_assert_not_reached ();
9399 } else if (cfg->compile_aot) {
9400 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
9402 EMIT_NEW_PCONST (cfg, ins, handle);
9404 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9405 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9406 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9416 MONO_INST_NEW (cfg, ins, OP_THROW);
9418 ins->sreg1 = sp [0]->dreg;
9420 bblock->out_of_line = TRUE;
9421 MONO_ADD_INS (bblock, ins);
9422 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
9423 MONO_ADD_INS (bblock, ins);
9426 link_bblock (cfg, bblock, end_bblock);
9427 start_new_bblock = 1;
9429 case CEE_ENDFINALLY:
9430 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
9431 MONO_ADD_INS (bblock, ins);
9433 start_new_bblock = 1;
9436 * Control will leave the method so empty the stack, otherwise
9437 * the next basic block will start with a nonempty stack.
9439 while (sp != stack_start) {
9447 if (*ip == CEE_LEAVE) {
9449 target = ip + 5 + (gint32)read32(ip + 1);
9452 target = ip + 2 + (signed char)(ip [1]);
9455 /* empty the stack */
9456 while (sp != stack_start) {
9461 * If this leave statement is in a catch block, check for a
9462 * pending exception, and rethrow it if necessary.
9463 * We avoid doing this in runtime invoke wrappers, since those are called
9464 * by native code which expects the wrapper to catch all exceptions.
9466 for (i = 0; i < header->num_clauses; ++i) {
9467 MonoExceptionClause *clause = &header->clauses [i];
9470 * Use <= in the final comparison to handle clauses with multiple
9471 * leave statements, like in bug #78024.
9472 * The ordering of the exception clauses guarantees that we find the
9475 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
9477 MonoBasicBlock *dont_throw;
9482 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
9485 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
9487 NEW_BBLOCK (cfg, dont_throw);
9490 * Currently, we always rethrow the abort exception, despite the
9491 * fact that this is not correct. See thread6.cs for an example.
9492 * But propagating the abort exception is more important than
9493 * getting the semantics right.
9495 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
9496 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
9497 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
9499 MONO_START_BB (cfg, dont_throw);
9504 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
9506 MonoExceptionClause *clause;
9508 for (tmp = handlers; tmp; tmp = tmp->next) {
9510 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
9512 link_bblock (cfg, bblock, tblock);
9513 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
9514 ins->inst_target_bb = tblock;
9515 ins->inst_eh_block = clause;
9516 MONO_ADD_INS (bblock, ins);
9517 bblock->has_call_handler = 1;
9518 if (COMPILE_LLVM (cfg)) {
9519 MonoBasicBlock *target_bb;
9522 * Link the finally bblock with the target, since it will
9523 * conceptually branch there.
9524 * FIXME: Have to link the bblock containing the endfinally.
9526 GET_BBLOCK (cfg, target_bb, target);
9527 link_bblock (cfg, tblock, target_bb);
9530 g_list_free (handlers);
9533 MONO_INST_NEW (cfg, ins, OP_BR);
9534 MONO_ADD_INS (bblock, ins);
9535 GET_BBLOCK (cfg, tblock, target);
9536 link_bblock (cfg, bblock, tblock);
9537 ins->inst_target_bb = tblock;
9538 start_new_bblock = 1;
9540 if (*ip == CEE_LEAVE)
9549 * Mono specific opcodes
9551 case MONO_CUSTOM_PREFIX: {
9553 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
9557 case CEE_MONO_ICALL: {
9559 MonoJitICallInfo *info;
9561 token = read32 (ip + 2);
9562 func = mono_method_get_wrapper_data (method, token);
9563 info = mono_find_jit_icall_by_addr (func);
9566 CHECK_STACK (info->sig->param_count);
9567 sp -= info->sig->param_count;
9569 ins = mono_emit_jit_icall (cfg, info->func, sp);
9570 if (!MONO_TYPE_IS_VOID (info->sig->ret))
9574 inline_costs += 10 * num_calls++;
9578 case CEE_MONO_LDPTR: {
9581 CHECK_STACK_OVF (1);
9583 token = read32 (ip + 2);
9585 ptr = mono_method_get_wrapper_data (method, token);
9586 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
9587 MonoJitICallInfo *callinfo;
9588 const char *icall_name;
9590 icall_name = method->name + strlen ("__icall_wrapper_");
9591 g_assert (icall_name);
9592 callinfo = mono_find_jit_icall_by_name (icall_name);
9593 g_assert (callinfo);
9595 if (ptr == callinfo->func) {
9596 /* Will be transformed into an AOTCONST later */
9597 EMIT_NEW_PCONST (cfg, ins, ptr);
9603 /* FIXME: Generalize this */
9604 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
9605 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
9610 EMIT_NEW_PCONST (cfg, ins, ptr);
9613 inline_costs += 10 * num_calls++;
9614 /* Can't embed random pointers into AOT code */
9615 cfg->disable_aot = 1;
9618 case CEE_MONO_ICALL_ADDR: {
9619 MonoMethod *cmethod;
9622 CHECK_STACK_OVF (1);
9624 token = read32 (ip + 2);
9626 cmethod = mono_method_get_wrapper_data (method, token);
9628 if (cfg->compile_aot) {
9629 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
9631 ptr = mono_lookup_internal_call (cmethod);
9633 EMIT_NEW_PCONST (cfg, ins, ptr);
9639 case CEE_MONO_VTADDR: {
9640 MonoInst *src_var, *src;
9646 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9647 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
9652 case CEE_MONO_NEWOBJ: {
9653 MonoInst *iargs [2];
9655 CHECK_STACK_OVF (1);
9657 token = read32 (ip + 2);
9658 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9659 mono_class_init (klass);
9660 NEW_DOMAINCONST (cfg, iargs [0]);
9661 MONO_ADD_INS (cfg->cbb, iargs [0]);
9662 NEW_CLASSCONST (cfg, iargs [1], klass);
9663 MONO_ADD_INS (cfg->cbb, iargs [1]);
9664 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9666 inline_costs += 10 * num_calls++;
9669 case CEE_MONO_OBJADDR:
9672 MONO_INST_NEW (cfg, ins, OP_MOVE);
9673 ins->dreg = alloc_preg (cfg);
9674 ins->sreg1 = sp [0]->dreg;
9675 ins->type = STACK_MP;
9676 MONO_ADD_INS (cfg->cbb, ins);
9680 case CEE_MONO_LDNATIVEOBJ:
9682 * Similar to LDOBJ, but instead load the unmanaged
9683 * representation of the vtype to the stack.
9688 token = read32 (ip + 2);
9689 klass = mono_method_get_wrapper_data (method, token);
9690 g_assert (klass->valuetype);
9691 mono_class_init (klass);
9694 MonoInst *src, *dest, *temp;
9697 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9698 temp->backend.is_pinvoke = 1;
9699 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9700 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9702 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9703 dest->type = STACK_VTYPE;
9704 dest->klass = klass;
9710 case CEE_MONO_RETOBJ: {
9712 * Same as RET, but return the native representation of a vtype
9715 g_assert (cfg->ret);
9716 g_assert (mono_method_signature (method)->pinvoke);
9721 token = read32 (ip + 2);
9722 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9724 if (!cfg->vret_addr) {
9725 g_assert (cfg->ret_var_is_local);
9727 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9729 EMIT_NEW_RETLOADA (cfg, ins);
9731 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9733 if (sp != stack_start)
9736 MONO_INST_NEW (cfg, ins, OP_BR);
9737 ins->inst_target_bb = end_bblock;
9738 MONO_ADD_INS (bblock, ins);
9739 link_bblock (cfg, bblock, end_bblock);
9740 start_new_bblock = 1;
9744 case CEE_MONO_CISINST:
9745 case CEE_MONO_CCASTCLASS: {
9750 token = read32 (ip + 2);
9751 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9752 if (ip [1] == CEE_MONO_CISINST)
9753 ins = handle_cisinst (cfg, klass, sp [0]);
9755 ins = handle_ccastclass (cfg, klass, sp [0]);
9761 case CEE_MONO_SAVE_LMF:
9762 case CEE_MONO_RESTORE_LMF:
9763 #ifdef MONO_ARCH_HAVE_LMF_OPS
9764 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9765 MONO_ADD_INS (bblock, ins);
9766 cfg->need_lmf_area = TRUE;
9770 case CEE_MONO_CLASSCONST:
9771 CHECK_STACK_OVF (1);
9773 token = read32 (ip + 2);
9774 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9777 inline_costs += 10 * num_calls++;
9779 case CEE_MONO_NOT_TAKEN:
9780 bblock->out_of_line = TRUE;
9784 CHECK_STACK_OVF (1);
9786 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9787 ins->dreg = alloc_preg (cfg);
9788 ins->inst_offset = (gint32)read32 (ip + 2);
9789 ins->type = STACK_PTR;
9790 MONO_ADD_INS (bblock, ins);
9794 case CEE_MONO_DYN_CALL: {
9797 /* It would be easier to call a trampoline, but that would put an
9798 * extra frame on the stack, confusing exception handling. So
9799 * implement it inline using an opcode for now.
9802 if (!cfg->dyn_call_var) {
9803 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9804 /* prevent it from being register allocated */
9805 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
9808 /* Has to use a call inst since it local regalloc expects it */
9809 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
9810 ins = (MonoInst*)call;
9812 ins->sreg1 = sp [0]->dreg;
9813 ins->sreg2 = sp [1]->dreg;
9814 MONO_ADD_INS (bblock, ins);
9816 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
9817 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
9821 inline_costs += 10 * num_calls++;
9826 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9836 /* somewhat similar to LDTOKEN */
9837 MonoInst *addr, *vtvar;
9838 CHECK_STACK_OVF (1);
9839 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9841 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9842 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9844 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9845 ins->type = STACK_VTYPE;
9846 ins->klass = mono_defaults.argumenthandle_class;
9859 * The following transforms:
9860 * CEE_CEQ into OP_CEQ
9861 * CEE_CGT into OP_CGT
9862 * CEE_CGT_UN into OP_CGT_UN
9863 * CEE_CLT into OP_CLT
9864 * CEE_CLT_UN into OP_CLT_UN
9866 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9868 MONO_INST_NEW (cfg, ins, cmp->opcode);
9870 cmp->sreg1 = sp [0]->dreg;
9871 cmp->sreg2 = sp [1]->dreg;
9872 type_from_op (cmp, sp [0], sp [1]);
9874 if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9875 cmp->opcode = OP_LCOMPARE;
9876 else if (sp [0]->type == STACK_R8)
9877 cmp->opcode = OP_FCOMPARE;
9879 cmp->opcode = OP_ICOMPARE;
9880 MONO_ADD_INS (bblock, cmp);
9881 ins->type = STACK_I4;
9882 ins->dreg = alloc_dreg (cfg, ins->type);
9883 type_from_op (ins, sp [0], sp [1]);
9885 if (cmp->opcode == OP_FCOMPARE) {
9887 * The backends expect the fceq opcodes to do the
9890 cmp->opcode = OP_NOP;
9891 ins->sreg1 = cmp->sreg1;
9892 ins->sreg2 = cmp->sreg2;
9894 MONO_ADD_INS (bblock, ins);
9901 MonoMethod *cil_method;
9902 gboolean needs_static_rgctx_invoke;
9904 CHECK_STACK_OVF (1);
9906 n = read32 (ip + 2);
9907 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9908 if (!cmethod || mono_loader_get_last_error ())
9910 mono_class_init (cmethod->klass);
9912 mono_save_token_info (cfg, image, n, cmethod);
9914 if (cfg->generic_sharing_context)
9915 context_used = mono_method_check_context_used (cmethod);
9917 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9919 cil_method = cmethod;
9920 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9921 METHOD_ACCESS_FAILURE;
9923 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9924 if (check_linkdemand (cfg, method, cmethod))
9926 CHECK_CFG_EXCEPTION;
9927 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9928 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9932 * Optimize the common case of ldftn+delegate creation
9934 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9935 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9936 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9937 MonoInst *target_ins;
9939 int invoke_context_used = 0;
9941 invoke = mono_get_delegate_invoke (ctor_method->klass);
9942 if (!invoke || !mono_method_signature (invoke))
9945 if (cfg->generic_sharing_context)
9946 invoke_context_used = mono_method_check_context_used (invoke);
9948 target_ins = sp [-1];
9950 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
9951 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
9952 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
9953 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
9954 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
9958 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
9959 /* FIXME: SGEN support */
9960 if (invoke_context_used == 0) {
9962 if (cfg->verbose_level > 3)
9963 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9965 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
9966 CHECK_CFG_EXCEPTION;
9975 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9976 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9980 inline_costs += 10 * num_calls++;
9983 case CEE_LDVIRTFTN: {
9988 n = read32 (ip + 2);
9989 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9990 if (!cmethod || mono_loader_get_last_error ())
9992 mono_class_init (cmethod->klass);
9994 if (cfg->generic_sharing_context)
9995 context_used = mono_method_check_context_used (cmethod);
9997 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9998 if (check_linkdemand (cfg, method, cmethod))
10000 CHECK_CFG_EXCEPTION;
10001 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
10002 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
10008 args [1] = emit_get_rgctx_method (cfg, context_used,
10009 cmethod, MONO_RGCTX_INFO_METHOD);
10012 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
10014 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
10017 inline_costs += 10 * num_calls++;
10021 CHECK_STACK_OVF (1);
10023 n = read16 (ip + 2);
10025 EMIT_NEW_ARGLOAD (cfg, ins, n);
10030 CHECK_STACK_OVF (1);
10032 n = read16 (ip + 2);
10034 NEW_ARGLOADA (cfg, ins, n);
10035 MONO_ADD_INS (cfg->cbb, ins);
10043 n = read16 (ip + 2);
10045 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
10047 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
10051 CHECK_STACK_OVF (1);
10053 n = read16 (ip + 2);
10055 EMIT_NEW_LOCLOAD (cfg, ins, n);
10060 unsigned char *tmp_ip;
10061 CHECK_STACK_OVF (1);
10063 n = read16 (ip + 2);
10066 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
10072 EMIT_NEW_LOCLOADA (cfg, ins, n);
10081 n = read16 (ip + 2);
10083 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
10085 emit_stloc_ir (cfg, sp, header, n);
10092 if (sp != stack_start)
10094 if (cfg->method != method)
10096 * Inlining this into a loop in a parent could lead to
10097 * stack overflows which is different behavior than the
10098 * non-inlined case, thus disable inlining in this case.
10100 goto inline_failure;
10102 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
10103 ins->dreg = alloc_preg (cfg);
10104 ins->sreg1 = sp [0]->dreg;
10105 ins->type = STACK_PTR;
10106 MONO_ADD_INS (cfg->cbb, ins);
10108 cfg->flags |= MONO_CFG_HAS_ALLOCA;
10110 ins->flags |= MONO_INST_INIT;
10115 case CEE_ENDFILTER: {
10116 MonoExceptionClause *clause, *nearest;
10117 int cc, nearest_num;
10121 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
10123 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
10124 ins->sreg1 = (*sp)->dreg;
10125 MONO_ADD_INS (bblock, ins);
10126 start_new_bblock = 1;
10131 for (cc = 0; cc < header->num_clauses; ++cc) {
10132 clause = &header->clauses [cc];
10133 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
10134 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
10135 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
10140 g_assert (nearest);
10141 if ((ip - header->code) != nearest->handler_offset)
10146 case CEE_UNALIGNED_:
10147 ins_flag |= MONO_INST_UNALIGNED;
10148 /* FIXME: record alignment? we can assume 1 for now */
10152 case CEE_VOLATILE_:
10153 ins_flag |= MONO_INST_VOLATILE;
10157 ins_flag |= MONO_INST_TAILCALL;
10158 cfg->flags |= MONO_CFG_HAS_TAIL;
10159 /* Can't inline tail calls at this time */
10160 inline_costs += 100000;
10167 token = read32 (ip + 2);
10168 klass = mini_get_class (method, token, generic_context);
10169 CHECK_TYPELOAD (klass);
10170 if (generic_class_is_reference_type (cfg, klass))
10171 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
10173 mini_emit_initobj (cfg, *sp, NULL, klass);
10177 case CEE_CONSTRAINED_:
10179 token = read32 (ip + 2);
10180 if (method->wrapper_type != MONO_WRAPPER_NONE)
10181 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
10183 constrained_call = mono_class_get_full (image, token, generic_context);
10184 CHECK_TYPELOAD (constrained_call);
10188 case CEE_INITBLK: {
10189 MonoInst *iargs [3];
10193 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
10194 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
10195 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
10196 /* emit_memset only works when val == 0 */
10197 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
10199 iargs [0] = sp [0];
10200 iargs [1] = sp [1];
10201 iargs [2] = sp [2];
10202 if (ip [1] == CEE_CPBLK) {
10203 MonoMethod *memcpy_method = get_memcpy_method ();
10204 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
10206 MonoMethod *memset_method = get_memset_method ();
10207 mono_emit_method_call (cfg, memset_method, iargs, NULL);
10217 ins_flag |= MONO_INST_NOTYPECHECK;
10219 ins_flag |= MONO_INST_NORANGECHECK;
10220 /* we ignore the no-nullcheck for now since we
10221 * really do it explicitly only when doing callvirt->call
10225 case CEE_RETHROW: {
10227 int handler_offset = -1;
10229 for (i = 0; i < header->num_clauses; ++i) {
10230 MonoExceptionClause *clause = &header->clauses [i];
10231 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
10232 handler_offset = clause->handler_offset;
10237 bblock->flags |= BB_EXCEPTION_UNSAFE;
10239 g_assert (handler_offset != -1);
10241 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
10242 MONO_INST_NEW (cfg, ins, OP_RETHROW);
10243 ins->sreg1 = load->dreg;
10244 MONO_ADD_INS (bblock, ins);
10246 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
10247 MONO_ADD_INS (bblock, ins);
10250 link_bblock (cfg, bblock, end_bblock);
10251 start_new_bblock = 1;
10259 CHECK_STACK_OVF (1);
10261 token = read32 (ip + 2);
10262 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic) {
10263 MonoType *type = mono_type_create_from_typespec (image, token);
10264 token = mono_type_size (type, &ialign);
10266 MonoClass *klass = mono_class_get_full (image, token, generic_context);
10267 CHECK_TYPELOAD (klass);
10268 mono_class_init (klass);
10269 token = mono_class_value_size (klass, &align);
10271 EMIT_NEW_ICONST (cfg, ins, token);
10276 case CEE_REFANYTYPE: {
10277 MonoInst *src_var, *src;
10283 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10285 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
10286 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
10287 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
10292 case CEE_READONLY_:
10305 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
10315 g_warning ("opcode 0x%02x not handled", *ip);
10319 if (start_new_bblock != 1)
10322 bblock->cil_length = ip - bblock->cil_code;
10323 bblock->next_bb = end_bblock;
10325 if (cfg->method == method && cfg->domainvar) {
10327 MonoInst *get_domain;
10329 cfg->cbb = init_localsbb;
10331 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
10332 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
10335 get_domain->dreg = alloc_preg (cfg);
10336 MONO_ADD_INS (cfg->cbb, get_domain);
10338 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
10339 MONO_ADD_INS (cfg->cbb, store);
10342 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
10343 if (cfg->compile_aot)
10344 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
10345 mono_get_got_var (cfg);
10348 if (cfg->method == method && cfg->got_var)
10349 mono_emit_load_got_addr (cfg);
10354 cfg->cbb = init_localsbb;
10356 for (i = 0; i < header->num_locals; ++i) {
10357 MonoType *ptype = header->locals [i];
10358 int t = ptype->type;
10359 dreg = cfg->locals [i]->dreg;
10361 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
10362 t = mono_class_enum_basetype (ptype->data.klass)->type;
10363 if (ptype->byref) {
10364 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10365 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
10366 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
10367 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
10368 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
10369 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
10370 MONO_INST_NEW (cfg, ins, OP_R8CONST);
10371 ins->type = STACK_R8;
10372 ins->inst_p0 = (void*)&r8_0;
10373 ins->dreg = alloc_dreg (cfg, STACK_R8);
10374 MONO_ADD_INS (init_localsbb, ins);
10375 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
10376 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
10377 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
10378 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
10380 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10385 if (cfg->init_ref_vars && cfg->method == method) {
10386 /* Emit initialization for ref vars */
10387 // FIXME: Avoid duplication initialization for IL locals.
10388 for (i = 0; i < cfg->num_varinfo; ++i) {
10389 MonoInst *ins = cfg->varinfo [i];
10391 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
10392 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
10396 /* Add a sequence point for method entry/exit events */
10398 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
10399 MONO_ADD_INS (init_localsbb, ins);
10400 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
10401 MONO_ADD_INS (cfg->bb_exit, ins);
10406 if (cfg->method == method) {
10407 MonoBasicBlock *bb;
10408 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10409 bb->region = mono_find_block_region (cfg, bb->real_offset);
10411 mono_create_spvar_for_region (cfg, bb->region);
10412 if (cfg->verbose_level > 2)
10413 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
10417 g_slist_free (class_inits);
10418 dont_inline = g_list_remove (dont_inline, method);
10420 if (inline_costs < 0) {
10423 /* Method is too large */
10424 mname = mono_method_full_name (method, TRUE);
10425 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
10426 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
10428 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10429 mono_basic_block_free (original_bb);
10433 if ((cfg->verbose_level > 2) && (cfg->method == method))
10434 mono_print_code (cfg, "AFTER METHOD-TO-IR");
10436 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10437 mono_basic_block_free (original_bb);
10438 return inline_costs;
10441 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
10448 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
10452 set_exception_type_from_invalid_il (cfg, method, ip);
10456 g_slist_free (class_inits);
10457 mono_basic_block_free (original_bb);
10458 dont_inline = g_list_remove (dont_inline, method);
10459 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10464 store_membase_reg_to_store_membase_imm (int opcode)
10467 case OP_STORE_MEMBASE_REG:
10468 return OP_STORE_MEMBASE_IMM;
10469 case OP_STOREI1_MEMBASE_REG:
10470 return OP_STOREI1_MEMBASE_IMM;
10471 case OP_STOREI2_MEMBASE_REG:
10472 return OP_STOREI2_MEMBASE_IMM;
10473 case OP_STOREI4_MEMBASE_REG:
10474 return OP_STOREI4_MEMBASE_IMM;
10475 case OP_STOREI8_MEMBASE_REG:
10476 return OP_STOREI8_MEMBASE_IMM;
10478 g_assert_not_reached ();
10484 #endif /* DISABLE_JIT */
10487 mono_op_to_op_imm (int opcode)
10491 return OP_IADD_IMM;
10493 return OP_ISUB_IMM;
10495 return OP_IDIV_IMM;
10497 return OP_IDIV_UN_IMM;
10499 return OP_IREM_IMM;
10501 return OP_IREM_UN_IMM;
10503 return OP_IMUL_IMM;
10505 return OP_IAND_IMM;
10509 return OP_IXOR_IMM;
10511 return OP_ISHL_IMM;
10513 return OP_ISHR_IMM;
10515 return OP_ISHR_UN_IMM;
10518 return OP_LADD_IMM;
10520 return OP_LSUB_IMM;
10522 return OP_LAND_IMM;
10526 return OP_LXOR_IMM;
10528 return OP_LSHL_IMM;
10530 return OP_LSHR_IMM;
10532 return OP_LSHR_UN_IMM;
10535 return OP_COMPARE_IMM;
10537 return OP_ICOMPARE_IMM;
10539 return OP_LCOMPARE_IMM;
10541 case OP_STORE_MEMBASE_REG:
10542 return OP_STORE_MEMBASE_IMM;
10543 case OP_STOREI1_MEMBASE_REG:
10544 return OP_STOREI1_MEMBASE_IMM;
10545 case OP_STOREI2_MEMBASE_REG:
10546 return OP_STOREI2_MEMBASE_IMM;
10547 case OP_STOREI4_MEMBASE_REG:
10548 return OP_STOREI4_MEMBASE_IMM;
10550 #if defined(TARGET_X86) || defined (TARGET_AMD64)
10552 return OP_X86_PUSH_IMM;
10553 case OP_X86_COMPARE_MEMBASE_REG:
10554 return OP_X86_COMPARE_MEMBASE_IMM;
10556 #if defined(TARGET_AMD64)
10557 case OP_AMD64_ICOMPARE_MEMBASE_REG:
10558 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10560 case OP_VOIDCALL_REG:
10561 return OP_VOIDCALL;
10569 return OP_LOCALLOC_IMM;
10576 ldind_to_load_membase (int opcode)
10580 return OP_LOADI1_MEMBASE;
10582 return OP_LOADU1_MEMBASE;
10584 return OP_LOADI2_MEMBASE;
10586 return OP_LOADU2_MEMBASE;
10588 return OP_LOADI4_MEMBASE;
10590 return OP_LOADU4_MEMBASE;
10592 return OP_LOAD_MEMBASE;
10593 case CEE_LDIND_REF:
10594 return OP_LOAD_MEMBASE;
10596 return OP_LOADI8_MEMBASE;
10598 return OP_LOADR4_MEMBASE;
10600 return OP_LOADR8_MEMBASE;
10602 g_assert_not_reached ();
10609 stind_to_store_membase (int opcode)
10613 return OP_STOREI1_MEMBASE_REG;
10615 return OP_STOREI2_MEMBASE_REG;
10617 return OP_STOREI4_MEMBASE_REG;
10619 case CEE_STIND_REF:
10620 return OP_STORE_MEMBASE_REG;
10622 return OP_STOREI8_MEMBASE_REG;
10624 return OP_STORER4_MEMBASE_REG;
10626 return OP_STORER8_MEMBASE_REG;
10628 g_assert_not_reached ();
10635 mono_load_membase_to_load_mem (int opcode)
10637 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
10638 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10640 case OP_LOAD_MEMBASE:
10641 return OP_LOAD_MEM;
10642 case OP_LOADU1_MEMBASE:
10643 return OP_LOADU1_MEM;
10644 case OP_LOADU2_MEMBASE:
10645 return OP_LOADU2_MEM;
10646 case OP_LOADI4_MEMBASE:
10647 return OP_LOADI4_MEM;
10648 case OP_LOADU4_MEMBASE:
10649 return OP_LOADU4_MEM;
10650 #if SIZEOF_REGISTER == 8
10651 case OP_LOADI8_MEMBASE:
10652 return OP_LOADI8_MEM;
10661 op_to_op_dest_membase (int store_opcode, int opcode)
10663 #if defined(TARGET_X86)
10664 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
10669 return OP_X86_ADD_MEMBASE_REG;
10671 return OP_X86_SUB_MEMBASE_REG;
10673 return OP_X86_AND_MEMBASE_REG;
10675 return OP_X86_OR_MEMBASE_REG;
10677 return OP_X86_XOR_MEMBASE_REG;
10680 return OP_X86_ADD_MEMBASE_IMM;
10683 return OP_X86_SUB_MEMBASE_IMM;
10686 return OP_X86_AND_MEMBASE_IMM;
10689 return OP_X86_OR_MEMBASE_IMM;
10692 return OP_X86_XOR_MEMBASE_IMM;
10698 #if defined(TARGET_AMD64)
10699 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
10704 return OP_X86_ADD_MEMBASE_REG;
10706 return OP_X86_SUB_MEMBASE_REG;
10708 return OP_X86_AND_MEMBASE_REG;
10710 return OP_X86_OR_MEMBASE_REG;
10712 return OP_X86_XOR_MEMBASE_REG;
10714 return OP_X86_ADD_MEMBASE_IMM;
10716 return OP_X86_SUB_MEMBASE_IMM;
10718 return OP_X86_AND_MEMBASE_IMM;
10720 return OP_X86_OR_MEMBASE_IMM;
10722 return OP_X86_XOR_MEMBASE_IMM;
10724 return OP_AMD64_ADD_MEMBASE_REG;
10726 return OP_AMD64_SUB_MEMBASE_REG;
10728 return OP_AMD64_AND_MEMBASE_REG;
10730 return OP_AMD64_OR_MEMBASE_REG;
10732 return OP_AMD64_XOR_MEMBASE_REG;
10735 return OP_AMD64_ADD_MEMBASE_IMM;
10738 return OP_AMD64_SUB_MEMBASE_IMM;
10741 return OP_AMD64_AND_MEMBASE_IMM;
10744 return OP_AMD64_OR_MEMBASE_IMM;
10747 return OP_AMD64_XOR_MEMBASE_IMM;
10757 op_to_op_store_membase (int store_opcode, int opcode)
10759 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10762 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10763 return OP_X86_SETEQ_MEMBASE;
10765 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10766 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 * When the first source of 'opcode' is loaded from a stack slot by
 * 'load_opcode', return a fused opcode that reads the operand directly from
 * memory (x86/amd64), or -1 when no fusion exists. Fusing avoids emitting a
 * separate spill load before the instruction.
 * NOTE(review): interior lines (switch heads, default returns, #endif's) are
 * elided in this chunk; the visible cases are push/compare fusions only.
 */
10774 op_to_op_src1_membase (int load_opcode, int opcode)
10777 /* FIXME: This has sign extension issues */
10779 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10780 return OP_X86_COMPARE_MEMBASE8_IMM;
/* x86: the remaining fusions only apply to full-width (4 byte) loads */
10783 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10788 return OP_X86_PUSH_MEMBASE;
10789 case OP_COMPARE_IMM:
10790 case OP_ICOMPARE_IMM:
10791 return OP_X86_COMPARE_MEMBASE_IMM;
10794 return OP_X86_COMPARE_MEMBASE_REG;
10798 #ifdef TARGET_AMD64
10799 /* FIXME: This has sign extension issues */
10801 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10802 return OP_X86_COMPARE_MEMBASE8_IMM;
/* amd64: PUSH fusion requires a pointer-sized (8 byte) load */
10807 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10808 return OP_X86_PUSH_MEMBASE;
/* NOTE(review): the COMPARE_IMM/LCOMPARE_IMM cases below sit inside a
 * comment that starts on the next line ("This only works for 32 bit
 * immediates") — i.e. they are intentionally disabled in the original. */
10810 /* FIXME: This only works for 32 bit immediates
10811 case OP_COMPARE_IMM:
10812 case OP_LCOMPARE_IMM:
10813 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10814 return OP_AMD64_COMPARE_MEMBASE_IMM;
10816 case OP_ICOMPARE_IMM:
10817 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10818 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10822 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10823 return OP_AMD64_COMPARE_MEMBASE_REG;
10826 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10827 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 * Same idea as op_to_op_src1_membase but for the SECOND source operand:
 * return a reg-op-membase fused opcode when the second source comes from a
 * stack slot, or -1 (elided default) when no fusion exists.
 */
10836 op_to_op_src2_membase (int load_opcode, int opcode)
/* x86 path: only 4-byte loads can be fused */
10839 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10845 return OP_X86_COMPARE_REG_MEMBASE;
10847 return OP_X86_ADD_REG_MEMBASE;
10849 return OP_X86_SUB_REG_MEMBASE;
10851 return OP_X86_AND_REG_MEMBASE;
10853 return OP_X86_OR_REG_MEMBASE;
10855 return OP_X86_XOR_REG_MEMBASE;
10859 #ifdef TARGET_AMD64
/* amd64: 32-bit loads fuse to the OP_X86_* (32-bit) membase forms... */
10860 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
10863 return OP_AMD64_ICOMPARE_REG_MEMBASE;
10865 return OP_X86_ADD_REG_MEMBASE;
10867 return OP_X86_SUB_REG_MEMBASE;
10869 return OP_X86_AND_REG_MEMBASE;
10871 return OP_X86_OR_REG_MEMBASE;
10873 return OP_X86_XOR_REG_MEMBASE;
/* ...while 8-byte/pointer loads fuse to the OP_AMD64_* (64-bit) forms */
10875 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
10879 return OP_AMD64_COMPARE_REG_MEMBASE;
10881 return OP_AMD64_ADD_REG_MEMBASE;
10883 return OP_AMD64_SUB_REG_MEMBASE;
10885 return OP_AMD64_AND_REG_MEMBASE;
10887 return OP_AMD64_OR_REG_MEMBASE;
10889 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 * Like mono_op_to_op_imm (), but refuse to convert to an _IMM form for
 * opcodes which this architecture emulates in software (long shifts on
 * 32-bit targets, mul/div where MONO_ARCH_EMULATE_* is set) — the elided
 * case labels between the #if lines return -1 for those. Everything else
 * falls through to mono_op_to_op_imm ().
 */
10898 mono_op_to_op_imm_noemul (int opcode)
10901 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
10907 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10915 return mono_op_to_op_imm (opcode);
10919 #ifndef DISABLE_JIT
10922 * mono_handle_global_vregs:
10924 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
 * (MonoInst/MonoMethodVar) for them, so the global register allocator can see
 * them. Conversely, variables confined to a single bblock are demoted back to
 * plain local vregs, and the varinfo/vars tables are compacted afterwards.
10928 mono_handle_global_vregs (MonoCompile *cfg)
10930 gint32 *vreg_to_bb;
10931 MonoBasicBlock *bb;
/* NOTE(review): element size is sizeof (gint32*) (pointer size) rather than
 * sizeof (gint32), and the "+ 1" adds one BYTE, not one element. This only
 * over-allocates (pointer >= gint32), so it is safe but sloppy — confirm
 * intent before "fixing". */
10934 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
10936 #ifdef MONO_ARCH_SIMD_INTRINSICS
10937 if (cfg->uses_simd_intrinsics)
10938 mono_simd_simplify_indirection (cfg);
10941 /* Find local vregs used in more than one bb */
10942 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10943 MonoInst *ins = bb->code;
10944 int block_num = bb->block_num;
10946 if (cfg->verbose_level > 2)
10947 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
10950 for (; ins; ins = ins->next) {
10951 const char *spec = INS_INFO (ins->opcode);
10952 int regtype = 0, regindex;
10955 if (G_UNLIKELY (cfg->verbose_level > 2))
10956 mono_print_ins (ins);
/* Only decomposed (machine-level) opcodes are expected here */
10958 g_assert (ins->opcode >= MONO_CEE_LAST);
/* regindex 0 = dest, 1..3 = the three possible sources */
10960 for (regindex = 0; regindex < 4; regindex ++) {
10963 if (regindex == 0) {
10964 regtype = spec [MONO_INST_DEST];
10965 if (regtype == ' ')
10968 } else if (regindex == 1) {
10969 regtype = spec [MONO_INST_SRC1];
10970 if (regtype == ' ')
10973 } else if (regindex == 2) {
10974 regtype = spec [MONO_INST_SRC2];
10975 if (regtype == ' ')
10978 } else if (regindex == 3) {
10979 regtype = spec [MONO_INST_SRC3];
10980 if (regtype == ' ')
10985 #if SIZEOF_REGISTER == 4
10986 /* In the LLVM case, the long opcodes are not decomposed */
10987 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
10989 * Since some instructions reference the original long vreg,
10990 * and some reference the two component vregs, it is quite hard
10991 * to determine when it needs to be global. So be conservative.
10993 if (!get_vreg_to_inst (cfg, vreg)) {
10994 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10996 if (cfg->verbose_level > 2)
10997 printf ("LONG VREG R%d made global.\n", vreg);
11001 * Make the component vregs volatile since the optimizations can
11002 * get confused otherwise.
/* vreg+1 / vreg+2 are the low/high 32-bit halves of a long vreg */
11004 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
11005 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
11009 g_assert (vreg != -1);
11011 prev_bb = vreg_to_bb [vreg];
11012 if (prev_bb == 0) {
11013 /* 0 is a valid block num, so block ids are stored biased by +1 */
11014 vreg_to_bb [vreg] = block_num + 1;
11015 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* hregs below MONO_MAX_IREGS/FREGS are physical registers, never globals */
11016 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
11019 if (!get_vreg_to_inst (cfg, vreg)) {
11020 if (G_UNLIKELY (cfg->verbose_level > 2))
11021 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Pick the variable type from the reg type (elided switch on regtype) */
11025 if (vreg_is_ref (cfg, vreg))
11026 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
11028 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
11031 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
11034 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
11037 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
11040 g_assert_not_reached ();
11044 /* Flag as having been used in more than one bb */
11045 vreg_to_bb [vreg] = -1;
11051 /* If a variable is used in only one bblock, convert it into a local vreg */
11052 for (i = 0; i < cfg->num_varinfo; i++) {
11053 MonoInst *var = cfg->varinfo [i];
11054 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
11056 switch (var->type) {
11062 #if SIZEOF_REGISTER == 8
11065 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
11066 /* Enabling this screws up the fp stack on x86 */
11069 /* Arguments are implicitly global */
11070 /* Putting R4 vars into registers doesn't work currently */
11071 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
 * Make sure that the variable's liveness interval doesn't contain a call, since
11074 * that would cause the lvreg to be spilled, making the whole optimization
11077 /* This is too slow for JIT compilation */
/* NOTE(review): vreg_to_bb is declared gint32* above yet is dereferenced
 * as a bblock pointer here — lines are elided in this chunk, so either a
 * second array exists or this path is compiled out; confirm in full file. */
11079 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
11081 int def_index, call_index, ins_index;
11082 gboolean spilled = FALSE;
11087 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
11088 const char *spec = INS_INFO (ins->opcode);
11090 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
11091 def_index = ins_index;
/* NOTE(review): BUG — both legs of this || test SRC1/sreg1; the second
 * leg was almost certainly meant to be spec [MONO_INST_SRC2] with
 * ins->sreg2, as written the duplicate makes the second leg dead and
 * SRC2 uses are never checked against an intervening call. */
11093 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
11094 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
11095 if (call_index > def_index) {
11101 if (MONO_IS_CALL (ins))
11102 call_index = ins_index;
11112 if (G_UNLIKELY (cfg->verbose_level > 2))
11113 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* Demote: mark the variable dead and drop its vreg->var mapping */
11114 var->flags |= MONO_INST_IS_DEAD;
11115 cfg->vreg_to_inst [var->dreg] = NULL;
11122 * Compress the varinfo and vars tables so the liveness computation is faster and
11123 * takes up less space.
11126 for (i = 0; i < cfg->num_varinfo; ++i) {
11127 MonoInst *var = cfg->varinfo [i];
11128 if (pos < i && cfg->locals_start == i)
11129 cfg->locals_start = pos;
11130 if (!(var->flags & MONO_INST_IS_DEAD)) {
11132 cfg->varinfo [pos] = cfg->varinfo [i];
11133 cfg->varinfo [pos]->inst_c0 = pos;
11134 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
11135 cfg->vars [pos].idx = pos;
11136 #if SIZEOF_REGISTER == 4
11137 if (cfg->varinfo [pos]->type == STACK_I8) {
11138 /* Modify the two component vars too */
11141 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
11142 var1->inst_c0 = pos;
11143 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
11144 var1->inst_c0 = pos;
11151 cfg->num_varinfo = pos;
11152 if (cfg->locals_start > cfg->num_varinfo)
11153 cfg->locals_start = cfg->num_varinfo;
11157 * mono_spill_global_vars:
11159 * Generate spill code for variables which are not allocated to registers,
11160 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
11161 * code is generated which could be optimized by the local optimization passes.
 * Also fuses loads/stores into membase opcodes where the target supports it,
 * caches spilled values in lvregs, and records instruction-precise live
 * ranges for debugging info.
11164 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
11166 MonoBasicBlock *bb;
11168 int orig_next_vreg;
11169 guint32 *vreg_to_lvreg;
11171 guint32 i, lvregs_len;
11172 gboolean dest_has_lvreg = FALSE;
/* Indexed by the spec chars 'i'/'l'/'f'/'x' below */
11173 guint32 stacktypes [128];
11174 MonoInst **live_range_start, **live_range_end;
11175 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
11177 *need_local_opts = FALSE;
11179 memset (spec2, 0, sizeof (spec2));
11181 /* FIXME: Move this function to mini.c */
11182 stacktypes ['i'] = STACK_PTR;
11183 stacktypes ['l'] = STACK_I8;
11184 stacktypes ['f'] = STACK_R8;
11185 #ifdef MONO_ARCH_SIMD_INTRINSICS
11186 stacktypes ['x'] = STACK_VTYPE;
11189 #if SIZEOF_REGISTER == 4
11190 /* Create MonoInsts for longs */
11191 for (i = 0; i < cfg->num_varinfo; i++) {
11192 MonoInst *ins = cfg->varinfo [i];
11194 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
11195 switch (ins->type) {
11200 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
11203 g_assert (ins->opcode == OP_REGOFFSET);
/* Give the low (dreg+1) and high (dreg+2) halves their own stack slots */
11205 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
11207 tree->opcode = OP_REGOFFSET;
11208 tree->inst_basereg = ins->inst_basereg;
11209 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
11211 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
11213 tree->opcode = OP_REGOFFSET;
11214 tree->inst_basereg = ins->inst_basereg;
11215 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
11225 if (cfg->compute_gc_maps) {
 /* register-allocated variables need liveness info even for non-ref values */
11227 for (i = 0; i < cfg->num_varinfo; i++) {
11228 MonoInst *ins = cfg->varinfo [i];
11230 if (ins->opcode == OP_REGVAR)
11231 ins->flags |= MONO_INST_GC_TRACK;
11235 /* FIXME: widening and truncation */
11238 * As an optimization, when a variable allocated to the stack is first loaded into
11239 * an lvreg, we will remember the lvreg and use it the next time instead of loading
11240 * the variable again.
11242 orig_next_vreg = cfg->next_vreg;
11243 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
/* Hard cap of 1024 cached lvregs; asserted below before each append */
11244 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
11248 * These arrays contain the first and last instructions accessing a given
11250 * Since we emit bblocks in the same order we process them here, and we
11251 * don't split live ranges, these will precisely describe the live range of
11252 * the variable, i.e. the instruction range where a valid value can be found
 * in the variable's location.
11254 * The live range is computed using the liveness info computed by the liveness pass.
11255 * We can't use vmv->range, since that is an abstract live range, and we need
11256 * one which is instruction precise.
11257 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
11259 /* FIXME: Only do this if debugging info is requested */
11260 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
11261 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
11262 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
11263 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
11265 /* Add spill loads/stores */
11266 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
11269 if (cfg->verbose_level > 2)
11270 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
/* lvreg caching is per-bblock: reset the cache at each block entry */
11272 /* Clear vreg_to_lvreg array */
11273 for (i = 0; i < lvregs_len; i++)
11274 vreg_to_lvreg [lvregs [i]] = 0;
11278 MONO_BB_FOR_EACH_INS (bb, ins) {
11279 const char *spec = INS_INFO (ins->opcode);
11280 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
11281 gboolean store, no_lvreg;
11282 int sregs [MONO_MAX_SRC_REGS];
11284 if (G_UNLIKELY (cfg->verbose_level > 2))
11285 mono_print_ins (ins);
11287 if (ins->opcode == OP_NOP)
11291 * We handle LDADDR here as well, since it can only be decomposed
11292 * when variable addresses are known.
11294 if (ins->opcode == OP_LDADDR) {
11295 MonoInst *var = ins->inst_p0;
11297 if (var->opcode == OP_VTARG_ADDR) {
11298 /* Happens on SPARC/S390 where vtypes are passed by reference */
11299 MonoInst *vtaddr = var->inst_left;
11300 if (vtaddr->opcode == OP_REGVAR) {
11301 ins->opcode = OP_MOVE;
11302 ins->sreg1 = vtaddr->dreg;
11304 else if (var->inst_left->opcode == OP_REGOFFSET) {
11305 ins->opcode = OP_LOAD_MEMBASE;
11306 ins->inst_basereg = vtaddr->inst_basereg;
11307 ins->inst_offset = vtaddr->inst_offset;
11311 g_assert (var->opcode == OP_REGOFFSET);
/* Address of a stack variable = frame base + offset */
11313 ins->opcode = OP_ADD_IMM;
11314 ins->sreg1 = var->inst_basereg;
11315 ins->inst_imm = var->inst_offset;
11318 *need_local_opts = TRUE;
11319 spec = INS_INFO (ins->opcode);
/* IL-level opcodes must have been decomposed away by now */
11322 if (ins->opcode < MONO_CEE_LAST) {
11323 mono_print_ins (ins);
11324 g_assert_not_reached ();
11328 * Store opcodes have destbasereg in the dreg, but in reality, it is an
11332 if (MONO_IS_STORE_MEMBASE (ins)) {
/* Temporarily swap dreg/sreg2 so the dreg slot holds the stored value;
 * swapped back near the end of the loop body (see the symmetric swap). */
11333 tmp_reg = ins->dreg;
11334 ins->dreg = ins->sreg2;
11335 ins->sreg2 = tmp_reg;
11338 spec2 [MONO_INST_DEST] = ' ';
11339 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
11340 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
11341 spec2 [MONO_INST_SRC3] = ' ';
11343 } else if (MONO_IS_STORE_MEMINDEX (ins))
11344 g_assert_not_reached ();
11349 if (G_UNLIKELY (cfg->verbose_level > 2)) {
11350 printf ("\t %.3s %d", spec, ins->dreg);
11351 num_sregs = mono_inst_get_src_registers (ins, sregs);
11352 for (srcindex = 0; srcindex < 3; ++srcindex)
11353 printf (" %d", sregs [srcindex]);
/***************/
/* DREG        */
/***************/
11360 regtype = spec [MONO_INST_DEST];
11361 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
11364 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
11365 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
11366 MonoInst *store_ins;
11368 MonoInst *def_ins = ins;
11369 int dreg = ins->dreg; /* The original vreg */
11371 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
11373 if (var->opcode == OP_REGVAR) {
/* Register-allocated: just rewrite the vreg to the hreg */
11374 ins->dreg = var->dreg;
11375 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
11377 * Instead of emitting a load+store, use a _membase opcode.
11379 g_assert (var->opcode == OP_REGOFFSET);
11380 if (ins->opcode == OP_MOVE) {
11384 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
11385 ins->inst_basereg = var->inst_basereg;
11386 ins->inst_offset = var->inst_offset;
11389 spec = INS_INFO (ins->opcode);
11393 g_assert (var->opcode == OP_REGOFFSET);
11395 prev_dreg = ins->dreg;
11397 /* Invalidate any previous lvreg for this vreg */
11398 vreg_to_lvreg [ins->dreg] = 0;
11402 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
/* soft-float: R8 values live in integer registers */
11404 store_opcode = OP_STOREI8_MEMBASE_REG;
11407 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
11409 if (regtype == 'l') {
11410 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
11411 mono_bblock_insert_after_ins (bb, ins, store_ins);
11412 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
11413 mono_bblock_insert_after_ins (bb, ins, store_ins);
11414 def_ins = store_ins;
11417 g_assert (store_opcode != OP_STOREV_MEMBASE);
11419 /* Try to fuse the store into the instruction itself */
11420 /* FIXME: Add more instructions */
11421 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
/* const + store => store immediate */
11422 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
11423 ins->inst_imm = ins->inst_c0;
11424 ins->inst_destbasereg = var->inst_basereg;
11425 ins->inst_offset = var->inst_offset;
11426 spec = INS_INFO (ins->opcode);
11427 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
/* move + store => direct store of the source */
11428 ins->opcode = store_opcode;
11429 ins->inst_destbasereg = var->inst_basereg;
11430 ins->inst_offset = var->inst_offset;
/* Mirror of the STORE_MEMBASE dreg/sreg2 swap done above */
11434 tmp_reg = ins->dreg;
11435 ins->dreg = ins->sreg2;
11436 ins->sreg2 = tmp_reg;
11439 spec2 [MONO_INST_DEST] = ' ';
11440 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
11441 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
11442 spec2 [MONO_INST_SRC3] = ' ';
11444 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
11445 // FIXME: The backends expect the base reg to be in inst_basereg
11446 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
11448 ins->inst_basereg = var->inst_basereg;
11449 ins->inst_offset = var->inst_offset;
11450 spec = INS_INFO (ins->opcode);
11452 /* printf ("INS: "); mono_print_ins (ins); */
11453 /* Create a store instruction */
11454 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
11456 /* Insert it after the instruction */
11457 mono_bblock_insert_after_ins (bb, ins, store_ins);
11459 def_ins = store_ins;
11462 * We can't assign ins->dreg to var->dreg here, since the
11463 * sregs could use it. So set a flag, and do it after
11466 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
11467 dest_has_lvreg = TRUE;
11472 if (def_ins && !live_range_start [dreg]) {
11473 live_range_start [dreg] = def_ins;
11474 live_range_start_bb [dreg] = bb;
11477 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
11480 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
11481 tmp->inst_c1 = dreg;
11482 mono_bblock_insert_after_ins (bb, def_ins, tmp);
/***************/
/* SREGS       */
/***************/
11489 num_sregs = mono_inst_get_src_registers (ins, sregs);
11490 for (srcindex = 0; srcindex < 3; ++srcindex) {
11491 regtype = spec [MONO_INST_SRC1 + srcindex];
11492 sreg = sregs [srcindex];
11494 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
11495 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
11496 MonoInst *var = get_vreg_to_inst (cfg, sreg);
11497 MonoInst *use_ins = ins;
11498 MonoInst *load_ins;
11499 guint32 load_opcode;
11501 if (var->opcode == OP_REGVAR) {
11502 sregs [srcindex] = var->dreg;
11503 //mono_inst_set_src_registers (ins, sregs);
11504 live_range_end [sreg] = use_ins;
11505 live_range_end_bb [sreg] = bb;
11507 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
11510 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
11511 /* var->dreg is a hreg */
11512 tmp->inst_c1 = sreg;
11513 mono_bblock_insert_after_ins (bb, ins, tmp);
11519 g_assert (var->opcode == OP_REGOFFSET);
11521 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
11523 g_assert (load_opcode != OP_LOADV_MEMBASE);
11525 if (vreg_to_lvreg [sreg]) {
11526 g_assert (vreg_to_lvreg [sreg] != -1);
11528 /* The variable is already loaded to an lvreg */
11529 if (G_UNLIKELY (cfg->verbose_level > 2))
11530 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
11531 sregs [srcindex] = vreg_to_lvreg [sreg];
11532 //mono_inst_set_src_registers (ins, sregs);
11536 /* Try to fuse the load into the instruction */
11537 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
11538 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
11539 sregs [0] = var->inst_basereg;
11540 //mono_inst_set_src_registers (ins, sregs);
11541 ins->inst_offset = var->inst_offset;
11542 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
11543 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
11544 sregs [1] = var->inst_basereg;
11545 //mono_inst_set_src_registers (ins, sregs);
11546 ins->inst_offset = var->inst_offset;
11548 if (MONO_IS_REAL_MOVE (ins)) {
/* The load below makes the move redundant */
11549 ins->opcode = OP_NOP;
11552 //printf ("%d ", srcindex); mono_print_ins (ins);
11554 sreg = alloc_dreg (cfg, stacktypes [regtype]);
11556 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
11557 if (var->dreg == prev_dreg) {
11559 * sreg refers to the value loaded by the load
11560 * emitted below, but we need to use ins->dreg
11561 * since it refers to the store emitted earlier.
11565 g_assert (sreg != -1);
11566 vreg_to_lvreg [var->dreg] = sreg;
11567 g_assert (lvregs_len < 1024);
11568 lvregs [lvregs_len ++] = var->dreg;
11572 sregs [srcindex] = sreg;
11573 //mono_inst_set_src_registers (ins, sregs);
11575 if (regtype == 'l') {
/* Longs on 32-bit: load high (sreg+2) then low (sreg+1) halves */
11576 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
11577 mono_bblock_insert_before_ins (bb, ins, load_ins);
11578 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
11579 mono_bblock_insert_before_ins (bb, ins, load_ins);
11580 use_ins = load_ins;
11583 #if SIZEOF_REGISTER == 4
11584 g_assert (load_opcode != OP_LOADI8_MEMBASE);
11586 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
11587 mono_bblock_insert_before_ins (bb, ins, load_ins);
11588 use_ins = load_ins;
11592 if (var->dreg < orig_next_vreg) {
11593 live_range_end [var->dreg] = use_ins;
11594 live_range_end_bb [var->dreg] = bb;
11597 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
11600 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
11601 tmp->inst_c1 = var->dreg;
11602 mono_bblock_insert_after_ins (bb, ins, tmp);
11606 mono_inst_set_src_registers (ins, sregs);
/* Deferred from the DREG handling above: now that sregs are processed,
 * it is safe to record the dest lvreg. */
11608 if (dest_has_lvreg) {
11609 g_assert (ins->dreg != -1);
11610 vreg_to_lvreg [prev_dreg] = ins->dreg;
11611 g_assert (lvregs_len < 1024);
11612 lvregs [lvregs_len ++] = prev_dreg;
11613 dest_has_lvreg = FALSE;
/* Undo the STORE_MEMBASE dreg/sreg2 swap performed at loop entry */
11617 tmp_reg = ins->dreg;
11618 ins->dreg = ins->sreg2;
11619 ins->sreg2 = tmp_reg;
11622 if (MONO_IS_CALL (ins)) {
/* A call clobbers the lvregs, so the cache is no longer valid */
11623 /* Clear vreg_to_lvreg array */
11624 for (i = 0; i < lvregs_len; i++)
11625 vreg_to_lvreg [lvregs [i]] = 0;
11627 } else if (ins->opcode == OP_NOP) {
11629 MONO_INST_NULLIFY_SREGS (ins);
11632 if (cfg->verbose_level > 2)
11633 mono_print_ins_index (1, ins);
11636 /* Extend the live range based on the liveness info */
11637 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
11638 for (i = 0; i < cfg->num_varinfo; i ++) {
11639 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
11641 if (vreg_is_volatile (cfg, vi->vreg))
11642 /* The liveness info is incomplete */
11645 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
11646 /* Live from at least the first ins of this bb */
11647 live_range_start [vi->vreg] = bb->code;
11648 live_range_start_bb [vi->vreg] = bb;
11651 if (mono_bitset_test_fast (bb->live_out_set, i)) {
11652 /* Live at least until the last ins of this bb */
11653 live_range_end [vi->vreg] = bb->last_ins;
11654 live_range_end_bb [vi->vreg] = bb;
11660 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
11662 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
11663 * by storing the current native offset into MonoMethodVar->live_range_start/end.
11665 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
11666 for (i = 0; i < cfg->num_varinfo; ++i) {
11667 int vreg = MONO_VARINFO (cfg, i)->vreg;
11670 if (live_range_start [vreg]) {
11671 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
11673 ins->inst_c1 = vreg;
11674 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
11676 if (live_range_end [vreg]) {
11677 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
11679 ins->inst_c1 = vreg;
11680 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
11681 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
11683 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
/* The live-range arrays are g_new-allocated (not mempool), so free them */
11689 g_free (live_range_start);
11690 g_free (live_range_end);
11691 g_free (live_range_start_bb);
11692 g_free (live_range_end_bb);
11697 * - use 'iadd' instead of 'int_add'
11698 * - handling ovf opcodes: decompose in method_to_ir.
11699 * - unify iregs/fregs
11700 * -> partly done, the missing parts are:
11701 * - a more complete unification would involve unifying the hregs as well, so
11702 * code wouldn't need if (fp) all over the place. but that would mean the hregs
11703 * would no longer map to the machine hregs, so the code generators would need to
11704 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
11705 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
11706 * fp/non-fp branches speeds it up by about 15%.
11707 * - use sext/zext opcodes instead of shifts
11709 * - get rid of TEMPLOADs if possible and use vregs instead
11710 * - clean up usage of OP_P/OP_ opcodes
11711 * - cleanup usage of DUMMY_USE
11712 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
11714 * - set the stack type and allocate a dreg in the EMIT_NEW macros
11715 * - get rid of all the <foo>2 stuff when the new JIT is ready.
11716 * - make sure handle_stack_args () is called before the branch is emitted
11717 * - when the new IR is done, get rid of all unused stuff
11718 * - COMPARE/BEQ as separate instructions or unify them ?
11719 * - keeping them separate allows specialized compare instructions like
11720 * compare_imm, compare_membase
11721 * - most back ends unify fp compare+branch, fp compare+ceq
11722 * - integrate mono_save_args into inline_method
11723 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
11724 * - handle long shift opts on 32 bit platforms somehow: they require
11725 * 3 sregs (2 for arg1 and 1 for arg2)
11726 * - make byref a 'normal' type.
11727 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
11728 * variable if needed.
11729 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11730 * like inline_method.
11731 * - remove inlining restrictions
11732 * - fix LNEG and enable cfold of INEG
11733 * - generalize x86 optimizations like ldelema as a peephole optimization
11734 * - add store_mem_imm for amd64
11735 * - optimize the loading of the interruption flag in the managed->native wrappers
11736 * - avoid special handling of OP_NOP in passes
11737 * - move code inserting instructions into one function/macro.
11738 * - try a coalescing phase after liveness analysis
11739 * - add float -> vreg conversion + local optimizations on !x86
11740 * - figure out how to handle decomposed branches during optimizations, ie.
11741 * compare+branch, op_jump_table+op_br etc.
11742 * - promote RuntimeXHandles to vregs
11743 * - vtype cleanups:
11744 * - add a NEW_VARLOADA_VREG macro
11745 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11746 * accessing vtype fields.
11747 * - get rid of I8CONST on 64 bit platforms
11748 * - dealing with the increase in code size due to branches created during opcode
11750 * - use extended basic blocks
11751 * - all parts of the JIT
11752 * - handle_global_vregs () && local regalloc
11753 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11754 * - sources of increase in code size:
11757 * - isinst and castclass
11758 * - lvregs not allocated to global registers even if used multiple times
11759 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11761 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11762 * - add all micro optimizations from the old JIT
11763 * - put tree optimizations into the deadce pass
11764 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11765 * specific function.
11766 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11767 * fcompare + branchCC.
11768 * - create a helper function for allocating a stack slot, taking into account
11769 * MONO_CFG_HAS_SPILLUP.
11771 * - merge the ia64 switch changes.
11772 * - optimize mono_regstate2_alloc_int/float.
11773 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11774 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11775 * parts of the tree could be separated by other instructions, killing the tree
11776 * arguments, or stores killing loads etc. Also, should we fold loads into other
11777 * instructions if the result of the load is used multiple times ?
11778 * - make the REM_IMM optimization in mini-x86.c arch-independent.
11779 * - LAST MERGE: 108395.
11780 * - when returning vtypes in registers, generate IR and append it to the end of the
11781 * last bb instead of doing it in the epilog.
11782 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
11790 - When to decompose opcodes:
11791 - earlier: this makes some optimizations hard to implement, since the low level IR
11792 no longer contains the necessary information. But it is easier to do.
11793 - later: harder to implement, enables more optimizations.
11794 - Branches inside bblocks:
11795 - created when decomposing complex opcodes.
11796 - branches to another bblock: harmless, but not tracked by the branch
11797 optimizations, so need to branch to a label at the start of the bblock.
11798 - branches to inside the same bblock: very problematic, trips up the local
11799 reg allocator. Can be fixed by splitting the current bblock, but that is a
11800 complex operation, since some local vregs can become global vregs etc.
11801 - Local/global vregs:
11802 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11803 local register allocator.
11804 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11805 structure, created by mono_create_var (). Assigned to hregs or the stack by
11806 the global register allocator.
11807 - When to do optimizations like alu->alu_imm:
11808 - earlier -> saves work later on since the IR will be smaller/simpler
11809 - later -> can work on more instructions
11810 - Handling of valuetypes:
11811 - When a vtype is pushed on the stack, a new temporary is created, an
11812 instruction computing its address (LDADDR) is emitted and pushed on
11813 the stack. Need to optimize cases when the vtype is used immediately as in
11814 argument passing, stloc etc.
11815 - Instead of the to_end stuff in the old JIT, simply call the function handling
11816 the values on the stack before emitting the last instruction of the bb.
11819 #endif /* DISABLE_JIT */