2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
30 #include <mono/utils/memcheck.h>
32 #include <mono/metadata/assembly.h>
33 #include <mono/metadata/attrdefs.h>
34 #include <mono/metadata/loader.h>
35 #include <mono/metadata/tabledefs.h>
36 #include <mono/metadata/class.h>
37 #include <mono/metadata/object.h>
38 #include <mono/metadata/exception.h>
39 #include <mono/metadata/opcodes.h>
40 #include <mono/metadata/mono-endian.h>
41 #include <mono/metadata/tokentype.h>
42 #include <mono/metadata/tabledefs.h>
43 #include <mono/metadata/marshal.h>
44 #include <mono/metadata/debug-helpers.h>
45 #include <mono/metadata/mono-debug.h>
46 #include <mono/metadata/gc-internal.h>
47 #include <mono/metadata/security-manager.h>
48 #include <mono/metadata/threads-types.h>
49 #include <mono/metadata/security-core-clr.h>
50 #include <mono/metadata/monitor.h>
51 #include <mono/metadata/profiler-private.h>
52 #include <mono/metadata/profiler.h>
53 #include <mono/utils/mono-compiler.h>
54 #include <mono/metadata/mono-basic-block.h>
61 #include "jit-icalls.h"
63 #include "debugger-agent.h"
/* Inliner heuristics: relative cost charged per branch, and the maximum
 * IL length (in bytes) of a callee considered for inlining. */
65 #define BRANCH_COST 10
66 #define INLINE_LENGTH_LIMIT 20
/* Record an inline failure when compiling an inlined (non-wrapper) callee;
 * the rest of the macro body is not visible in this chunk. */
67 #define INLINE_FAILURE do {\
68 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
/* Bail out early if a previous step already recorded an exception on the cfg. */
71 #define CHECK_CFG_EXCEPTION do {\
72 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
/* Abort compilation with a MethodAccessException: records the exception type
 * and a message naming both the inaccessible method and the caller, then
 * jumps to the use site's exception_exit label. */
75 #define METHOD_ACCESS_FAILURE do { \
76 char *method_fname = mono_method_full_name (method, TRUE); \
77 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
78 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS); \
79 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
80 g_free (method_fname); \
81 g_free (cil_method_fname); \
82 goto exception_exit; \
/* Same pattern as METHOD_ACCESS_FAILURE, but for an inaccessible field:
 * sets MONO_EXCEPTION_FIELD_ACCESS and jumps to exception_exit. */
84 #define FIELD_ACCESS_FAILURE do { \
85 char *method_fname = mono_method_full_name (method, TRUE); \
86 char *field_fname = mono_field_full_name (field); \
87 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS); \
88 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
89 g_free (method_fname); \
90 g_free (field_fname); \
91 goto exception_exit; \
/* When compiling with generic sharing, give up on sharing for this method:
 * optionally trace the offending opcode, record
 * MONO_EXCEPTION_GENERIC_SHARING_FAILED and jump to exception_exit.
 * Outside a generic-sharing context this expands to a no-op check. */
93 #define GENERIC_SHARING_FAILURE(opcode) do { \
94 if (cfg->generic_sharing_context) { \
95 if (cfg->verbose_level > 2) \
96 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
97 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
98 goto exception_exit; \
/* Abort compilation with an OutOfMemoryException and jump to the use site's
 * exception_exit label. */
101 #define OUT_OF_MEMORY_FAILURE do { \
102 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
103 goto exception_exit; \
105 /* Determine whether 'ins' represents a load of the 'this' argument */
106 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
108 static int ldind_to_load_membase (int opcode);
109 static int stind_to_store_membase (int opcode);
111 int mono_op_to_op_imm (int opcode);
112 int mono_op_to_op_imm_noemul (int opcode);
114 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
115 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
116 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
118 /* helper methods signatures */
119 static MonoMethodSignature *helper_sig_class_init_trampoline = NULL;
120 static MonoMethodSignature *helper_sig_domain_get = NULL;
121 static MonoMethodSignature *helper_sig_generic_class_init_trampoline = NULL;
122 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm = NULL;
123 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline = NULL;
124 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline = NULL;
125 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm = NULL;
128 * Instruction metadata
136 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
137 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
143 #if SIZEOF_REGISTER == 8
148 /* keep in sync with the enum in mini.h */
151 #include "mini-ops.h"
156 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
157 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
159 * This should contain the index of the last sreg + 1. This is not the same
160 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
162 const gint8 ins_sreg_counts[] = {
163 #include "mini-ops.h"
/* Initialize a variable's liveness info; 0xffff looks like a sentinel block
 * id meaning "no first use recorded yet" — TODO confirm against mini.h. */
168 #define MONO_INIT_VARINFO(vi,id) do { \
169 (vi)->range.first_use.pos.bid = 0xffff; \
/* Copy the first three entries of REGS into ins->sreg1..sreg3. */
175 mono_inst_set_src_registers (MonoInst *ins, int *regs)
177 ins->sreg1 = regs [0];
178 ins->sreg2 = regs [1];
179 ins->sreg3 = regs [2];
/* Public (non-inline) wrappers over the register allocation helpers:
 * integer, float, pointer-sized vregs, and a destination vreg matching a
 * given eval-stack type. */
183 mono_alloc_ireg (MonoCompile *cfg)
185 return alloc_ireg (cfg);
189 mono_alloc_freg (MonoCompile *cfg)
191 return alloc_freg (cfg);
195 mono_alloc_preg (MonoCompile *cfg)
197 return alloc_preg (cfg);
201 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
203 return alloc_dreg (cfg, stack_type);
/*
 * mono_type_to_regmove:
 * Selects the move handling for TYPE by switching on type->type.
 * Enum valuetypes are unwrapped to their underlying type and generic
 * instances to their container class before re-dispatching; unknown type
 * codes abort via g_error. NOTE(review): the per-case return values are not
 * visible in this chunk, so they are not documented here.
 */
207 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
213 switch (type->type) {
216 case MONO_TYPE_BOOLEAN:
228 case MONO_TYPE_FNPTR:
230 case MONO_TYPE_CLASS:
231 case MONO_TYPE_STRING:
232 case MONO_TYPE_OBJECT:
233 case MONO_TYPE_SZARRAY:
234 case MONO_TYPE_ARRAY:
238 #if SIZEOF_REGISTER == 8
247 case MONO_TYPE_VALUETYPE:
/* Enums move like their underlying primitive type. */
248 if (type->data.klass->enumtype) {
249 type = mono_class_enum_basetype (type->data.klass);
252 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
255 case MONO_TYPE_TYPEDBYREF:
257 case MONO_TYPE_GENERICINST:
258 type = &type->data.generic_class->container_class->byval_arg;
262 g_assert (cfg->generic_sharing_context);
265 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/* Debug dump of a basic block: prints MSG, the in-edges, the out-edges
 * (each as "BBnum(dfn)"), then every instruction in the block. */
271 mono_print_bb (MonoBasicBlock *bb, const char *msg)
276 printf ("\n%s %d: [IN: ", msg, bb->block_num);
277 for (i = 0; i < bb->in_count; ++i)
278 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
280 for (i = 0; i < bb->out_count; ++i)
281 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
283 for (tree = bb->code; tree; tree = tree->next)
284 mono_print_ins_index (-1, tree);
/*
 * mono_create_helper_signatures:
 * One-time initialization of the cached trampoline/icall signatures declared
 * above. The strings use mono_create_icall_signature's "ret [args...]"
 * format; the *_llvm variants take an explicit argument because LLVM cannot
 * use the implicit-register calling conventions of the plain trampolines.
 */
288 mono_create_helper_signatures (void)
290 helper_sig_domain_get = mono_create_icall_signature ("ptr");
291 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
292 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
293 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
294 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
295 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
296 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
300 * Can't put this at the beginning, since other files reference stuff from this
/* On invalid or unloadable IL: break into the debugger when the
 * break_on_unverified debug option is set, otherwise jump to the
 * method-level 'unverified' / 'load_error' labels. */
305 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
307 #define LOAD_ERROR do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto load_error; } while (0)
/* Fetch the basic block starting at IP from the offset->bb map, verifying
 * IP lies inside the method body; when absent, create a new bblock for IP
 * and register it. */
309 #define GET_BBLOCK(cfg,tblock,ip) do { \
310 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
312 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
313 NEW_BBLOCK (cfg, (tblock)); \
314 (tblock)->cil_code = (ip); \
315 ADD_BBLOCK (cfg, (tblock)); \
/* x86/amd64 only: emit an OP_X86_LEA computing sr1 + (sr2 << shift) + imm
 * into a freshly allocated pointer register, appended to the current bblock. */
319 #if defined(TARGET_X86) || defined(TARGET_AMD64)
320 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
321 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
322 (dest)->dreg = alloc_preg ((cfg)); \
323 (dest)->sreg1 = (sr1); \
324 (dest)->sreg2 = (sr2); \
325 (dest)->inst_imm = (imm); \
326 (dest)->backend.shift_amount = (shift); \
327 MONO_ADD_INS ((cfg)->cbb, (dest)); \
/* On 64-bit targets, sign-extend an I4 second operand to pointer width when
 * the first operand is a native int (OP_SEXT_I4 into a fresh preg, rewiring
 * ins->sreg2). Expands to nothing on 32-bit targets. */
331 #if SIZEOF_REGISTER == 8
332 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
333 /* FIXME: Need to add many more cases */ \
334 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
336 int dr = alloc_preg (cfg); \
337 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
338 (ins)->sreg2 = widen->dreg; \
342 #define ADD_WIDEN_OP(ins, arg1, arg2)
/* Build a binary op from the two top-of-stack values: type-check and
 * specialize via type_from_op, insert a widening op if needed (64-bit),
 * allocate a result vreg, append to the current bblock and push the
 * (possibly decomposed) result. */
345 #define ADD_BINOP(op) do { \
346 MONO_INST_NEW (cfg, ins, (op)); \
348 ins->sreg1 = sp [0]->dreg; \
349 ins->sreg2 = sp [1]->dreg; \
350 type_from_op (ins, sp [0], sp [1]); \
352 /* Have to insert a widening op */ \
353 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
354 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
355 MONO_ADD_INS ((cfg)->cbb, (ins)); \
356 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
/* Unary counterpart of ADD_BINOP: type-check the single operand, allocate a
 * result vreg, append and push the decomposed result. */
359 #define ADD_UNOP(op) do { \
360 MONO_INST_NEW (cfg, ins, (op)); \
362 ins->sreg1 = sp [0]->dreg; \
363 type_from_op (ins, sp [0], NULL); \
365 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
366 MONO_ADD_INS ((cfg)->cbb, (ins)); \
367 *sp++ = mono_decompose_opcode (cfg, ins); \
/* Emit a compare + conditional branch pair. Links the current bblock to the
 * branch target and to the fall-through block (reusing next_block when given,
 * otherwise creating the block at IP), spilling any live eval-stack entries
 * with handle_stack_args before the branch is appended. */
370 #define ADD_BINCOND(next_block) do { \
373 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
374 cmp->sreg1 = sp [0]->dreg; \
375 cmp->sreg2 = sp [1]->dreg; \
376 type_from_op (cmp, sp [0], sp [1]); \
378 type_from_op (ins, sp [0], sp [1]); \
379 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
380 GET_BBLOCK (cfg, tblock, target); \
381 link_bblock (cfg, bblock, tblock); \
382 ins->inst_true_bb = tblock; \
383 if ((next_block)) { \
384 link_bblock (cfg, bblock, (next_block)); \
385 ins->inst_false_bb = (next_block); \
386 start_new_bblock = 1; \
388 GET_BBLOCK (cfg, tblock, ip); \
389 link_bblock (cfg, bblock, tblock); \
390 ins->inst_false_bb = tblock; \
391 start_new_bblock = 2; \
393 if (sp != stack_start) { \
394 handle_stack_args (cfg, stack_start, sp - stack_start); \
395 CHECK_UNVERIFIABLE (cfg); \
397 MONO_ADD_INS (bblock, cmp); \
398 MONO_ADD_INS (bblock, ins); \
402 * link_bblock: Links two basic blocks
404 * links two basic blocks in the control flow graph, the 'from'
405 * argument is the starting block and the 'to' argument is the block
406 * control flow transfers to after 'from'.
/*
 * link_bblock:
 * Adds the CFG edge FROM -> TO. Each side's edge array (from->out_bb and
 * to->in_bb) is first scanned for an existing entry so the edge is not
 * duplicated, then rebuilt one element larger in the compile mempool.
 */
409 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
411 MonoBasicBlock **newa;
/* Verbose tracing of the edge being added (entry/exit are the blocks
 * without cil_code). */
415 if (from->cil_code) {
417 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
419 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
422 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
424 printf ("edge from entry to exit\n");
/* Successor side: skip if TO is already an out-edge of FROM. */
429 for (i = 0; i < from->out_count; ++i) {
430 if (to == from->out_bb [i]) {
436 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
437 for (i = 0; i < from->out_count; ++i) {
438 newa [i] = from->out_bb [i];
/* Predecessor side: mirror the edge in to->in_bb. */
446 for (i = 0; i < to->in_count; ++i) {
447 if (from == to->in_bb [i]) {
453 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
454 for (i = 0; i < to->in_count; ++i) {
455 newa [i] = to->in_bb [i];
/* Public wrapper around the static link_bblock. */
464 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
466 link_bblock (cfg, from, to);
470 * mono_find_block_region:
472 * We mark each basic block with a region ID. We use that to avoid BB
473 * optimizations when blocks are in different regions.
476 * A region token that encodes where this region is, and information
477 * about the clause owner for this block.
479 * The region encodes the try/catch/filter clause that owns this block
480 * as well as the type. -1 is a special value that represents a block
481 * that is in none of try/catch/filter.
/*
 * Region token layout: clause index + 1 shifted left by 8, OR-ed with a
 * MONO_REGION_* kind and the clause flags. Filter ranges are checked first,
 * then handler ranges (finally / fault / catch), then the protected (try)
 * range itself. The "no containing clause" return is outside this view.
 */
484 mono_find_block_region (MonoCompile *cfg, int offset)
486 MonoMethodHeader *header = cfg->header;
487 MonoExceptionClause *clause;
490 for (i = 0; i < header->num_clauses; ++i) {
491 clause = &header->clauses [i];
/* Inside a filter block: [filter_offset, handler_offset). */
492 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
493 (offset < (clause->handler_offset)))
494 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
/* Inside the handler: classify by clause kind. */
496 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
497 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
498 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
499 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
500 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
502 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Inside the protected (try) range. */
505 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
506 return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block:
 * Collects into a GList every exception clause of kind TYPE whose protected
 * range contains IP but not TARGET — i.e. the clauses a branch from IP to
 * TARGET would leave (used when emitting leave-style control flow).
 */
513 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
515 MonoMethodHeader *header = cfg->header;
516 MonoExceptionClause *clause;
520 for (i = 0; i < header->num_clauses; ++i) {
521 clause = &header->clauses [i];
522 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
523 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
524 if (clause->flags == type)
525 res = g_list_append (res, clause);
/*
 * mono_create_spvar_for_region:
 * Returns the stack-pointer spill variable for REGION, creating it and
 * caching it in cfg->spvars on first use. MONO_INST_INDIRECT keeps the
 * register allocator from putting it in a register.
 */
532 mono_create_spvar_for_region (MonoCompile *cfg, int region)
536 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
540 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
541 /* prevent it from being register allocated */
542 var->flags |= MONO_INST_INDIRECT;
544 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Return the cached exception variable for OFFSET, or NULL if none was
 * created yet (g_hash_table_lookup returns NULL on a miss). */
548 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
550 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/*
 * mono_create_exvar_for_offset:
 * Same lazy-create-and-cache pattern as mono_create_spvar_for_region, but
 * keyed by IL offset in cfg->exvars and typed as object (it holds the
 * in-flight exception).
 */
554 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
558 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
562 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
563 /* prevent it from being register allocated */
564 var->flags |= MONO_INST_INDIRECT;
566 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
572 * Returns the type used in the eval stack when @type is loaded.
573 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/* Sets inst->type (STACK_*) and inst->klass from TYPE; see the comment
 * block above for the contract. */
576 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
580 inst->klass = klass = mono_class_from_mono_type (type);
/* STACK_MP here looks like the byref early-out (managed pointer); the
 * guarding condition is not visible in this chunk — TODO confirm. */
582 inst->type = STACK_MP;
587 switch (type->type) {
589 inst->type = STACK_INV;
593 case MONO_TYPE_BOOLEAN:
599 inst->type = STACK_I4;
604 case MONO_TYPE_FNPTR:
605 inst->type = STACK_PTR;
607 case MONO_TYPE_CLASS:
608 case MONO_TYPE_STRING:
609 case MONO_TYPE_OBJECT:
610 case MONO_TYPE_SZARRAY:
611 case MONO_TYPE_ARRAY:
612 inst->type = STACK_OBJ;
616 inst->type = STACK_I8;
620 inst->type = STACK_R8;
622 case MONO_TYPE_VALUETYPE:
/* Enums evaluate as their underlying primitive type. */
623 if (type->data.klass->enumtype) {
624 type = mono_class_enum_basetype (type->data.klass);
628 inst->type = STACK_VTYPE;
631 case MONO_TYPE_TYPEDBYREF:
632 inst->klass = mono_defaults.typed_reference_class;
633 inst->type = STACK_VTYPE;
635 case MONO_TYPE_GENERICINST:
636 type = &type->data.generic_class->container_class->byval_arg;
639 case MONO_TYPE_MVAR :
640 /* FIXME: all the arguments must be references for now,
641 * later look inside cfg and see if the arg num is
644 g_assert (cfg->generic_sharing_context);
645 inst->type = STACK_OBJ;
648 g_error ("unknown type 0x%02x in eval stack type", type->type);
653 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result stack type for arithmetic binops, indexed
 * [src1->type][src2->type]; STACK_INV marks IL-invalid combinations.
 * Row/column order follows the STACK_* enum (Inv, i4, i8, ptr, r8, mp,
 * obj, vt). */
656 bin_num_table [STACK_MAX] [STACK_MAX] = {
657 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
658 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
659 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
660 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
661 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
662 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
663 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
664 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
669 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
672 /* reduce the size of this table */
674 bin_int_table [STACK_MAX] [STACK_MAX] = {
675 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
676 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
677 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
678 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
679 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
680 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
681 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
682 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
686 bin_comp_table [STACK_MAX] [STACK_MAX] = {
687 /* Inv i L p F & O vt */
689 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
690 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
691 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
692 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
693 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
694 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
695 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
698 /* reduce the size of this table */
700 shift_table [STACK_MAX] [STACK_MAX] = {
701 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
702 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
703 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
704 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
705 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
706 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
707 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
708 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
712 * Tables to map from the non-specific opcode to the matching
713 * type-specific opcode.
715 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
717 binops_op_map [STACK_MAX] = {
718 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
721 /* handles from CEE_NEG to CEE_CONV_U8 */
723 unops_op_map [STACK_MAX] = {
724 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
727 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
729 ovfops_op_map [STACK_MAX] = {
730 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
733 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
735 ovf2ops_op_map [STACK_MAX] = {
736 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
739 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
741 ovf3ops_op_map [STACK_MAX] = {
742 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
745 /* handles from CEE_BEQ to CEE_BLT_UN */
747 beqops_op_map [STACK_MAX] = {
748 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
751 /* handles from CEE_CEQ to CEE_CLT_UN */
753 ceqops_op_map [STACK_MAX] = {
754 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
758 * Sets ins->type (the type on the eval stack) according to the
759 * type of the opcode and the arguments to it.
760 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
762 * FIXME: this function sets ins->type unconditionally in some cases, but
763 * it should set it to invalid for some types (a conv.x on an object)
/*
 * Central opcode-typing switch: given the generic opcode in ins->opcode and
 * the stack types of src1/src2, sets ins->type from the lookup tables above
 * (bin_num/bin_int/shift/bin_comp/neg) and specializes ins->opcode via the
 * *_op_map delta tables. Invalid operand combinations yield STACK_INV.
 */
766 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
768 switch (ins->opcode) {
/* Arithmetic binops. */
775 /* FIXME: check unverifiable args for STACK_MP */
776 ins->type = bin_num_table [src1->type] [src2->type];
777 ins->opcode += binops_op_map [ins->type];
/* Integer-only binops (and/or/xor/...). */
784 ins->type = bin_int_table [src1->type] [src2->type];
785 ins->opcode += binops_op_map [ins->type];
/* Shifts: result type follows the shifted value. */
790 ins->type = shift_table [src1->type] [src2->type];
791 ins->opcode += binops_op_map [ins->type];
/* Compares: pick L/F/I variant by the operand width. */
796 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
797 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
798 ins->opcode = OP_LCOMPARE;
799 else if (src1->type == STACK_R8)
800 ins->opcode = OP_FCOMPARE;
802 ins->opcode = OP_ICOMPARE;
804 case OP_ICOMPARE_IMM:
/* compare-with-immediate: only src1 carries a stack type. */
805 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
806 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
807 ins->opcode = OP_LCONV_TO_U1 == 0 ? ins->opcode : OP_LCOMPARE_IMM;
819 ins->opcode += beqops_op_map [src1->type];
822 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
823 ins->opcode += ceqops_op_map [src1->type];
/* Unsigned/unordered compares use only bit 0 of the comp table. */
829 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
830 ins->opcode += ceqops_op_map [src1->type];
834 ins->type = neg_table [src1->type];
835 ins->opcode += unops_op_map [ins->type];
838 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
839 ins->type = src1->type;
841 ins->type = STACK_INV;
842 ins->opcode += unops_op_map [ins->type];
/* Narrowing int conversions always produce an I4. */
848 ins->type = STACK_I4;
849 ins->opcode += unops_op_map [src1->type];
852 ins->type = STACK_R8;
853 switch (src1->type) {
856 ins->opcode = OP_ICONV_TO_R_UN;
859 ins->opcode = OP_LCONV_TO_R_UN;
/* Checked (overflow-trapping) conversions to small/int types. */
863 case CEE_CONV_OVF_I1:
864 case CEE_CONV_OVF_U1:
865 case CEE_CONV_OVF_I2:
866 case CEE_CONV_OVF_U2:
867 case CEE_CONV_OVF_I4:
868 case CEE_CONV_OVF_U4:
869 ins->type = STACK_I4;
870 ins->opcode += ovf3ops_op_map [src1->type];
872 case CEE_CONV_OVF_I_UN:
873 case CEE_CONV_OVF_U_UN:
874 ins->type = STACK_PTR;
875 ins->opcode += ovf2ops_op_map [src1->type];
877 case CEE_CONV_OVF_I1_UN:
878 case CEE_CONV_OVF_I2_UN:
879 case CEE_CONV_OVF_I4_UN:
880 case CEE_CONV_OVF_U1_UN:
881 case CEE_CONV_OVF_U2_UN:
882 case CEE_CONV_OVF_U4_UN:
883 ins->type = STACK_I4;
884 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: source-width dependent; on 64-bit, pointer-sized values need
 * no conversion (OP_MOVE). */
887 ins->type = STACK_PTR;
888 switch (src1->type) {
890 ins->opcode = OP_ICONV_TO_U;
894 #if SIZEOF_REGISTER == 8
895 ins->opcode = OP_LCONV_TO_U;
897 ins->opcode = OP_MOVE;
901 ins->opcode = OP_LCONV_TO_U;
904 ins->opcode = OP_FCONV_TO_U;
910 ins->type = STACK_I8;
911 ins->opcode += unops_op_map [src1->type];
913 case CEE_CONV_OVF_I8:
914 case CEE_CONV_OVF_U8:
915 ins->type = STACK_I8;
916 ins->opcode += ovf3ops_op_map [src1->type];
918 case CEE_CONV_OVF_U8_UN:
919 case CEE_CONV_OVF_I8_UN:
920 ins->type = STACK_I8;
921 ins->opcode += ovf2ops_op_map [src1->type];
925 ins->type = STACK_R8;
926 ins->opcode += unops_op_map [src1->type];
929 ins->type = STACK_R8;
933 ins->type = STACK_I4;
934 ins->opcode += ovfops_op_map [src1->type];
939 ins->type = STACK_PTR;
940 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic: no R8 variant exists, so R8 is invalid. */
948 ins->type = bin_num_table [src1->type] [src2->type];
949 ins->opcode += ovfops_op_map [src1->type];
950 if (ins->type == STACK_R8)
951 ins->type = STACK_INV;
953 case OP_LOAD_MEMBASE:
954 ins->type = STACK_PTR;
956 case OP_LOADI1_MEMBASE:
957 case OP_LOADU1_MEMBASE:
958 case OP_LOADI2_MEMBASE:
959 case OP_LOADU2_MEMBASE:
960 case OP_LOADI4_MEMBASE:
961 case OP_LOADU4_MEMBASE:
962 ins->type = STACK_PTR;
964 case OP_LOADI8_MEMBASE:
965 ins->type = STACK_I8;
967 case OP_LOADR4_MEMBASE:
968 case OP_LOADR8_MEMBASE:
969 ins->type = STACK_R8;
972 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers carry object_class as a conservative klass. */
976 if (ins->type == STACK_MP)
977 ins->klass = mono_defaults.object_class;
982 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
988 param_table [STACK_MAX] [STACK_MAX] = {
/* Verification helper: checks that each arg's eval-stack type is compatible
 * with the corresponding signature parameter (byref-ness, reference kinds,
 * float widths). Visible cases only — interior lines are sampled out. */
993 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
997 switch (args->type) {
1007 for (i = 0; i < sig->param_count; ++i) {
1008 switch (args [i].type) {
1012 if (!sig->params [i]->byref)
1016 if (sig->params [i]->byref)
1018 switch (sig->params [i]->type) {
1019 case MONO_TYPE_CLASS:
1020 case MONO_TYPE_STRING:
1021 case MONO_TYPE_OBJECT:
1022 case MONO_TYPE_SZARRAY:
1023 case MONO_TYPE_ARRAY:
1030 if (sig->params [i]->byref)
1032 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1041 /*if (!param_table [args [i].type] [sig->params [i]->type])
1049 * When we need a pointer to the current domain many times in a method, we
1050 * call mono_domain_get() once and we store the result in a local variable.
1051 * This function returns the variable that represents the MonoDomain*.
1053 inline static MonoInst *
1054 mono_get_domainvar (MonoCompile *cfg)
/* Created lazily on first use; all later callers get the same variable. */
1056 if (!cfg->domainvar)
1057 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1058 return cfg->domainvar;
1062 * The got_var contains the address of the Global Offset Table when AOT
1066 mono_get_got_var (MonoCompile *cfg)
1068 #ifdef MONO_ARCH_NEED_GOT_VAR
/* Only AOT compilation needs a GOT variable. */
1069 if (!cfg->compile_aot)
/* Lazily created, like domainvar above. */
1071 if (!cfg->got_var) {
1072 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1074 return cfg->got_var;
/* Returns the (lazily created) rgctx variable; only valid when compiling
 * with generic sharing. MONO_INST_INDIRECT forces stack allocation. */
1081 mono_get_vtable_var (MonoCompile *cfg)
1083 g_assert (cfg->generic_sharing_context);
1085 if (!cfg->rgctx_var) {
1086 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1087 /* force the var to be stack allocated */
1088 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1091 return cfg->rgctx_var;
/* Inverse of type_to_eval_stack_type: maps an instruction's STACK_* type
 * back to a MonoType, using ins->klass for pointer/valuetype entries.
 * Aborts on an unhandled stack type. */
1095 type_from_stack_type (MonoInst *ins) {
1096 switch (ins->type) {
1097 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1098 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1099 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1100 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1102 return &ins->klass->this_arg;
1103 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1104 case STACK_VTYPE: return &ins->klass->byval_arg;
1106 g_error ("stack type %d to monotype not handled\n", ins->type);
/* Map a MonoType to its STACK_* eval-stack type, after stripping enum
 * wrappers with mono_type_get_underlying_type. Unknown types assert. */
1111 static G_GNUC_UNUSED int
1112 type_to_stack_type (MonoType *t)
1114 t = mono_type_get_underlying_type (t);
1118 case MONO_TYPE_BOOLEAN:
1121 case MONO_TYPE_CHAR:
1128 case MONO_TYPE_FNPTR:
1130 case MONO_TYPE_CLASS:
1131 case MONO_TYPE_STRING:
1132 case MONO_TYPE_OBJECT:
1133 case MONO_TYPE_SZARRAY:
1134 case MONO_TYPE_ARRAY:
1142 case MONO_TYPE_VALUETYPE:
1143 case MONO_TYPE_TYPEDBYREF:
1145 case MONO_TYPE_GENERICINST:
1146 if (mono_type_generic_inst_is_valuetype (t))
1152 g_assert_not_reached ();
/* Map an ldelem/stelem opcode to the element's MonoClass; asserts on an
 * opcode that is not an array access. */
1159 array_access_to_klass (int opcode)
1163 return mono_defaults.byte_class;
1165 return mono_defaults.uint16_class;
1168 return mono_defaults.int_class;
1171 return mono_defaults.sbyte_class;
1174 return mono_defaults.int16_class;
1177 return mono_defaults.int32_class;
1179 return mono_defaults.uint32_class;
1182 return mono_defaults.int64_class;
1185 return mono_defaults.single_class;
1188 return mono_defaults.double_class;
1189 case CEE_LDELEM_REF:
1190 case CEE_STELEM_REF:
1191 return mono_defaults.object_class;
1193 g_assert_not_reached ();
1199 * We try to share variables when possible
/*
 * mono_compile_get_interface_var:
 * Returns a local used to carry stack slot SLOT across basic blocks,
 * reusing a cached variable (cfg->intvars, keyed by slot and stack type)
 * when one exists; slots beyond max_stack (possible after inlining) always
 * get a fresh variable.
 */
1202 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1207 /* inlining can result in deeper stacks */
1208 if (slot >= cfg->header->max_stack)
1209 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Cache index combines the stack type (1-based) with the slot. */
1211 pos = ins->type - 1 + slot * STACK_MAX;
1213 switch (ins->type) {
1220 if ((vnum = cfg->intvars [pos]))
1221 return cfg->varinfo [vnum];
1222 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1223 cfg->intvars [pos] = res->inst_c0;
1226 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 * For AOT compilation, remember the image+token that produced KEY so the
 * AOT compiler can re-resolve it later (stored in cfg->token_info_hash).
 */
1232 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1235 * Don't use this if a generic_context is set, since that means AOT can't
1236 * look up the method using just the image+token.
1237 * table == 0 means this is a reference made from a wrapper.
1239 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1240 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1241 jump_info_token->image = image;
1242 jump_info_token->token = token;
1243 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1248 * This function is called to handle items that are left on the evaluation stack
1249 * at basic block boundaries. What happens is that we save the values to local variables
1250 * and we reload them later when first entering the target basic block (with the
1251 * handle_loaded_temps () function).
1252 * A single joint point will use the same variables (stored in the array bb->out_stack or
1253 * bb->in_stack, if the basic block is before or after the joint point).
1255 * This function needs to be called _before_ emitting the last instruction of
1256 * the bb (i.e. before emitting a branch).
1257 * If the stack merge fails at a join point, cfg->unverifiable is set.
/*
 * Spills the COUNT live eval-stack entries in SP into shared temporaries at
 * a basic-block boundary (full contract in the comment block above). Pass 1
 * chooses the out_stack variables, reusing a successor's in_stack when one
 * exists; pass 2 propagates them to every successor, flagging
 * cfg->unverifiable on a stack-depth mismatch; then the values are stored,
 * and a final loop re-stores into any successor whose in_stack differs.
 */
1260 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1263 MonoBasicBlock *bb = cfg->cbb;
1264 MonoBasicBlock *outb;
1265 MonoInst *inst, **locals;
1270 if (cfg->verbose_level > 3)
1271 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
1272 if (!bb->out_scount) {
1273 bb->out_scount = count;
1274 //printf ("bblock %d has out:", bb->block_num);
1276 for (i = 0; i < bb->out_count; ++i) {
1277 outb = bb->out_bb [i];
1278 /* exception handlers are linked, but they should not be considered for stack args */
1279 if (outb->flags & BB_EXCEPTION_HANDLER)
1281 //printf (" %d", outb->block_num);
/* A successor already has an in_stack: share it as our out_stack. */
1282 if (outb->in_stack) {
1284 bb->out_stack = outb->in_stack;
1290 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1291 for (i = 0; i < count; ++i) {
1293 * try to reuse temps already allocated for this purpose, if they occupy the same
1294 * stack slot and if they are of the same type.
1295 * This won't cause conflicts since if 'local' is used to
1296 * store one of the values in the in_stack of a bblock, then
1297 * the same variable will be used for the same outgoing stack
1299 * This doesn't work when inlining methods, since the bblocks
1300 * in the inlined methods do not inherit their in_stack from
1301 * the bblock they are inlined to. See bug #58863 for an
1304 if (cfg->inlined_method)
1305 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1307 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate the chosen variables to every (non-handler) successor. */
1312 for (i = 0; i < bb->out_count; ++i) {
1313 outb = bb->out_bb [i];
1314 /* exception handlers are linked, but they should not be considered for stack args */
1315 if (outb->flags & BB_EXCEPTION_HANDLER)
1317 if (outb->in_scount) {
1318 if (outb->in_scount != bb->out_scount) {
1319 cfg->unverifiable = TRUE;
1322 continue; /* check they are the same locals */
1324 outb->in_scount = count;
1325 outb->in_stack = bb->out_stack;
/* Store the live values into the shared temporaries and replace the
 * stack entries with the temp variables. */
1328 locals = bb->out_stack;
1330 for (i = 0; i < count; ++i) {
1331 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1332 inst->cil_code = sp [i]->cil_code;
1333 sp [i] = locals [i];
1334 if (cfg->verbose_level > 3)
1335 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1339 * It is possible that the out bblocks already have in_stack assigned, and
1340 * the in_stacks differ. In this case, we will store to all the different
1347 /* Find a bblock which has a different in_stack */
1349 while (bindex < bb->out_count) {
1350 outb = bb->out_bb [bindex];
1351 /* exception handlers are linked, but they should not be considered for stack args */
1352 if (outb->flags & BB_EXCEPTION_HANDLER) {
1356 if (outb->in_stack != locals) {
1357 for (i = 0; i < count; ++i) {
1358 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1359 inst->cil_code = sp [i]->cil_code;
1360 sp [i] = locals [i];
1361 if (cfg->verbose_level > 3)
1362 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1364 locals = outb->in_stack;
1373 /* Emit code which loads interface_offsets [klass->interface_id]
1374 * The array is stored in memory before vtable.
1377 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
/* Under AOT the interface id is not known at compile time, so it is
 * materialized as an (adjusted) IID constant and added to the vtable
 * pointer; the JIT path folds the negative offset directly into the load. */
1379 if (cfg->compile_aot) {
1380 int ioffset_reg = alloc_preg (cfg);
1381 int iid_reg = alloc_preg (cfg);
1383 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1384 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1385 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT case: slot (interface_id + 1) below the vtable pointer. */
1388 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_interface_bitmap_check:
 *
 *   Emit code which sets INTF_BIT_REG to a nonzero value if the interface
 * bitmap stored at [BASE_REG + OFFSET] has the bit for KLASS's interface id
 * set.  Three strategies are used depending on build configuration:
 * a runtime icall for compressed bitmaps, a computed byte/bit test under AOT
 * (where the interface id is only known at runtime), and a direct byte load
 * with an immediate mask otherwise.
 */
1393 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1395 int ibitmap_reg = alloc_preg (cfg);
1396 #ifdef COMPRESSED_INTERFACE_BITMAP
/* Compressed bitmap: delegate the bit lookup to the
 * mono_class_interface_match icall. */
1398 MonoInst *res, *ins;
1399 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1400 MONO_ADD_INS (cfg->cbb, ins);
1402 if (cfg->compile_aot)
1403 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1405 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1406 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1407 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1409 int ibitmap_byte_reg = alloc_preg (cfg);
1411 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1413 if (cfg->compile_aot) {
/* AOT: iid is a patchable constant, so compute byte index (iid >> 3)
 * and bit mask (1 << (iid & 7)) at runtime. */
1414 int iid_reg = alloc_preg (cfg);
1415 int shifted_iid_reg = alloc_preg (cfg);
1416 int ibitmap_byte_address_reg = alloc_preg (cfg);
1417 int masked_iid_reg = alloc_preg (cfg);
1418 int iid_one_bit_reg = alloc_preg (cfg);
1419 int iid_bit_reg = alloc_preg (cfg);
1420 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1421 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1422 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1423 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1424 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1425 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1426 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1427 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: both byte offset and mask are compile-time constants. */
1429 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1430 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1436 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1437 * stored in "klass_reg" implements the interface "klass".
/* Thin wrapper: test the bitmap at MonoClass.interface_bitmap. */
1440 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1442 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1446 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1447 * stored in "vtable_reg" implements the interface "klass".
/* Thin wrapper: test the bitmap at MonoVTable.interface_bitmap. */
1450 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1452 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1456 * Emit code which checks whether the interface id of @klass is smaller
1457 * than the value given by max_iid_reg.
/* On failure (max_iid < iid): branch to FALSE_TARGET if given, otherwise
 * throw InvalidCastException. */
1460 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1461 MonoBasicBlock *false_target)
1463 if (cfg->compile_aot) {
/* AOT: the iid is a patchable constant loaded into a register. */
1464 int iid_reg = alloc_preg (cfg);
1465 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1466 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1469 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1471 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1473 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1476 /* Same as above, but obtains max_iid from a vtable */
1478 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1479 MonoBasicBlock *false_target)
1481 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is a 16-bit field, hence the unsigned 2-byte load. */
1483 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1484 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1487 /* Same as above, but obtains max_iid from a klass */
1489 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1490 MonoBasicBlock *false_target)
1492 int max_iid_reg = alloc_preg (cfg);
/* Same as the vtable variant, reading MonoClass.max_interface_id instead. */
1494 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1495 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 *
 *   Emit an isinst-style subtype test for the class in KLASS_REG against
 * KLASS, using the class's supertypes table: load
 * supertypes [klass->idepth - 1] and compare it against KLASS (given either
 * as the runtime value KLASS_INS, an AOT class constant, or an immediate).
 * Branches to TRUE_TARGET on a match; the idepth guard branches to
 * FALSE_TARGET when the candidate's inheritance depth is too small.
 */
1499 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1501 int idepth_reg = alloc_preg (cfg);
1502 int stypes_reg = alloc_preg (cfg);
1503 int stype = alloc_preg (cfg);
/* Only classes deeper than MONO_DEFAULT_SUPERTABLE_SIZE need an explicit
 * runtime idepth check; shallower tables are always fully populated. */
1505 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1506 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1507 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1508 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1510 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1511 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
/* Compare the loaded supertype against KLASS in whichever form is
 * available: a runtime MonoInst, an AOT constant, or an immediate. */
1513 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1514 } else if (cfg->compile_aot) {
1515 int const_reg = alloc_preg (cfg);
1516 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1517 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1519 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1521 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst test with KLASS known at compile time
 * (no runtime klass instruction). */
1525 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1527 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 *
 *   Emit code checking that the vtable in VTABLE_REG implements the
 * interface KLASS: first the max-iid range check, then the interface
 * bitmap test.  If TRUE_TARGET is given the result branches there on
 * success; otherwise a zero bit throws InvalidCastException.
 */
1531 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1533 int intf_reg = alloc_preg (cfg);
1535 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1536 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1537 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1539 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1541 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1545 * Variant of the above that takes a register to the class, not the vtable.
1548 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1550 int intf_bit_reg = alloc_preg (cfg);
/* Same structure as mini_emit_iface_cast, but reading max_interface_id and
 * the interface bitmap from the MonoClass instead of the MonoVTable. */
1552 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1553 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1554 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1556 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1558 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 *
 *   Emit an exact-class equality check of KLASS_REG against KLASS and throw
 * InvalidCastException on mismatch.  KLASS is supplied either as the runtime
 * value KLASS_INST, an AOT class constant, or an immediate pointer.
 */
1562 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1565 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1566 } else if (cfg->compile_aot) {
1567 int const_reg = alloc_preg (cfg);
1568 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1569 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1571 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1573 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact-class check with KLASS known at compile time. */
1577 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1579 return mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 *
 *   Compare KLASS_REG against KLASS and branch to TARGET using BRANCH_OP
 * (e.g. OP_PBEQ / OP_PBNE_UN) instead of throwing.
 */
1583 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1585 if (cfg->compile_aot) {
1586 int const_reg = alloc_preg (cfg);
1587 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1588 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1590 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1592 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1596 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 *
 *   Emit a castclass-style check of the class in KLASS_REG against KLASS,
 * throwing InvalidCastException on failure.  OBJ_REG holds the object (or -1
 * to skip the object-level vector check); OBJECT_IS_NULL is the bblock to
 * branch to when the cast is trivially satisfied (e.g. a null element class
 * match).  Array classes compare rank and recurse on the element class;
 * other classes use the supertypes-table check.
 */
1599 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1602 int rank_reg = alloc_preg (cfg);
1603 int eclass_reg = alloc_preg (cfg);
1605 g_assert (!klass_inst);
/* Array path: ranks must match exactly, then check the element class. */
1606 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1607 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1608 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1609 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1610 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* The next three cases special-case enum element types, which alias their
 * underlying integral type for array-cast purposes. */
1611 if (klass->cast_class == mono_defaults.object_class) {
1612 int parent_reg = alloc_preg (cfg);
1613 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1614 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1615 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1616 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1617 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1618 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1619 } else if (klass->cast_class == mono_defaults.enum_class) {
1620 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1621 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1622 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1624 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1625 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1628 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1629 /* Check that the object is a vector too */
/* An SZARRAY must have a NULL bounds pointer; a multi-dim array with
 * matching rank would otherwise slip through. */
1630 int bounds_reg = alloc_preg (cfg);
1631 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1632 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1633 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: supertypes-table subtype check, throwing on failure. */
1636 int idepth_reg = alloc_preg (cfg);
1637 int stypes_reg = alloc_preg (cfg);
1638 int stype = alloc_preg (cfg);
1640 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1641 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1642 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1643 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1645 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1646 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1647 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass check with KLASS known at compile time. */
1652 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1654 return mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 *
 *   Emit inline code setting SIZE bytes at [DESTREG + OFFSET] to VAL
 * (only VAL == 0 is supported, per the assert).  Small aligned sizes use a
 * single store-immediate; larger sizes loop over register-sized (then
 * smaller) stores from a zeroed register, respecting ALIGN.
 */
1658 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1662 g_assert (val == 0);
/* Fast path: a single store-immediate of the exact size. */
1667 if ((size <= 4) && (size <= align)) {
1670 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1673 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1676 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1678 #if SIZEOF_REGISTER == 8
1680 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General path: load VAL into a register once, then emit a run of stores. */
1686 val_reg = alloc_preg (cfg);
1688 if (SIZEOF_REGISTER == 8)
1689 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1691 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned destination: fall back to byte stores. */
1694 /* This could be optimized further if necessary */
1696 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1703 #if !NO_UNALIGNED_ACCESS
/* Widest stores first (8-byte on 64-bit), then progressively narrower
 * stores for the remainder. */
1704 if (SIZEOF_REGISTER == 8) {
1706 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1711 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1719 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1724 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1729 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 *
 *   Emit inline code copying SIZE bytes from [SRCREG + SOFFSET] to
 * [DESTREG + DOFFSET], honoring ALIGN.  Unaligned copies use byte-sized
 * load/store pairs; aligned copies use the widest register-sized pairs
 * first, then narrower ones for the tail.
 */
1736 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1743 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1744 g_assert (size < 10000);
/* Unaligned case: byte-by-byte copy. */
1747 /* This could be optimized further if necessary */
1749 cur_reg = alloc_preg (cfg);
1750 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1751 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1758 #if !NO_UNALIGNED_ACCESS
/* 8-byte copies on 64-bit targets, when unaligned access is allowed. */
1759 if (SIZEOF_REGISTER == 8) {
1761 cur_reg = alloc_preg (cfg);
1762 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1763 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Remaining tail: 4-, 2-, then 1-byte copies. */
1772 cur_reg = alloc_preg (cfg);
1773 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1774 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1780 cur_reg = alloc_preg (cfg);
1781 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1782 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1788 cur_reg = alloc_preg (cfg);
1789 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1790 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * ret_type_to_call_opcode:
 *
 *   Map a method's return TYPE to the call IR opcode, selected along two
 * axes: indirect call (CALLI) vs. direct, and virtual (VIRT) vs. normal.
 * Enum valuetypes and generic instances are resolved to their underlying
 * type and re-dispatched.
 */
1798 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
/* Byref returns are pointer-sized, so use the plain CALL family. */
1801 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1804 type = mini_get_basic_type_from_generic (gsctx, type);
1805 switch (type->type) {
1806 case MONO_TYPE_VOID:
1807 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1810 case MONO_TYPE_BOOLEAN:
1813 case MONO_TYPE_CHAR:
1816 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1820 case MONO_TYPE_FNPTR:
1821 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1822 case MONO_TYPE_CLASS:
1823 case MONO_TYPE_STRING:
1824 case MONO_TYPE_OBJECT:
1825 case MONO_TYPE_SZARRAY:
1826 case MONO_TYPE_ARRAY:
1827 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
/* 64-bit integer returns use the LCALL family; floats use FCALL. */
1830 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1833 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1834 case MONO_TYPE_VALUETYPE:
/* Enums collapse to their underlying integral type. */
1835 if (type->data.klass->enumtype) {
1836 type = mono_class_enum_basetype (type->data.klass);
1839 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1840 case MONO_TYPE_TYPEDBYREF:
1841 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1842 case MONO_TYPE_GENERICINST:
/* Re-dispatch on the generic container's open type. */
1843 type = &type->data.generic_class->container_class->byval_arg;
1846 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1852 * target_type_is_incompatible:
1853 * @cfg: MonoCompile context
1855 * Check that the item @arg on the evaluation stack can be stored
1856 * in the target type (can be a local, or field, etc).
1857 * The cfg arg can be used to check if we need verification or just
1860 * Returns: non-0 value if arg can't be stored on a target.
1863 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1865 MonoType *simple_type;
1868 if (target->byref) {
1869 /* FIXME: check that the pointed to types match */
1870 if (arg->type == STACK_MP)
1871 return arg->klass != mono_class_from_mono_type (target);
1872 if (arg->type == STACK_PTR)
/* From here on, compare the stack-type of ARG against the underlying
 * (non-enum) form of TARGET. */
1877 simple_type = mono_type_get_underlying_type (target);
1878 switch (simple_type->type) {
1879 case MONO_TYPE_VOID:
1883 case MONO_TYPE_BOOLEAN:
1886 case MONO_TYPE_CHAR:
1889 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1893 /* STACK_MP is needed when setting pinned locals */
1894 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1899 case MONO_TYPE_FNPTR:
1900 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1903 case MONO_TYPE_CLASS:
1904 case MONO_TYPE_STRING:
1905 case MONO_TYPE_OBJECT:
1906 case MONO_TYPE_SZARRAY:
1907 case MONO_TYPE_ARRAY:
1908 if (arg->type != STACK_OBJ)
1910 /* FIXME: check type compatibility */
1914 if (arg->type != STACK_I8)
1919 if (arg->type != STACK_R8)
/* Valuetypes must match by exact class, not just stack type. */
1922 case MONO_TYPE_VALUETYPE:
1923 if (arg->type != STACK_VTYPE)
1925 klass = mono_class_from_mono_type (simple_type);
1926 if (klass != arg->klass)
1929 case MONO_TYPE_TYPEDBYREF:
1930 if (arg->type != STACK_VTYPE)
1932 klass = mono_class_from_mono_type (simple_type);
1933 if (klass != arg->klass)
1936 case MONO_TYPE_GENERICINST:
/* Generic valuetype instantiations behave like valuetypes; reference
 * instantiations behave like objects. */
1937 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1938 if (arg->type != STACK_VTYPE)
1940 klass = mono_class_from_mono_type (simple_type);
1941 if (klass != arg->klass)
1945 if (arg->type != STACK_OBJ)
1947 /* FIXME: check type compatibility */
1951 case MONO_TYPE_MVAR:
1952 /* FIXME: all the arguments must be references for now,
1953 * later look inside cfg and see if the arg num is
1954 * really a reference
1956 g_assert (cfg->generic_sharing_context);
1957 if (arg->type != STACK_OBJ)
1961 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1967 * Prepare arguments for passing to a function call.
1968 * Return a non-zero value if the arguments can't be passed to the given
1970 * The type checks are not yet complete and some conversions may need
1971 * casts on 32 or 64 bit architectures.
1973 * FIXME: implement this using target_type_is_incompatible ()
1976 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1978 MonoType *simple_type;
/* The implicit 'this' argument must be a reference or pointer. */
1982 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
/* Check each explicit parameter's stack-type against the signature. */
1986 for (i = 0; i < sig->param_count; ++i) {
1987 if (sig->params [i]->byref) {
1988 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1992 simple_type = sig->params [i];
1993 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1995 switch (simple_type->type) {
1996 case MONO_TYPE_VOID:
2001 case MONO_TYPE_BOOLEAN:
2004 case MONO_TYPE_CHAR:
2007 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2013 case MONO_TYPE_FNPTR:
2014 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2017 case MONO_TYPE_CLASS:
2018 case MONO_TYPE_STRING:
2019 case MONO_TYPE_OBJECT:
2020 case MONO_TYPE_SZARRAY:
2021 case MONO_TYPE_ARRAY:
2022 if (args [i]->type != STACK_OBJ)
2027 if (args [i]->type != STACK_I8)
2032 if (args [i]->type != STACK_R8)
2035 case MONO_TYPE_VALUETYPE:
/* Enums collapse to their underlying integral type and re-dispatch. */
2036 if (simple_type->data.klass->enumtype) {
2037 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2040 if (args [i]->type != STACK_VTYPE)
2043 case MONO_TYPE_TYPEDBYREF:
2044 if (args [i]->type != STACK_VTYPE)
2047 case MONO_TYPE_GENERICINST:
2048 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2052 g_error ("unknown type 0x%02x in check_call_signature",
/* Map an OP_*CALLVIRT opcode to its direct-call counterpart; aborts on an
 * opcode outside the CALLVIRT family. */
2060 callvirt_to_call (int opcode)
2065 case OP_VOIDCALLVIRT:
2074 g_assert_not_reached ();
/* Map an OP_*CALLVIRT opcode to its CALL_MEMBASE counterpart (call through
 * a vtable/memory slot); aborts on an unexpected opcode. */
2081 callvirt_to_call_membase (int opcode)
2085 return OP_CALL_MEMBASE;
2086 case OP_VOIDCALLVIRT:
2087 return OP_VOIDCALL_MEMBASE;
2089 return OP_FCALL_MEMBASE;
2091 return OP_LCALL_MEMBASE;
2093 return OP_VCALL_MEMBASE;
2095 g_assert_not_reached ();
2101 #ifdef MONO_ARCH_HAVE_IMT
/*
 * emit_imt_argument:
 *
 *   Emit IR loading the IMT (interface method table) argument for CALL into
 * its architecture-defined register: either IMT_ARG's value, an AOT method
 * constant, or the method pointer as an immediate.  LLVM and non-LLVM paths
 * differ only in how the register is attached to the call.
 */
2103 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2107 if (COMPILE_LLVM (cfg)) {
2108 method_reg = alloc_preg (cfg);
2111 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2112 } else if (cfg->compile_aot) {
2113 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2116 MONO_INST_NEW (cfg, ins, OP_PCONST);
2117 ins->inst_p0 = call->method;
2118 ins->dreg = method_reg;
2119 MONO_ADD_INS (cfg->cbb, ins);
2123 call->imt_arg_reg = method_reg;
2125 #ifdef MONO_ARCH_IMT_REG
2126 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2128 /* Need this to keep the IMT arg alive */
2129 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
/* Non-LLVM path: same three sources for the method value, then either the
 * dedicated IMT register or an arch-specific hook. */
2134 #ifdef MONO_ARCH_IMT_REG
2135 method_reg = alloc_preg (cfg);
2138 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2139 } else if (cfg->compile_aot) {
2140 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2143 MONO_INST_NEW (cfg, ins, OP_PCONST);
2144 ins->inst_p0 = call->method;
2145 ins->dreg = method_reg;
2146 MONO_ADD_INS (cfg->cbb, ins);
2149 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2151 mono_arch_emit_imt_argument (cfg, call, imt_arg);
2156 static MonoJumpInfo *
/* Allocate a MonoJumpInfo patch record from MP describing a patch of TYPE
 * at IP pointing at TARGET. */
2157 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2159 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2163 ji->data.target = target;
2168 inline static MonoCallInst *
/*
 * mono_emit_call_args:
 *
 *   Create the MonoCallInst for a call with signature SIG and arguments
 * ARGS, selecting the opcode from CALLI/VIRTUAL/TAIL, allocating the result
 * vreg (including the OP_OUTARG_VTRETADDR machinery for valuetype returns),
 * performing soft-float r8->r4 argument conversions if needed, and lowering
 * the outgoing arguments via the backend.  The call is NOT yet added to a
 * bblock; callers do that.
 */
2169 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2170 MonoInst **args, int calli, int virtual, int tail, int rgctx)
2173 #ifdef MONO_ARCH_SOFT_FLOAT
2178 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2180 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2183 call->signature = sig;
2184 call->rgctx_reg = rgctx;
2186 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
/* Valuetype return: either reuse the caller-provided vret_addr, or create
 * a temp and pass its address via OP_OUTARG_VTRETADDR. */
2189 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2190 call->vret_var = cfg->vret_addr;
2191 //g_assert_not_reached ();
2193 } else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2194 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2197 temp->backend.is_pinvoke = sig->pinvoke;
2200 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2201 * address of return value to increase optimization opportunities.
2202 * Before vtype decomposition, the dreg of the call ins itself represents the
2203 * fact the call modifies the return value. After decomposition, the call will
2204 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2205 * will be transformed into an LDADDR.
2207 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2208 loada->dreg = alloc_preg (cfg);
2209 loada->inst_p0 = temp;
2210 /* We reference the call too since call->dreg could change during optimization */
2211 loada->inst_p1 = call;
2212 MONO_ADD_INS (cfg->cbb, loada);
2214 call->inst.dreg = temp->dreg;
2216 call->vret_var = loada;
2217 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2218 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2220 #ifdef MONO_ARCH_SOFT_FLOAT
2221 if (COMPILE_SOFT_FLOAT (cfg)) {
2223 * If the call has a float argument, we would need to do an r8->r4 conversion using
2224 * an icall, but that cannot be done during the call sequence since it would clobber
2225 * the call registers + the stack. So we do it before emitting the call.
2227 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2229 MonoInst *in = call->args [i];
2231 if (i >= sig->hasthis)
2232 t = sig->params [i - sig->hasthis];
2234 t = &mono_defaults.int_class->byval_arg;
2235 t = mono_type_get_underlying_type (t);
2237 if (!t->byref && t->type == MONO_TYPE_R4) {
2238 MonoInst *iargs [1];
2242 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2244 /* The result will be in an int vreg */
2245 call->args [i] = conv;
/* Hand the call to the backend (LLVM or native) to lower outgoing args. */
2252 if (COMPILE_LLVM (cfg))
2253 mono_llvm_emit_call (cfg, call);
2255 mono_arch_emit_call (cfg, call);
2257 mono_arch_emit_call (cfg, call);
/* Track the largest outgoing-parameter area and flag the method as
 * containing calls. */
2260 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2261 cfg->flags |= MONO_CFG_HAS_CALLS;
/* Attach the RGCTX (runtime generic context) argument in RGCTX_REG to CALL:
 * via the dedicated MONO_ARCH_RGCTX_REG when the architecture defines one,
 * otherwise by recording the vreg on the call. */
2267 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2269 #ifdef MONO_ARCH_RGCTX_REG
2270 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2271 cfg->uses_rgctx_reg = TRUE;
2272 call->rgctx_reg = TRUE;
2274 call->rgctx_arg_reg = rgctx_reg;
2281 inline static MonoInst*
/*
 * mono_emit_calli:
 *
 *   Emit an indirect call through the address in ADDR with signature SIG and
 * arguments ARGS, optionally passing the runtime generic context RGCTX_ARG.
 * Returns the call instruction (as a MonoInst*).
 */
2282 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
/* Copy the rgctx value into a fresh vreg before the call sequence so it
 * survives argument lowering. */
2288 rgctx_reg = mono_alloc_preg (cfg);
2289 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2295 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE);
2297 call->inst.sreg1 = addr->dreg;
2299 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2302 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2304 return (MonoInst*)call;
2308 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
2310 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, int rgctx_type);
/*
 * mono_emit_method_call_full:
 *
 *   Emit a call to METHOD with signature SIG and arguments ARGS.  THIS being
 * non-NULL makes the call virtual.  Handles: string ctor signature rewrite,
 * remoting-transparent-proxy wrappers, devirtualization of non-virtual and
 * sealed/final methods, delegate Invoke fast path, interface dispatch
 * through the IMT or interface-offsets table, and plain vtable dispatch.
 * IMT_ARG and RGCTX_ARG, when given, are attached to the call.
 */
2313 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2314 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2316 gboolean might_be_remote;
2317 gboolean virtual = this != NULL;
2318 gboolean enable_for_aot = TRUE;
/* Preserve the rgctx value across argument lowering. */
2324 rgctx_reg = mono_alloc_preg (cfg);
2325 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2328 if (method->string_ctor) {
2329 /* Create the real signature */
2330 /* FIXME: Cache these */
2331 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2332 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
/* Remoting: calls on MarshalByRef (or object) receivers may have to go
 * through a transparent-proxy check wrapper. */
2337 might_be_remote = this && sig->hasthis &&
2338 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2339 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this);
2341 context_used = mono_method_check_context_used (method);
2342 if (might_be_remote && context_used) {
2345 g_assert (cfg->generic_sharing_context);
/* Shared-generic + remoting: fetch the wrapper address from the rgctx
 * and do an indirect call instead. */
2347 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2349 return mono_emit_calli (cfg, sig, args, addr, NULL);
2352 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE, rgctx_arg ? TRUE : FALSE);
2354 if (might_be_remote)
2355 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2357 call->method = method;
2358 call->inst.flags |= MONO_INST_HAS_METHOD;
2359 call->inst.inst_left = this;
2362 int vtable_reg, slot_reg, this_reg;
2364 this_reg = this->dreg;
2366 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
/* Fast path: delegate Invoke calls go through delegate->invoke_impl. */
2367 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2368 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2370 /* Make a call to delegate->invoke_impl */
2371 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2372 call->inst.inst_basereg = this_reg;
2373 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2374 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2376 return (MonoInst*)call;
/* Devirtualization: a non-virtual target (or final non-remoting one) only
 * needs a null check before a direct call. */
2380 if ((!cfg->compile_aot || enable_for_aot) &&
2381 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2382 (MONO_METHOD_IS_FINAL (method) &&
2383 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2384 !(method->klass->marshalbyref && context_used)) {
2386 * the method is not virtual, we just need to ensure this is not null
2387 * and then we can call the method directly.
2389 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2391 * The check above ensures method is not gshared, this is needed since
2392 * gshared methods can't have wrappers.
2394 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2397 if (!method->string_ctor)
2398 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2400 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2402 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2404 return (MonoInst*)call;
2407 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2409 * the method is virtual, but we can statically dispatch since either
2410 * it's class or the method itself are sealed.
2411 * But first we need to ensure it's not a null reference.
2413 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2415 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2416 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2418 return (MonoInst*)call;
/* True virtual dispatch: load the vtable (with implicit null check via
 * the faulting load) and pick the slot. */
2421 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2423 vtable_reg = alloc_preg (cfg);
2424 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2425 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2427 #ifdef MONO_ARCH_HAVE_IMT
/* Interface dispatch via IMT: slots live below the vtable pointer. */
2429 guint32 imt_slot = mono_method_get_imt_slot (method);
2430 emit_imt_argument (cfg, call, imt_arg);
2431 slot_reg = vtable_reg;
2432 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
/* No IMT: use the per-class interface-offsets table instead. */
2435 if (slot_reg == -1) {
2436 slot_reg = alloc_preg (cfg);
2437 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2438 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
/* Class dispatch: regular vtable slot by vtable index. */
2441 slot_reg = vtable_reg;
2442 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2443 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2444 #ifdef MONO_ARCH_HAVE_IMT
/* Generic virtual methods also dispatch through the IMT argument. */
2446 g_assert (mono_method_signature (method)->generic_param_count);
2447 emit_imt_argument (cfg, call, imt_arg);
2452 call->inst.sreg1 = slot_reg;
2453 call->virtual = TRUE;
2456 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2459 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2461 return (MonoInst*)call;
/* Convenience wrapper: method call using the method's own signature, with
 * no IMT or rgctx argument. */
2465 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2467 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL, NULL);
/* Emit a direct (non-virtual, non-indirect) call to the native function
 * FUNC with signature SIG and arguments ARGS; returns the call ins. */
2471 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2478 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE);
2481 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2483 return (MonoInst*)call;
/* Emit a call to the JIT icall identified by its address FUNC: look up its
 * registered MonoJitICallInfo and call through the icall wrapper. */
2487 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2489 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2493 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2497 * mono_emit_abs_call:
2499 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2501 inline static MonoInst*
2502 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2503 					MonoMethodSignature *sig, MonoInst **args)
/* Allocate the patch info describing the runtime function to call. */
2505 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2509 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Register ji in cfg->abs_patches so the ABS resolver can map the fake
 * call address back to the patch info (lazily create the table). */
2512 if (cfg->abs_patches == NULL)
2513 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2514 g_hash_table_insert (cfg->abs_patches, ji, ji);
2515 ins = mono_emit_native_call (cfg, ji, sig, args);
/* Mark the call so later passes know the fptr is a patch, not a real address. */
2516 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *
 *   Widen the result of a call whose return type is a small integer.  Needed
 * because native code (pinvoke) and LLVM-compiled code might return sub-register
 * sized integers without initializing the upper bits; emit the matching
 * sign/zero extension (ICONV_TO_{I1,U1,I2,U2}) over the call's dreg.
 */
2521 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2523 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2524 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2528 * Native code might return non register sized integers
2529 * without initializing the upper bits.
/* Pick the widening op from the load opcode the return type maps to. */
2531 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2532 case OP_LOADI1_MEMBASE:
2533 widen_op = OP_ICONV_TO_I1;
2535 case OP_LOADU1_MEMBASE:
2536 widen_op = OP_ICONV_TO_U1;
2538 case OP_LOADI2_MEMBASE:
2539 widen_op = OP_ICONV_TO_I2;
2541 case OP_LOADU2_MEMBASE:
2542 widen_op = OP_ICONV_TO_U2;
2548 if (widen_op != -1) {
2549 int dreg = alloc_preg (cfg);
2552 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
/* The widened value keeps the stack type of the original result. */
2553 widen->type = ins->type;
/*
 * get_memcpy_method:
 *
 *   Return the managed String.memcpy(3) helper from corlib, caching it in a
 * static on first use.  Aborts with g_error if the method is missing, which
 * indicates an outdated corlib.
 */
2563 get_memcpy_method (void)
2565 static MonoMethod *memcpy_method = NULL;
2566 if (!memcpy_method) {
2567 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2569 g_error ("Old corlib found. Install a new one");
2571 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 *   Build, into *WB_BITMAP, a bitmap with one bit per pointer-sized slot of
 * KLASS (starting at OFFSET) that holds a managed reference and therefore
 * needs a write barrier.  Recurses into reference-holding value-type fields;
 * static fields are skipped.  Callers limit the copied size so every bit
 * index fits in the 32-bit bitmap (see mono_emit_wb_aware_memcpy).
 */
2575 create_write_barrier_bitmap (MonoClass *klass, unsigned *wb_bitmap, int offset)
2577 MonoClassField *field;
2578 gpointer iter = NULL;
2580 while ((field = mono_class_get_fields (klass, &iter))) {
2583 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* For valuetypes field->offset includes the (absent) object header; strip it. */
2585 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2586 if (mono_type_is_reference (field->type)) {
2587 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2588 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2590 /*FIXME support nested value types so this works for: struct X { Y y; int z;} struct Y { object a,b; }*/
2591 MonoClass *field_class = mono_class_from_mono_type (field->type);
2592 if (field_class->has_references)
2593 create_write_barrier_bitmap (field_class, wb_bitmap, offset + foffset)
/*
 * emit_write_barrier:
 *
 *   Emit a GC write barrier for storing the reference VALUE (or the raw
 * register VALUE_REG when VALUE is NULL) through pointer PTR.  No-op unless
 * cfg->gen_write_barriers is set.  Strategy, in order of preference:
 *   1) OP_CARD_TABLE_WBARRIER when the arch supports it (non-AOT only);
 *   2) inline card-table marking (shift, optional mask, store 1 byte);
 *   3) call the generic GC write barrier method.
 * A dummy use of the value is emitted at the end, presumably to keep it alive
 * across the barrier — TODO confirm.
 */
2599 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value, int value_reg)
2601 int card_table_shift_bits;
2602 gpointer card_table_mask;
2604 MonoInst *dummy_use;
2605 int nursery_shift_bits;
2606 size_t nursery_size;
2607 gboolean has_card_table_wb = FALSE;
2609 if (!cfg->gen_write_barriers)
2612 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
2614 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
2616 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
2617 has_card_table_wb = TRUE;
2620 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0) {
2623 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2624 wbarrier->sreg1 = ptr->dreg;
2626 wbarrier->sreg2 = value->dreg;
2628 wbarrier->sreg2 = value_reg;
2629 MONO_ADD_INS (cfg->cbb, wbarrier);
2630 } else if (card_table) {
2631 int offset_reg = alloc_preg (cfg);
2632 int card_reg = alloc_preg (cfg);
/* card index = (ptr >> shift) [& mask] */
2635 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
2636 if (card_table_mask)
2637 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
2639 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
2640 * IMM's larger than 32bits.
2642 if (cfg->compile_aot) {
2643 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
2645 MONO_INST_NEW (cfg, ins, OP_PCONST);
2646 ins->inst_p0 = card_table;
2647 ins->dreg = card_reg;
2648 MONO_ADD_INS (cfg->cbb, ins);
/* Mark the card: *(card_table + index) = 1 */
2651 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
2652 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
2654 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2655 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
2659 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
2661 MONO_INST_NEW (cfg, dummy_use, OP_DUMMY_USE);
2662 dummy_use->sreg1 = value_reg;
2663 MONO_ADD_INS (cfg->cbb, dummy_use);
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Emit an inline, write-barrier-aware copy of a valuetype of class KLASS
 * from iargs[1] to iargs[0] (SIZE bytes, alignment ALIGN).  Returns whether
 * the copy was emitted (the visible return statements are elided in this view).
 * Fast paths:
 *   - refuses align < sizeof(void*) (ref-holding types can't be less aligned)
 *     and size > 32 words (bitmap limit of create_write_barrier_bitmap);
 *   - > 5 word-sized stores: call mono_gc_wbarrier_value_copy_bitmap with the
 *     computed bitmap instead of unrolling;
 *   - otherwise unroll word copies, emitting a write barrier for each slot
 *     whose bit is set in the bitmap, then copy the < word-size tail with
 *     4/2/1-byte moves (those cannot be references).
 */
2668 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
2670 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
2671 unsigned need_wb = 0;
2676 /*types with references can't have alignment smaller than sizeof(void*) */
2677 if (align < SIZEOF_VOID_P)
2680 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
2681 if (size > 32 * SIZEOF_VOID_P)
2684 create_write_barrier_bitmap (klass, &need_wb, 0);
2686 /* We don't unroll more than 5 stores to avoid code bloat. */
2687 if (size > 5 * SIZEOF_VOID_P) {
2688 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
2689 size += (SIZEOF_VOID_P - 1);
2690 size &= ~(SIZEOF_VOID_P - 1);
2692 EMIT_NEW_ICONST (cfg, iargs [2], size);
2693 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
2694 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
2698 destreg = iargs [0]->dreg;
2699 srcreg = iargs [1]->dreg;
2702 dest_ptr_reg = alloc_preg (cfg);
2703 tmp_reg = alloc_preg (cfg);
/* Running destination pointer, advanced a word at a time below. */
2706 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
2708 while (size >= SIZEOF_VOID_P) {
2709 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, tmp_reg, srcreg, offset);
2710 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
/* Barrier only for slots the bitmap marked as holding a reference. */
2713 emit_write_barrier (cfg, iargs [0], NULL, tmp_reg);
2715 offset += SIZEOF_VOID_P;
2716 size -= SIZEOF_VOID_P;
2719 /*tmp += sizeof (void*)*/
2720 if (size >= SIZEOF_VOID_P) {
2721 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
2722 MONO_ADD_INS (cfg->cbb, iargs [0]);
2726 /* Those cannot be references since size < sizeof (void*) */
2728 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
2729 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
2735 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
2736 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
2742 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
2743 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
2752 * Emit code to copy a valuetype of type @klass whose address is stored in
2753 * @src->dreg to memory whose address is stored at @dest->dreg.
/*
 * mini_emit_stobj:
 *
 *   Emit code to copy a valuetype of class KLASS from *src->dreg to
 * *dest->dreg.  NATIVE selects native layout size (pinvoke) vs managed size.
 * When write barriers are enabled and the class holds references (and the
 * destination is not obviously the stack), the copy goes through the
 * wb-aware inline path or the mono_value_copy icall; otherwise a plain
 * inline memcpy or a call to the managed String.memcpy helper is used.
 */
2756 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2758 MonoInst *iargs [4];
2761 MonoMethod *memcpy_method;
2765 * This check breaks with spilled vars... need to handle it during verification anyway.
2766 * g_assert (klass && klass == src->klass && klass == dest->klass);
2770 n = mono_class_native_size (klass, &align);
2772 n = mono_class_value_size (klass, &align);
2774 /* if native is true there should be no references in the struct */
2775 if (cfg->gen_write_barriers && klass->has_references && !native) {
2776 /* Avoid barriers when storing to the stack */
2777 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2778 (dest->opcode == OP_LDADDR))) {
2779 int context_used = 0;
2784 if (cfg->generic_sharing_context)
2785 context_used = mono_class_check_context_used (klass);
2787 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
2788 if ((cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
2790 } else if (context_used) {
/* Shared code: fetch the class from the rgctx at run time. */
2791 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2793 if (cfg->compile_aot) {
2794 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2796 EMIT_NEW_PCONST (cfg, iargs [2], klass);
/* mono_value_copy reads the gc descriptor; make sure it's computed. */
2797 mono_class_compute_gc_descriptor (klass);
2801 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
2806 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2807 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2808 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2812 EMIT_NEW_ICONST (cfg, iargs [2], n);
2814 memcpy_method = get_memcpy_method ();
2815 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return the managed String.memset(3) helper from corlib, caching it in a
 * static on first use.  Aborts with g_error if missing (outdated corlib).
 */
2820 get_memset_method (void)
2822 static MonoMethod *memset_method = NULL;
2823 if (!memset_method) {
2824 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2826 g_error ("Old corlib found. Install a new one");
2828 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code to zero-initialize a valuetype of class KLASS at *dest->dreg
 * (CIL initobj).  Small sizes (<= 5 pointers) are zeroed inline with
 * mini_emit_memset; larger ones call the managed memset helper with value 0.
 */
2832 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2834 MonoInst *iargs [3];
2837 MonoMethod *memset_method;
2839 /* FIXME: Optimize this for the case when dest is an LDADDR */
2841 mono_class_init (klass);
2842 n = mono_class_value_size (klass, &align);
2844 if (n <= sizeof (gpointer) * 5) {
2845 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2848 memset_method = get_memset_method ();
2850 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2851 EMIT_NEW_ICONST (cfg, iargs [2], n);
2852 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR that loads the runtime generic context for METHOD.  Source of the
 * rgctx depends on how the method is shared:
 *   - method-inflated methods load the MonoMethodRuntimeGenericContext from
 *     the mrgctx variable;
 *   - static / valuetype methods load it from the vtable variable (possibly
 *     indirecting through the mrgctx's class_vtable field);
 *   - otherwise it is derived from `this`'s vtable.
 */
2857 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2859 MonoInst *this = NULL;
2861 g_assert (cfg->generic_sharing_context);
2863 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2864 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2865 !method->klass->valuetype)
2866 EMIT_NEW_ARGLOAD (cfg, this, 0);
2868 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2869 MonoInst *mrgctx_loc, *mrgctx_var;
2872 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2874 mrgctx_loc = mono_get_vtable_var (cfg);
2875 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2878 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2879 MonoInst *vtable_loc, *vtable_var;
2883 vtable_loc = mono_get_vtable_var (cfg);
2884 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2886 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2887 MonoInst *mrgctx_var = vtable_var;
2890 vtable_reg = alloc_preg (cfg);
2891 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2892 vtable_var->type = STACK_PTR;
2898 int vtable_reg, res_reg;
2900 vtable_reg = alloc_preg (cfg);
2901 res_reg = alloc_preg (cfg);
/* Instance method: the rgctx comes from this->vtable. */
2902 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (from MP) and fill a MonoJumpInfoRgctxEntry describing a lazy
 * rgctx slot fetch: the requesting METHOD, whether the lookup goes through
 * the mrgctx (IN_MRGCTX), the patch describing the looked-up item
 * (PATCH_TYPE/PATCH_DATA) and the kind of info wanted (INFO_TYPE).
 */
2907 static MonoJumpInfoRgctxEntry *
2908 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2910 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2911 res->method = method;
2912 res->in_mrgctx = in_mrgctx;
2913 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2914 res->data->type = patch_type;
2915 res->data->data.target = patch_data;
2916 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *
 *   Emit a call to the rgctx lazy-fetch trampoline that resolves ENTRY using
 * the runtime generic context RGCTX.
 */
2921 static inline MonoInst*
2922 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2924 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR that fetches the RGCTX_TYPE info (vtable, klass, cast cache, ...)
 * for KLASS from the current method's runtime generic context.
 */
2928 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2929 MonoClass *klass, int rgctx_type)
2931 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2932 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2934 return emit_rgctx_fetch (cfg, rgctx, entry);
2938 * emit_get_rgctx_method:
2940 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2941 * normal constants, else emit a load from the rgctx.
2944 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2945 MonoMethod *cmethod, int rgctx_type)
/* Non-shared code: the method is known at compile time, emit a constant. */
2947 if (!context_used) {
2950 switch (rgctx_type) {
2951 case MONO_RGCTX_INFO_METHOD:
2952 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
2954 case MONO_RGCTX_INFO_METHOD_RGCTX:
2955 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
2958 g_assert_not_reached ();
/* Shared code: fetch the info from the rgctx at run time. */
2961 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2962 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2964 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR that fetches the RGCTX_TYPE info for FIELD from the current
 * method's runtime generic context.
 */
2969 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2970 MonoClassField *field, int rgctx_type)
2972 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2973 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2975 return emit_rgctx_fetch (cfg, rgctx, entry);
2979 * On return the caller must check @klass for load errors.
/*
 * emit_generic_class_init:
 *
 *   Emit a call to the generic-class-init trampoline for KLASS.  The vtable
 * argument comes from the rgctx under generic sharing, otherwise it is a
 * vtable constant.  On return the caller must check @klass for load errors.
 */
2982 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
2984 MonoInst *vtable_arg;
2986 int context_used = 0;
2988 if (cfg->generic_sharing_context)
2989 context_used = mono_class_check_context_used (klass);
2992 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
2993 klass, MONO_RGCTX_INFO_VTABLE);
2995 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2999 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* LLVM uses a trampoline variant with a different (explicit-arg) signature. */
3002 if (COMPILE_LLVM (cfg))
3003 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3005 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3006 #ifdef MONO_ARCH_VTABLE_REG
/* Pass the vtable in the arch-specific register the trampoline expects. */
3007 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3008 cfg->uses_vtable_reg = TRUE;
/*
 * save_cast_details:
 *
 *   When --debug=casts is enabled, record in the JIT TLS the classes involved
 * in an upcoming cast of the object in OBJ_REG to KLASS, so a failing cast
 * can produce a detailed error message.  No-op otherwise.
 */
3015 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
3017 if (mini_get_debug_options ()->better_cast_details) {
3018 int to_klass_reg = alloc_preg (cfg);
3019 int vtable_reg = alloc_preg (cfg);
3020 int klass_reg = alloc_preg (cfg);
3021 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
/* NOTE(review): the message below ends with "\n." — the trailing '.' after
 * the newline looks like a typo, but changing a runtime string is out of
 * scope for a comment-only pass. */
3024 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3028 MONO_ADD_INS (cfg->cbb, tls_get);
/* from-class = obj->vtable->klass */
3029 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3030 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3032 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3033 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3034 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * reset_cast_details:
 *
 *   Clear the cast-details state saved by save_cast_details () once the cast
 * has succeeded.  Only the class_cast_from field needs resetting.
 */
3039 reset_cast_details (MonoCompile *cfg)
3041 /* Reset the variables holding the cast details */
3042 if (mini_get_debug_options ()->better_cast_details) {
3043 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3045 MONO_ADD_INS (cfg->cbb, tls_get);
3046 /* It is enough to reset the from field */
3047 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3052 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit a check that OBJ is an instance of exactly ARRAY_CLASS, throwing
 * ArrayTypeMismatchException otherwise (used for stelem-style stores).  The
 * comparison is against the class under MONO_OPT_SHARED, against an rgctx
 * vtable under generic sharing, and against a vtable (const or AOT patch)
 * otherwise.  On return the caller must check @array_class for load errors.
 */
3055 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3057 int vtable_reg = alloc_preg (cfg);
3058 int context_used = 0;
3060 if (cfg->generic_sharing_context)
3061 context_used = mono_class_check_context_used (array_class);
3063 save_cast_details (cfg, array_class, obj->dreg);
/* Faulting load: also performs the null check on obj. */
3065 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3067 if (cfg->opt & MONO_OPT_SHARED) {
3068 int class_reg = alloc_preg (cfg);
3069 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3070 if (cfg->compile_aot) {
3071 int klass_reg = alloc_preg (cfg);
3072 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3073 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3075 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3077 } else if (context_used) {
3078 MonoInst *vtable_ins;
3080 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3081 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3083 if (cfg->compile_aot) {
3087 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3089 vt_reg = alloc_preg (cfg);
3090 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3091 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3094 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3096 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3100 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3102 reset_cast_details (cfg);
3106 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3107 * generic code is generated.
/*
 * handle_unbox_nullable:
 *
 *   Handles unbox of a Nullable<T> by calling the managed Nullable Unbox
 * method.  If CONTEXT_USED is non-zero the method address is fetched from
 * the rgctx and called indirectly; otherwise a direct call is emitted.
 */
3110 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3112 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3115 MonoInst *rgctx, *addr;
3117 /* FIXME: What if the class is shared? We might not
3118 have to get the address of the method from the
3120 addr = emit_get_rgctx_method (cfg, context_used, method,
3121 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3123 rgctx = emit_get_rgctx (cfg, method, context_used);
3125 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3127 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *
 *   Emit the unbox of the object on *SP to valuetype KLASS: check that the
 * object's vtable has rank 0 and that its element class matches KLASS
 * (throwing InvalidCastException otherwise), then return the address just
 * past the MonoObject header where the value data lives (STACK_MP).
 */
3132 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3136 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3137 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3138 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3139 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3141 obj_reg = sp [0]->dreg;
/* Faulting load of the vtable doubles as the null check. */
3142 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3143 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3145 /* FIXME: generics */
3146 g_assert (klass->rank == 0);
/* Arrays can never be unboxed: require rank == 0. */
3149 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3150 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3152 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3153 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
3156 MonoInst *element_class;
3158 /* This assertion is from the unboxcast insn */
3159 g_assert (klass->rank == 0);
/* Shared code: compare against the element class fetched from the rgctx. */
3161 element_class = emit_get_rgctx_klass (cfg, context_used,
3162 klass->element_class, MONO_RGCTX_INFO_KLASS);
3164 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3165 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3167 save_cast_details (cfg, klass->element_class, obj_reg);
3168 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3169 reset_cast_details (cfg);
/* Result: pointer to the value payload, right after the object header. */
3172 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
3173 MONO_ADD_INS (cfg->cbb, add);
3174 add->type = STACK_MP;
3181 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit the allocation of a new object of class KLASS (FOR_BOX selects the
 * box-specialized allocator where available).  Picks, in order: rgctx-based
 * allocation under generic sharing, domain-based mono_object_new under
 * MONO_OPT_SHARED, a small mscorlib-token helper for out-of-line AOT code,
 * a GC managed allocator when one exists, or the generic allocation function.
 * Returns NULL and sets the cfg exception on error.
 */
3184 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3186 MonoInst *iargs [2];
3192 MonoInst *iargs [2];
3195 FIXME: we cannot get managed_alloc here because we can't get
3196 the class's vtable (because it's not a closed class)
3198 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3199 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3202 if (cfg->opt & MONO_OPT_SHARED)
3203 rgctx_info = MONO_RGCTX_INFO_KLASS;
3205 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3206 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3208 if (cfg->opt & MONO_OPT_SHARED) {
3209 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3211 alloc_ftn = mono_object_new;
3214 alloc_ftn = mono_object_new_specific;
3217 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3220 if (cfg->opt & MONO_OPT_SHARED) {
3221 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3222 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3224 alloc_ftn = mono_object_new;
3225 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3226 /* This happens often in argument checking code, eg. throw new FooException... */
3227 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3228 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3229 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3231 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3232 MonoMethod *managed_alloc = NULL;
/* Vtable could not be created: report a TypeLoadException via the cfg. */
3236 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3237 cfg->exception_ptr = klass;
3241 #ifndef MONO_CROSS_COMPILE
3242 managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3245 if (managed_alloc) {
3246 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3247 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3249 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocators want the instance size in words as the first argument. */
3251 guint32 lw = vtable->klass->instance_size;
3252 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3253 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3254 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3257 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3261 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3265 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit the boxing of VAL (a valuetype of class KLASS).  Nullable<T> goes
 * through the managed Nullable Box method (indirectly via the rgctx under
 * generic sharing); everything else allocates with handle_alloc () and
 * stores the value just past the object header.
 * Returns NULL and sets the cfg exception on error.
 */
3268 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
3270 MonoInst *alloc, *ins;
3272 if (mono_class_is_nullable (klass)) {
3273 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3276 /* FIXME: What if the class is shared? We might not
3277 have to get the method address from the RGCTX. */
3278 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3279 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3280 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3282 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3284 return mono_emit_method_call (cfg, method, &val, NULL);
3288 alloc = handle_alloc (cfg, klass, TRUE, context_used);
/* Copy the value into the freshly allocated box, after the header. */
3292 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return whether KLASS is a generic instance (or, under sharing, an open
 * generic) with at least one variant (co-/contra-variant) type parameter
 * instantiated with a reference type — or, in shared code, with a VAR/MVAR.
 * Such casts need the slow cache-based helpers in handle_castclass/isinst.
 */
3299 mini_class_has_reference_variant_generic_argument (MonoClass *klass, int context_used)
3302 MonoGenericContainer *container;
3303 MonoGenericInst *ginst;
3305 if (klass->generic_class) {
3306 container = klass->generic_class->container_class->generic_container;
3307 ginst = klass->generic_class->context.class_inst;
3308 } else if (klass->generic_container && context_used) {
3309 container = klass->generic_container;
3310 ginst = container->context.class_inst;
3315 for (i = 0; i < container->type_argc; ++i) {
3317 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
3319 type = ginst->type_argv [i];
3320 if (MONO_TYPE_IS_REFERENCE (type))
/* Under sharing a VAR/MVAR argument might be a reference at run time. */
3323 if (context_used && (type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR))
3329 // FIXME: This doesn't work yet (class libs tests fail?)
/* Always TRUE for now (the leading TRUE|| disables the fast inline path);
 * the intended test matches classes needing the icall-based cast. */
3330 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
3333 * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 *
 *   Emit a CIL castclass of SRC to KLASS, throwing InvalidCastException on
 * failure.  Variant generic interfaces go through the cache-based managed
 * helper; "complex" classes (see is_complex_isinst, currently everything)
 * go through the mono_object_castclass icall; the remaining inline path
 * null-checks, then compares vtable/klass as appropriate.
 * Returns NULL and sets the cfg exception on error.
 */
3336 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3338 MonoBasicBlock *is_null_bb;
3339 int obj_reg = src->dreg;
3340 int vtable_reg = alloc_preg (cfg);
3341 MonoInst *klass_inst = NULL;
3346 if(mini_class_has_reference_variant_generic_argument (klass, context_used)) {
3347 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
3348 MonoInst *cache_ins;
3350 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3355 /* klass - it's the second element of the cache entry*/
3356 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3359 args [2] = cache_ins;
3361 return mono_emit_method_call (cfg, mono_castclass, args, NULL);
3364 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3366 if (is_complex_isinst (klass)) {
3367 /* Complex case, handle by an icall */
3373 args [1] = klass_inst;
3375 return mono_emit_jit_icall (cfg, mono_object_castclass, args);
3377 /* Simple case, handled by the code below */
/* castclass of null succeeds: branch straight to the merge block. */
3381 NEW_BBLOCK (cfg, is_null_bb);
3383 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3384 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3386 save_cast_details (cfg, klass, obj_reg);
3388 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3389 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3390 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3392 int klass_reg = alloc_preg (cfg);
3394 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3396 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3397 /* the remoting code is broken, access the class for now */
3398 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3399 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3401 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3402 cfg->exception_ptr = klass;
3405 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
/* Sealed class: exact klass comparison is sufficient. */
3407 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3408 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3410 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3412 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3413 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3417 MONO_START_BB (cfg, is_null_bb);
3419 reset_cast_details (cfg);
3425 * Returns NULL and set the cfg exception on error.
/*
 * handle_isinst:
 *
 *   Emit a CIL isinst of SRC against KLASS: the result register holds the
 * object on success and 0 on failure (no exception thrown).  Variant generic
 * interfaces use the cache-based managed helper, "complex" classes the
 * mono_object_isinst icall; the remaining inline path branches between
 * is_null_bb (success — result already holds the object), false_bb
 * (failure — result set to 0) and end_bb (merge).
 * Returns NULL and sets the cfg exception on error.
 */
3428 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3431 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3432 int obj_reg = src->dreg;
3433 int vtable_reg = alloc_preg (cfg);
3434 int res_reg = alloc_preg (cfg);
3435 MonoInst *klass_inst = NULL;
3440 if(mini_class_has_reference_variant_generic_argument (klass, context_used)) {
3441 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
3442 MonoInst *cache_ins;
3444 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3449 /* klass - it's the second element of the cache entry*/
3450 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3453 args [2] = cache_ins;
3455 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
3458 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3460 if (is_complex_isinst (klass)) {
3461 /* Complex case, handle by an icall */
3467 args [1] = klass_inst;
3469 return mono_emit_jit_icall (cfg, mono_object_isinst, args);
3471 /* Simple case, the code below can handle it */
3475 NEW_BBLOCK (cfg, is_null_bb);
3476 NEW_BBLOCK (cfg, false_bb);
3477 NEW_BBLOCK (cfg, end_bb);
3479 /* Do the assignment at the beginning, so the other assignment can be if converted */
3480 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3481 ins->type = STACK_OBJ;
/* isinst of null yields null: jump straight to the success block. */
3484 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3485 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3487 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3489 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3490 g_assert (!context_used);
3491 /* the is_null_bb target simply copies the input register to the output */
3492 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3494 int klass_reg = alloc_preg (cfg);
3497 int rank_reg = alloc_preg (cfg);
3498 int eclass_reg = alloc_preg (cfg);
3500 g_assert (!context_used);
/* Array case: rank must match, then compare element (cast) classes. */
3501 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3502 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3503 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3504 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3505 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
3506 if (klass->cast_class == mono_defaults.object_class) {
3507 int parent_reg = alloc_preg (cfg);
3508 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3509 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3510 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3511 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3512 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3513 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3514 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3515 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3516 } else if (klass->cast_class == mono_defaults.enum_class) {
3517 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3518 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3519 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3520 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3522 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3523 /* Check that the object is a vector too */
3524 int bounds_reg = alloc_preg (cfg);
3525 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3526 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3527 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3530 /* the is_null_bb target simply copies the input register to the output */
3531 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3533 } else if (mono_class_is_nullable (klass)) {
3534 g_assert (!context_used);
3535 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3536 /* the is_null_bb target simply copies the input register to the output */
3537 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
3539 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3540 g_assert (!context_used);
3541 /* the remoting code is broken, access the class for now */
3542 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3543 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3545 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3546 cfg->exception_ptr = klass;
3549 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
/* Sealed class: exact klass comparison suffices. */
3551 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3552 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3554 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3555 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3557 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3558 /* the is_null_bb target simply copies the input register to the output */
3559 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* Failure: overwrite the result with 0, then fall through to the merge. */
3564 MONO_START_BB (cfg, false_bb);
3566 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3567 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3569 MONO_START_BB (cfg, is_null_bb);
3571 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the "checked isinst" opcode: tests whether SRC is an
 * instance of KLASS, with extra handling for transparent proxies
 * (remoting) whose real type cannot be decided at JIT time.  The result
 * encoding is documented in the comment below.  Returns an OP_ICONST-typed
 * instruction carrying the result register.
 */
3577 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3579 	/* This opcode takes as input an object reference and a class, and returns:
3580 	0) if the object is an instance of the class,
3581 	1) if the object is not instance of the class,
3582 	2) if the object is a proxy whose type cannot be determined */
3585 	MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3586 	int obj_reg = src->dreg;
3587 	int dreg = alloc_ireg (cfg);
3589 	int klass_reg = alloc_preg (cfg);
3591 	NEW_BBLOCK (cfg, true_bb);
3592 	NEW_BBLOCK (cfg, false_bb);
3593 	NEW_BBLOCK (cfg, false2_bb);
3594 	NEW_BBLOCK (cfg, end_bb);
3595 	NEW_BBLOCK (cfg, no_proxy_bb);
	/* A null reference is never an instance: short-circuit to the "false" result. */
3597 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3598 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3600 	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
	/* Interface case: try the fast interface-table check first. */
3601 		NEW_BBLOCK (cfg, interface_fail_bb);
3603 		tmp_reg = alloc_preg (cfg);
3604 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3605 		mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
3606 		MONO_START_BB (cfg, interface_fail_bb);
3607 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
	/* The iface check failed: only a transparent proxy can still succeed at runtime. */
3609 		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
	/* Proxy with custom type info -> result 2 ("cannot determine"). */
3611 		tmp_reg = alloc_preg (cfg);
3612 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3613 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3614 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
	/* Non-interface case: load the object's class from its vtable. */
3616 		tmp_reg = alloc_preg (cfg);
3617 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3618 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
	/* Not a proxy: take the plain isinst path below. */
3620 		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
	/* Proxy: test against the remote class' proxy_class instead. */
3621 		tmp_reg = alloc_preg (cfg);
3622 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3623 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
	/* Without custom type info the proxy behaves like an ordinary object. */
3625 		tmp_reg = alloc_preg (cfg);
3626 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3627 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3628 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
	/* Cast fails on a custom proxy -> result 2, not plain "false". */
3630 		mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3631 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3633 		MONO_START_BB (cfg, no_proxy_bb);
3635 		mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
	/* false_bb: not an instance -> 1. */
3638 	MONO_START_BB (cfg, false_bb);
3640 	MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3641 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
	/* false2_bb: undecidable proxy -> 2. */
3643 	MONO_START_BB (cfg, false2_bb);
3645 	MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3646 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
	/* true_bb: is an instance -> 0. */
3648 	MONO_START_BB (cfg, true_bb);
3650 	MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3652 	MONO_START_BB (cfg, end_bb);
	/* Materialize the result as an ICONST-shaped instruction for the caller. */
3655 	MONO_INST_NEW (cfg, ins, OP_ICONST);
3657 	ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the "checked castclass" opcode (see the result encoding
 * below).  Unlike handle_cisinst, a definite type mismatch here throws
 * InvalidCastException rather than producing a value; only proxies whose
 * type cannot be decided at JIT time yield 1 so the caller can finish the
 * check at runtime.
 */
3663 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3665 	/* This opcode takes as input an object reference and a class, and returns:
3666 	0) if the object is an instance of the class,
3667 	1) if the object is a proxy whose type cannot be determined
3668 	an InvalidCastException exception is thrown otherwhise*/
3671 	MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3672 	int obj_reg = src->dreg;
3673 	int dreg = alloc_ireg (cfg);
3674 	int tmp_reg = alloc_preg (cfg);
3675 	int klass_reg = alloc_preg (cfg);
3677 	NEW_BBLOCK (cfg, end_bb);
3678 	NEW_BBLOCK (cfg, ok_result_bb);
	/* Casting null always succeeds. */
3680 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3681 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
	/* Record the classes involved so a failing cast can report a useful message. */
3683 	save_cast_details (cfg, klass, obj_reg);
3685 	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
	/* Interface case: fast interface-table check, then proxy fallback. */
3686 		NEW_BBLOCK (cfg, interface_fail_bb);
3688 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3689 		mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3690 		MONO_START_BB (cfg, interface_fail_bb);
	/* Anything that is not a transparent proxy throws here. */
3693 		mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
	/* Proxy without custom type info cannot satisfy the cast -> throw. */
3695 		tmp_reg = alloc_preg (cfg);
3696 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3697 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3698 		MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
	/* Undecidable proxy -> result 1. */
3700 		MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3701 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3704 		NEW_BBLOCK (cfg, no_proxy_bb);
3706 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3707 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
	/* Plain objects skip the proxy handling entirely. */
3708 		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
	/* Proxy: check against the remote class' proxy_class. */
3710 		tmp_reg = alloc_preg (cfg);
3711 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3712 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3714 		tmp_reg = alloc_preg (cfg);
3715 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3716 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3717 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
	/* On a custom proxy a failed check is "undecidable" (1), not a throw. */
3719 		NEW_BBLOCK (cfg, fail_1_bb);
3721 		mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3723 		MONO_START_BB (cfg, fail_1_bb);
3725 		MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3726 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
	/* Ordinary object: a failing castclass throws inside mini_emit_castclass. */
3728 		MONO_START_BB (cfg, no_proxy_bb);
3730 		mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
	/* ok_result_bb: cast succeeded -> 0. */
3733 	MONO_START_BB (cfg, ok_result_bb);
3735 	MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3737 	MONO_START_BB (cfg, end_bb);
	/* Materialize the result as an ICONST-shaped instruction for the caller. */
3740 	MONO_INST_NEW (cfg, ins, OP_ICONST);
3742 	ins->type = STACK_I4;
3748  * Returns NULL and set the cfg exception on error.
/*
 * handle_delegate_ctor:
 *
 *   Emit IR that allocates a delegate of type KLASS bound to METHOD with
 * the given TARGET object, inlining the work normally done by
 * mono_delegate_ctor ().  CONTEXT_USED is the generic-sharing context
 * flag forwarded to the rgctx fetch helpers.
 */
3750 static G_GNUC_UNUSED MonoInst*
3751 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
3755 	gpointer *trampoline;
3756 	MonoInst *obj, *method_ins, *tramp_ins;
	/* Allocate the delegate object itself. */
3760 	obj = handle_alloc (cfg, klass, FALSE, 0);
3764 	/* Inline the contents of mono_delegate_ctor */
3766 	/* Set target field */
3767 	/* Optimize away setting of NULL target */
3768 	if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
3769 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
	/* A reference was stored into the heap: emit the GC write barrier if needed. */
3770 		if (cfg->gen_write_barriers) {
3771 			dreg = alloc_preg (cfg);
3772 			EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target));
3773 			emit_write_barrier (cfg, ptr, target, 0);
3777 	/* Set method field */
3778 	method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
3779 	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3780 	if (cfg->gen_write_barriers) {
3781 		dreg = alloc_preg (cfg);
3782 		EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method));
3783 		emit_write_barrier (cfg, ptr, method_ins, 0);
3786 	 * To avoid looking up the compiled code belonging to the target method
3787 	 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3788 	 * store it, and we fill it after the method has been compiled.
3790 	if (!cfg->compile_aot && !method->dynamic) {
3791 		MonoInst *code_slot_ins;
3794 			code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
	/* Lazily create and cache the per-method code slot under the domain lock. */
3796 			domain = mono_domain_get ();
3797 			mono_domain_lock (domain);
3798 			if (!domain_jit_info (domain)->method_code_hash)
3799 				domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3800 			code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3802 				code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3803 				g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3805 			mono_domain_unlock (domain);
3807 			EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3809 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3812 	/* Set invoke_impl field */
	/* Under AOT the trampoline address is patched in at load time. */
3813 	if (cfg->compile_aot) {
3814 		EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3816 		trampoline = mono_create_delegate_trampoline (klass);
3817 		EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3819 	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3821 	/* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call to the rank-specific mono_array_new_va icall for a
 * multi-dimensional "newarr"/newobj with RANK dimension arguments in SP.
 * IP is unused here beyond the caller's convention.  Returns the call
 * instruction producing the new array.
 */
3827 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3829 	MonoJitICallInfo *info;
3831 	/* Need to register the icall so it gets an icall wrapper */
3832 	info = mono_get_array_new_va_icall (rank);
3834 	cfg->flags |= MONO_CFG_HAS_VARARGS;
3836 	/* mono_array_new_va () needs a vararg calling convention */
3837 	cfg->disable_llvm = TRUE;
3839 	/* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3840 	return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Emit an OP_LOAD_GOTADDR at the very start of the method that loads the
 * GOT address into cfg->got_var, and a dummy use in the exit block so the
 * variable stays live across the whole method.  No-op when there is no
 * got_var or it was already allocated.
 */
3844 mono_emit_load_got_addr (MonoCompile *cfg)
3846 	MonoInst *getaddr, *dummy_use;
3848 	if (!cfg->got_var || cfg->got_var_allocated)
3851 	MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3852 	getaddr->dreg = cfg->got_var->dreg;
3854 	/* Add it to the start of the first bblock */
	/* Prepend manually if the entry block already has code, otherwise append. */
3855 	if (cfg->bb_entry->code) {
3856 		getaddr->next = cfg->bb_entry->code;
3857 		cfg->bb_entry->code = getaddr;
3860 		MONO_ADD_INS (cfg->bb_entry, getaddr);
3862 	cfg->got_var_allocated = TRUE;
3865 	 * Add a dummy use to keep the got_var alive, since real uses might
3866 	 * only be generated by the back ends.
3867 	 * Add it to end_bblock, so the variable's lifetime covers the whole
3869 	 * It would be better to make the usage of the got var explicit in all
3870 	 * cases when the backend needs it (i.e. calls, throw etc.), so this
3871 	 * wouldn't be needed.
3873 	NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3874 	MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Inline size limit (IL bytes); initialized lazily from MONO_INLINELIMIT
 * or INLINE_LENGTH_LIMIT in mono_method_check_inlining (). */
3877 static int inline_limit;
3878 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled in
 * CFG.  Rejects generic sharing, deep inline nesting, non-inlinable method
 * attributes, bodies over the size limit, classes whose cctor would have to
 * run, methods with declarative security, and (under soft float) R4
 * parameters/returns.
 */
3881 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3883 	MonoMethodHeaderSummary header;
3885 #ifdef MONO_ARCH_SOFT_FLOAT
3886 	MonoMethodSignature *sig = mono_method_signature (method);
	/* Inlining is not supported when sharing generic code. */
3890 	if (cfg->generic_sharing_context)
	/* Cap the inline nesting depth. */
3893 	if (cfg->inline_depth > 10)
3896 #ifdef MONO_ARCH_HAVE_LMF_OPS
	/* With LMF ops, small icall/pinvoke stubs can be inlined despite the checks below. */
3897 	if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3898 		 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3899 	    !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
	/* Summary fetch fails for methods with no usable IL body. */
3904 	if (!mono_method_get_header_summary (method, &header))
3907 	/*runtime, icall and pinvoke are checked by summary call*/
3908 	if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3909 	    (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3910 	    (method->klass->marshalbyref) ||
3914 	/* also consider num_locals? */
3915 	/* Do the size check early to avoid creating vtables */
3916 	if (!inline_limit_inited) {
3917 		if (getenv ("MONO_INLINELIMIT"))
3918 			inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3920 			inline_limit = INLINE_LENGTH_LIMIT;
3921 		inline_limit_inited = TRUE;
3923 	if (header.code_size >= inline_limit)
3927 	 * if we can initialize the class of the method right away, we do,
3928 	 * otherwise we don't allow inlining if the class needs initialization,
3929 	 * since it would mean inserting a call to mono_runtime_class_init()
3930 	 * inside the inlined code
3932 	if (!(cfg->opt & MONO_OPT_SHARED)) {
3933 		if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3934 			if (cfg->run_cctors && method->klass->has_cctor) {
3935 				/*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
3936 				if (!method->klass->runtime_info)
3937 					/* No vtable created yet */
3939 				vtable = mono_class_vtable (cfg->domain, method->klass);
3942 				/* This makes so that inline cannot trigger */
3943 				/* .cctors: too many apps depend on them */
3944 				/* running with a specific order... */
3945 				if (! vtable->initialized)
3947 				mono_runtime_class_init (vtable);
3949 		} else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3950 			if (!method->klass->runtime_info)
3951 				/* No vtable created yet */
3953 			vtable = mono_class_vtable (cfg->domain, method->klass);
3956 			if (!vtable->initialized)
3961 		 * If we're compiling for shared code
3962 		 * the cctor will need to be run at aot method load time, for example,
3963 		 * or at the end of the compilation of the inlining method.
3965 		if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3970 	 * CAS - do not inline methods with declarative security
3971 	 * Note: this has to be before any possible return TRUE;
3973 	if (mono_method_has_declsec (method))
3976 #ifdef MONO_ARCH_SOFT_FLOAT
	/* Soft float cannot handle inlined R4 returns/arguments. */
3978 	if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3980 	for (i = 0; i < sig->param_count; ++i)
3981 		if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static field access in METHOD requires a class
 * initialization check for VTABLE's class to be emitted at the access
 * site (already-initialized, BeforeFieldInit, and same-class static
 * method cases need none).
 */
3989 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
	/* Under AOT "initialized now" proves nothing about load time, so keep the check. */
3991 	if (vtable->initialized && !cfg->compile_aot)
3994 	if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3997 	if (!mono_class_needs_cctor_run (vtable->klass, method))
4000 	if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
4001 		/* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR with element class KLASS.  When BCHECK is set a bounds check
 * against max_length is emitted first.  Returns the address instruction
 * (STACK_PTR).
 */
4008 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4012 	int mult_reg, add_reg, array_reg, index_reg, index2_reg;
4014 	mono_class_init (klass);
4015 	size = mono_class_array_element_size (klass);
4017 	mult_reg = alloc_preg (cfg);
4018 	array_reg = arr->dreg;
4019 	index_reg = index->dreg;
4021 #if SIZEOF_REGISTER == 8
4022 	/* The array reg is 64 bits but the index reg is only 32 */
4023 	if (COMPILE_LLVM (cfg)) {
	/* LLVM handles the widening itself. */
4025 		index2_reg = index_reg;
4027 		index2_reg = alloc_preg (cfg);
4028 		MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
	/* 32-bit target: narrow an I8 index down to I4. */
4031 	if (index->type == STACK_I8) {
4032 		index2_reg = alloc_preg (cfg);
4033 		MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4035 		index2_reg = index_reg;
4040 		MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
4042 #if defined(TARGET_X86) || defined(TARGET_AMD64)
	/* Power-of-two element sizes map directly onto an x86 LEA addressing mode. */
4043 	if (size == 1 || size == 2 || size == 4 || size == 8) {
4044 		static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4046 		EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
4047 		ins->type = STACK_PTR;
	/* Generic path: addr = arr + index * size + offsetof(vector). */
4053 	add_reg = alloc_preg (cfg);
4055 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
4056 	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4057 	NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4058 	ins->type = STACK_PTR;
4059 	MONO_ADD_INS (cfg->cbb, ins);
4064 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [i,j] of a rank-2 array with
 * element class KLASS, including lower-bound adjustment and a bounds check
 * for each dimension (IndexOutOfRangeException on failure).  Requires real
 * mul/div opcodes, hence the #ifndef guard.  Returns the address
 * instruction (STACK_MP).
 */
4066 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4068 	int bounds_reg = alloc_preg (cfg);
4069 	int add_reg = alloc_preg (cfg);
4070 	int mult_reg = alloc_preg (cfg);
4071 	int mult2_reg = alloc_preg (cfg);
4072 	int low1_reg = alloc_preg (cfg);
4073 	int low2_reg = alloc_preg (cfg);
4074 	int high1_reg = alloc_preg (cfg);
4075 	int high2_reg = alloc_preg (cfg);
4076 	int realidx1_reg = alloc_preg (cfg);
4077 	int realidx2_reg = alloc_preg (cfg);
4078 	int sum_reg = alloc_preg (cfg);
4083 	mono_class_init (klass);
4084 	size = mono_class_array_element_size (klass);
4086 	index1 = index_ins1->dreg;
4087 	index2 = index_ins2->dreg;
4089 	/* range checking */
4090 	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4091 				       arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
	/* Dimension 0: realidx1 = index1 - lower_bound; unsigned compare vs length. */
4093 	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4094 				       bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4095 	MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4096 	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4097 				       bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4098 	MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4099 	MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
	/* Dimension 1: second MonoArrayBounds entry sits sizeof(MonoArrayBounds) further in. */
4101 	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4102 				       bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4103 	MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4104 	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4105 				       bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
4106 	MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4107 	MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
	/* addr = arr + (realidx1 * len2 + realidx2) * size + offsetof(vector). */
4109 	MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4110 	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4111 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4112 	MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4113 	NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4115 	ins->type = STACK_MP;
4117 	MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit IR for the Array Get/Set/Address helper CMETHOD: compute the
 * element address from the index arguments in SP.  Rank 1 (and rank 2 with
 * intrinsics enabled) are expanded inline; other ranks call the marshalled
 * array-address wrapper.  IS_SET excludes the trailing value argument from
 * the rank count.
 */
4124 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4128 	MonoMethod *addr_method;
4131 	rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4134 		return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
4136 #ifndef MONO_ARCH_EMULATE_MUL_DIV
4137 	/* emit_ldelema_2 depends on OP_LMUL */
4138 	if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
4139 		return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
	/* General rank: go through the generated array-address wrapper method. */
4143 	element_size = mono_class_array_element_size (cmethod->klass->element_class);
4144 	addr_method = mono_marshal_get_array_address (rank, element_size);
4145 	addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint/Debugger.Break (). */
4150 static MonoBreakPolicy
4151 always_insert_breakpoint (MonoMethod *method)
4153 	return MONO_BREAK_POLICY_ALWAYS;
/* Current policy callback; replaced via mono_set_break_policy (). */
4156 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4159  * mono_set_break_policy:
4160  * policy_callback: the new callback function
4162  * Allow embedders to decide wherther to actually obey breakpoint instructions
4163  * (both break IL instructions and Debugger.Break () method calls), for example
4164  * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4165  * untrusted or semi-trusted code.
4167  * @policy_callback will be called every time a break point instruction needs to
4168  * be inserted with the method argument being the method that calls Debugger.Break()
4169  * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4170  * if it wants the breakpoint to not be effective in the given method.
4171  * #MONO_BREAK_POLICY_ALWAYS is the default.
4174 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
4176 	if (policy_callback)
4177 		break_policy_func = policy_callback;
	/* NULL resets back to the default always-break policy. */
4179 		break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:
 *
 *   Ask the installed break policy whether a breakpoint in METHOD should
 * actually be emitted.  NOTE: the misspelled name ("brekpoint") is the
 * existing identifier used by callers; do not rename.
 */
4183 should_insert_brekpoint (MonoMethod *method) {
4184 	switch (break_policy_func (method)) {
4185 	case MONO_BREAK_POLICY_ALWAYS:
4187 	case MONO_BREAK_POLICY_NEVER:
	/* ON_DBG: only effective when running under the mono debugger. */
4189 	case MONO_BREAK_POLICY_ON_DBG:
4190 		return mono_debug_using_mono_debugger ();
	/* Unknown value from an embedder-supplied callback: warn, fall through to default. */
4192 		g_warning ("Incorrect value returned from break policy callback");
4197 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Expand Array.Get/SetGenericValueImpl inline: compute the element
 * address (bounds already checked by the managed callers) and copy the
 * value between the element and the by-ref argument args[2].  IS_SET
 * selects the copy direction.
 */
4199 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4201 	MonoInst *addr, *store, *load;
4202 	MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4204 	/* the bounds check is already done by the callers */
4205 	addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
	/* Set: *element = *value_ptr. */
4207 		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4208 		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
	/* Get: *value_ptr = *element. */
4210 		EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4211 		EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to replace a constructor call CMETHOD with an intrinsic IR
 * sequence.  Currently only SIMD ctor intrinsics are attempted (when
 * MONO_OPT_SIMD is enabled); returns NULL when no intrinsic applies.
 */
4217 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4219 	MonoInst *ins = NULL;
4220 #ifdef MONO_ARCH_SIMD_INTRINSICS
4221 	if (cfg->opt & MONO_OPT_SIMD) {
4222 		ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
4232 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4234 MonoInst *ins = NULL;
4236 static MonoClass *runtime_helpers_class = NULL;
4237 if (! runtime_helpers_class)
4238 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
4239 "System.Runtime.CompilerServices", "RuntimeHelpers");
4241 if (cmethod->klass == mono_defaults.string_class) {
4242 if (strcmp (cmethod->name, "get_Chars") == 0) {
4243 int dreg = alloc_ireg (cfg);
4244 int index_reg = alloc_preg (cfg);
4245 int mult_reg = alloc_preg (cfg);
4246 int add_reg = alloc_preg (cfg);
4248 #if SIZEOF_REGISTER == 8
4249 /* The array reg is 64 bits but the index reg is only 32 */
4250 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
4252 index_reg = args [1]->dreg;
4254 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
4256 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4257 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
4258 add_reg = ins->dreg;
4259 /* Avoid a warning */
4261 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4264 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
4265 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4266 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4267 add_reg, G_STRUCT_OFFSET (MonoString, chars));
4269 type_from_op (ins, NULL, NULL);
4271 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4272 int dreg = alloc_ireg (cfg);
4273 /* Decompose later to allow more optimizations */
4274 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
4275 ins->type = STACK_I4;
4276 ins->flags |= MONO_INST_FAULT;
4277 cfg->cbb->has_array_access = TRUE;
4278 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
4281 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
4282 int mult_reg = alloc_preg (cfg);
4283 int add_reg = alloc_preg (cfg);
4285 /* The corlib functions check for oob already. */
4286 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
4287 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4288 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
4289 return cfg->cbb->last_ins;
4292 } else if (cmethod->klass == mono_defaults.object_class) {
4294 if (strcmp (cmethod->name, "GetType") == 0) {
4295 int dreg = alloc_preg (cfg);
4296 int vt_reg = alloc_preg (cfg);
4297 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4298 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
4299 type_from_op (ins, NULL, NULL);
4302 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
4303 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
4304 int dreg = alloc_ireg (cfg);
4305 int t1 = alloc_ireg (cfg);
4307 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
4308 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
4309 ins->type = STACK_I4;
4313 } else if (strcmp (cmethod->name, ".ctor") == 0) {
4314 MONO_INST_NEW (cfg, ins, OP_NOP);
4315 MONO_ADD_INS (cfg->cbb, ins);
4319 } else if (cmethod->klass == mono_defaults.array_class) {
4320 if (strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
4321 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
4323 #ifndef MONO_BIG_ARRAYS
4325 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
4328 if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
4329 int dreg = alloc_ireg (cfg);
4330 int bounds_reg = alloc_ireg (cfg);
4331 MonoBasicBlock *end_bb, *szarray_bb;
4332 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
4334 NEW_BBLOCK (cfg, end_bb);
4335 NEW_BBLOCK (cfg, szarray_bb);
4337 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
4338 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
4339 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4340 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
4341 /* Non-szarray case */
4343 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4344 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4346 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4347 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4348 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4349 MONO_START_BB (cfg, szarray_bb);
4352 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4353 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4355 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4356 MONO_START_BB (cfg, end_bb);
4358 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
4359 ins->type = STACK_I4;
4365 if (cmethod->name [0] != 'g')
4368 if (strcmp (cmethod->name, "get_Rank") == 0) {
4369 int dreg = alloc_ireg (cfg);
4370 int vtable_reg = alloc_preg (cfg);
4371 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
4372 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4373 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
4374 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4375 type_from_op (ins, NULL, NULL);
4378 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4379 int dreg = alloc_ireg (cfg);
4381 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4382 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4383 type_from_op (ins, NULL, NULL);
4388 } else if (cmethod->klass == runtime_helpers_class) {
4390 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
4391 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
4395 } else if (cmethod->klass == mono_defaults.thread_class) {
4396 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
4397 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
4398 MONO_ADD_INS (cfg->cbb, ins);
4400 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
4401 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4402 MONO_ADD_INS (cfg->cbb, ins);
4405 } else if (cmethod->klass == mono_defaults.monitor_class) {
4406 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
4407 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
4410 if (COMPILE_LLVM (cfg)) {
4412 * Pass the argument normally, the LLVM backend will handle the
4413 * calling convention problems.
4415 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4417 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
4418 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4419 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4420 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4423 return (MonoInst*)call;
4424 } else if (strcmp (cmethod->name, "Exit") == 0) {
4427 if (COMPILE_LLVM (cfg)) {
4428 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4430 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
4431 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4432 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4433 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4436 return (MonoInst*)call;
4438 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4439 MonoMethod *fast_method = NULL;
4441 /* Avoid infinite recursion */
4442 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
4443 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
4444 strcmp (cfg->method->name, "FastMonitorExit") == 0))
4447 if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) ||
4448 strcmp (cmethod->name, "Exit") == 0)
4449 fast_method = mono_monitor_get_fast_path (cmethod);
4453 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
4455 } else if (cmethod->klass->image == mono_defaults.corlib &&
4456 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
4457 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
4460 #if SIZEOF_REGISTER == 8
4461 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
4462 /* 64 bit reads are already atomic */
4463 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
4464 ins->dreg = mono_alloc_preg (cfg);
4465 ins->inst_basereg = args [0]->dreg;
4466 ins->inst_offset = 0;
4467 MONO_ADD_INS (cfg->cbb, ins);
4471 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
4472 if (strcmp (cmethod->name, "Increment") == 0) {
4473 MonoInst *ins_iconst;
4476 if (fsig->params [0]->type == MONO_TYPE_I4)
4477 opcode = OP_ATOMIC_ADD_NEW_I4;
4478 #if SIZEOF_REGISTER == 8
4479 else if (fsig->params [0]->type == MONO_TYPE_I8)
4480 opcode = OP_ATOMIC_ADD_NEW_I8;
4483 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4484 ins_iconst->inst_c0 = 1;
4485 ins_iconst->dreg = mono_alloc_ireg (cfg);
4486 MONO_ADD_INS (cfg->cbb, ins_iconst);
4488 MONO_INST_NEW (cfg, ins, opcode);
4489 ins->dreg = mono_alloc_ireg (cfg);
4490 ins->inst_basereg = args [0]->dreg;
4491 ins->inst_offset = 0;
4492 ins->sreg2 = ins_iconst->dreg;
4493 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4494 MONO_ADD_INS (cfg->cbb, ins);
4496 } else if (strcmp (cmethod->name, "Decrement") == 0) {
4497 MonoInst *ins_iconst;
4500 if (fsig->params [0]->type == MONO_TYPE_I4)
4501 opcode = OP_ATOMIC_ADD_NEW_I4;
4502 #if SIZEOF_REGISTER == 8
4503 else if (fsig->params [0]->type == MONO_TYPE_I8)
4504 opcode = OP_ATOMIC_ADD_NEW_I8;
4507 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4508 ins_iconst->inst_c0 = -1;
4509 ins_iconst->dreg = mono_alloc_ireg (cfg);
4510 MONO_ADD_INS (cfg->cbb, ins_iconst);
4512 MONO_INST_NEW (cfg, ins, opcode);
4513 ins->dreg = mono_alloc_ireg (cfg);
4514 ins->inst_basereg = args [0]->dreg;
4515 ins->inst_offset = 0;
4516 ins->sreg2 = ins_iconst->dreg;
4517 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4518 MONO_ADD_INS (cfg->cbb, ins);
4520 } else if (strcmp (cmethod->name, "Add") == 0) {
4523 if (fsig->params [0]->type == MONO_TYPE_I4)
4524 opcode = OP_ATOMIC_ADD_NEW_I4;
4525 #if SIZEOF_REGISTER == 8
4526 else if (fsig->params [0]->type == MONO_TYPE_I8)
4527 opcode = OP_ATOMIC_ADD_NEW_I8;
4531 MONO_INST_NEW (cfg, ins, opcode);
4532 ins->dreg = mono_alloc_ireg (cfg);
4533 ins->inst_basereg = args [0]->dreg;
4534 ins->inst_offset = 0;
4535 ins->sreg2 = args [1]->dreg;
4536 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4537 MONO_ADD_INS (cfg->cbb, ins);
4540 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
4542 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
4543 if (strcmp (cmethod->name, "Exchange") == 0) {
4545 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
4547 if (fsig->params [0]->type == MONO_TYPE_I4)
4548 opcode = OP_ATOMIC_EXCHANGE_I4;
4549 #if SIZEOF_REGISTER == 8
4550 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
4551 (fsig->params [0]->type == MONO_TYPE_I))
4552 opcode = OP_ATOMIC_EXCHANGE_I8;
4554 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
4555 opcode = OP_ATOMIC_EXCHANGE_I4;
4560 MONO_INST_NEW (cfg, ins, opcode);
4561 ins->dreg = mono_alloc_ireg (cfg);
4562 ins->inst_basereg = args [0]->dreg;
4563 ins->inst_offset = 0;
4564 ins->sreg2 = args [1]->dreg;
4565 MONO_ADD_INS (cfg->cbb, ins);
4567 switch (fsig->params [0]->type) {
4569 ins->type = STACK_I4;
4573 ins->type = STACK_I8;
4575 case MONO_TYPE_OBJECT:
4576 ins->type = STACK_OBJ;
4579 g_assert_not_reached ();
4582 if (cfg->gen_write_barriers && is_ref)
4583 emit_write_barrier (cfg, args [0], args [1], -1);
4585 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4587 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
4588 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
4590 gboolean is_ref = MONO_TYPE_IS_REFERENCE (fsig->params [1]);
4591 if (fsig->params [1]->type == MONO_TYPE_I4)
4593 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
4594 size = sizeof (gpointer);
4595 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
4598 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
4599 ins->dreg = alloc_ireg (cfg);
4600 ins->sreg1 = args [0]->dreg;
4601 ins->sreg2 = args [1]->dreg;
4602 ins->sreg3 = args [2]->dreg;
4603 ins->type = STACK_I4;
4604 MONO_ADD_INS (cfg->cbb, ins);
4605 } else if (size == 8) {
4606 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
4607 ins->dreg = alloc_ireg (cfg);
4608 ins->sreg1 = args [0]->dreg;
4609 ins->sreg2 = args [1]->dreg;
4610 ins->sreg3 = args [2]->dreg;
4611 ins->type = STACK_I8;
4612 MONO_ADD_INS (cfg->cbb, ins);
4614 /* g_assert_not_reached (); */
4616 if (cfg->gen_write_barriers && is_ref)
4617 emit_write_barrier (cfg, args [0], args [1], -1);
4619 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
4623 } else if (cmethod->klass->image == mono_defaults.corlib) {
4624 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
4625 && strcmp (cmethod->klass->name, "Debugger") == 0) {
4626 if (should_insert_brekpoint (cfg->method))
4627 MONO_INST_NEW (cfg, ins, OP_BREAK);
4629 MONO_INST_NEW (cfg, ins, OP_NOP);
4630 MONO_ADD_INS (cfg->cbb, ins);
4633 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
4634 && strcmp (cmethod->klass->name, "Environment") == 0) {
4636 EMIT_NEW_ICONST (cfg, ins, 1);
4638 EMIT_NEW_ICONST (cfg, ins, 0);
4642 } else if (cmethod->klass == mono_defaults.math_class) {
4644 * There is general branches code for Min/Max, but it does not work for
4646 * http://everything2.com/?node_id=1051618
4650 #ifdef MONO_ARCH_SIMD_INTRINSICS
4651 if (cfg->opt & MONO_OPT_SIMD) {
4652 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
4658 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
4662 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect a call to a runtime-internal method to a faster replacement when
 * one is available. The visible case replaces String.InternalAllocateStr with
 * a GC managed allocator when allocation profiling and shared-code mode are
 * both off. Returns the replacement call instruction, or (presumably, the
 * elided tail) NULL when no redirection applies — TODO confirm against the
 * full source.
 */
4665 inline static MonoInst*
4666 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4667 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4669 if (method->klass == mono_defaults.string_class) {
4670 /* managed string allocation support */
4671 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
4672 MonoInst *iargs [2];
4673 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4674 MonoMethod *managed_alloc = NULL;
4676 g_assert (vtable); /* Should not fail since it is System.String */
/* No managed allocator is available when cross-compiling */
4677 #ifndef MONO_CROSS_COMPILE
4678 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
/* Call the managed allocator with (vtable, length) instead of the icall */
4682 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4683 iargs [1] = args [0];
4684 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   Store the argument values in SP into newly created local variables and
 * record those vars in cfg->args, so the inlined body can reference them as
 * its arguments. For the 'this' argument the type is derived from the stack
 * entry since the signature does not carry it.
 */
4691 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4693 MonoInst *store, *temp;
4696 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4697 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4700 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4701 * would be different than the MonoInst's used to represent arguments, and
4702 * the ldelema implementation can't deal with that.
4703 * Solution: When ldelema is used on an inline argument, create a var for
4704 * it, emit ldelema on that var, and emit the saving code below in
4705 * inline_method () if needed.
4707 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4708 cfg->args [i] = temp;
4709 /* This uses cfg->args [i] which is set by the preceding line */
4710 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4711 store->cil_code = sp [0]->cil_code;
4716 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4717 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4719 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debugging helper, compiled in when MONO_INLINE_CALLED_LIMITED_METHODS is
 * set: only allow inlining of callees whose full name starts with the prefix
 * given in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT environment variable.
 * The env var is read once and cached in a static; an empty/unset limit
 * disables the filter.
 */
4721 check_inline_called_method_name_limit (MonoMethod *called_method)
4724 static char *limit = NULL;
4726 if (limit == NULL) {
4727 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4729 if (limit_string != NULL)
4730 limit = limit_string;
/* no limit configured: use "" so the filter below is skipped */
4732 limit = (char *) "";
4735 if (limit [0] != '\0') {
4736 char *called_method_name = mono_method_full_name (called_method, TRUE);
4738 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4739 g_free (called_method_name);
4741 //return (strncmp_result <= 0);
4742 return (strncmp_result == 0);
4749 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debugging helper, compiled in when MONO_INLINE_CALLER_LIMITED_METHODS is
 * set: only allow inlining when the CALLER's full name starts with the prefix
 * in the MONO_INLINE_CALLER_METHOD_NAME_LIMIT environment variable. Mirrors
 * check_inline_called_method_name_limit () above.
 */
4751 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4754 static char *limit = NULL;
4756 if (limit == NULL) {
4757 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4758 if (limit_string != NULL) {
4759 limit = limit_string;
/* no limit configured: use "" so the filter below is skipped */
4761 limit = (char *) "";
4765 if (limit [0] != '\0') {
4766 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
4768 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4769 g_free (caller_method_name);
4771 //return (strncmp_result <= 0);
4772 return (strncmp_result == 0);
/*
 * inline_method:
 *
 *   Try to inline CMETHOD at the current emission point. The caller's
 * per-method compile state (locals, args, bblock maps, generic context, ...)
 * is saved, the callee's IR is generated via a recursive mono_method_to_ir ()
 * call between fresh start/end bblocks, then the caller state is restored.
 * The inline is kept when the reported cost is below 60 or INLINE_ALWAYS is
 * set; otherwise the new bblocks are discarded. A non-void return value is
 * materialized through the RVAR temporary.
 */
4780 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4781 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_always)
4783 MonoInst *ins, *rvar = NULL;
4784 MonoMethodHeader *cheader;
4785 MonoBasicBlock *ebblock, *sbblock;
4787 MonoMethod *prev_inlined_method;
4788 MonoInst **prev_locals, **prev_args;
4789 MonoType **prev_arg_types;
4790 guint prev_real_offset;
4791 GHashTable *prev_cbb_hash;
4792 MonoBasicBlock **prev_cil_offset_to_bb;
4793 MonoBasicBlock *prev_cbb;
4794 unsigned char* prev_cil_start;
4795 guint32 prev_cil_offset_to_bb_len;
4796 MonoMethod *prev_current_method;
4797 MonoGenericContext *prev_generic_context;
4798 gboolean ret_var_set, prev_ret_var_set;
4800 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional env-var based inlining filters, see the helpers above */
4802 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4803 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
4806 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4807 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
4811 if (cfg->verbose_level > 2)
4812 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4814 if (!cmethod->inline_info) {
4815 mono_jit_stats.inlineable_methods++;
4816 cmethod->inline_info = 1;
4819 /* allocate local variables */
4820 cheader = mono_method_get_header (cmethod);
4822 if (cheader == NULL || mono_loader_get_last_error ()) {
4823 MonoLoaderError *error = mono_loader_get_last_error ();
4826 mono_metadata_free_mh (cheader);
4827 if (inline_always && error)
4828 mono_cfg_set_exception (cfg, error->exception_type);
4830 mono_loader_clear_error ();
4834 /*Must verify before creating locals as it can cause the JIT to assert.*/
4835 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
4836 mono_metadata_free_mh (cheader);
4840 /* allocate space to store the return value */
4841 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4842 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* Swap in the callee's locals while its IR is generated */
4846 prev_locals = cfg->locals;
4847 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4848 for (i = 0; i < cheader->num_locals; ++i)
4849 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4851 /* allocate start and end blocks */
4852 /* This is needed so if the inline is aborted, we can clean up */
4853 NEW_BBLOCK (cfg, sbblock);
4854 sbblock->real_offset = real_offset;
4856 NEW_BBLOCK (cfg, ebblock);
4857 ebblock->block_num = cfg->num_bblocks++;
4858 ebblock->real_offset = real_offset;
/* Save the caller's per-method compile state so it can be restored below */
4860 prev_args = cfg->args;
4861 prev_arg_types = cfg->arg_types;
4862 prev_inlined_method = cfg->inlined_method;
4863 cfg->inlined_method = cmethod;
4864 cfg->ret_var_set = FALSE;
4865 cfg->inline_depth ++;
4866 prev_real_offset = cfg->real_offset;
4867 prev_cbb_hash = cfg->cbb_hash;
4868 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4869 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4870 prev_cil_start = cfg->cil_start;
4871 prev_cbb = cfg->cbb;
4872 prev_current_method = cfg->current_method;
4873 prev_generic_context = cfg->generic_context;
4874 prev_ret_var_set = cfg->ret_var_set;
/* Recursively generate IR for the callee between sbblock and ebblock */
4876 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4878 ret_var_set = cfg->ret_var_set;
/* Restore the caller's state saved above */
4880 cfg->inlined_method = prev_inlined_method;
4881 cfg->real_offset = prev_real_offset;
4882 cfg->cbb_hash = prev_cbb_hash;
4883 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4884 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4885 cfg->cil_start = prev_cil_start;
4886 cfg->locals = prev_locals;
4887 cfg->args = prev_args;
4888 cfg->arg_types = prev_arg_types;
4889 cfg->current_method = prev_current_method;
4890 cfg->generic_context = prev_generic_context;
4891 cfg->ret_var_set = prev_ret_var_set;
4892 cfg->inline_depth --;
/* Keep the inline when it was cheap enough (cost < 60) or forced */
4894 if ((costs >= 0 && costs < 60) || inline_always) {
4895 if (cfg->verbose_level > 2)
4896 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4898 mono_jit_stats.inlined_methods++;
4900 /* always add some code to avoid block split failures */
4901 MONO_INST_NEW (cfg, ins, OP_NOP);
4902 MONO_ADD_INS (prev_cbb, ins);
4904 prev_cbb->next_bb = sbblock;
4905 link_bblock (cfg, prev_cbb, sbblock);
4908 * Get rid of the begin and end bblocks if possible to aid local
4911 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4913 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4914 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4916 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4917 MonoBasicBlock *prev = ebblock->in_bb [0];
4918 mono_merge_basic_blocks (cfg, prev, ebblock);
4920 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4921 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4922 cfg->cbb = prev_cbb;
4930 * If the inlined method contains only a throw, then the ret var is not
4931 * set, so set it to a dummy value.
4934 static double r8_0 = 0.0;
4936 switch (rvar->type) {
4938 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4941 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4946 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4949 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4950 ins->type = STACK_R8;
4951 ins->inst_p0 = (void*)&r8_0;
4952 ins->dreg = rvar->dreg;
4953 MONO_ADD_INS (cfg->cbb, ins);
4956 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4959 g_assert_not_reached ();
4963 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
4966 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Inline aborted: clear any pending exception/loader error state */
4969 if (cfg->verbose_level > 2)
4970 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4971 cfg->exception_type = MONO_EXCEPTION_NONE;
4972 mono_loader_clear_error ();
4974 /* This gets rid of the newly added bblocks */
4975 cfg->cbb = prev_cbb;
4977 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
4982 * Some of these comments may well be out-of-date.
4983 * Design decisions: we do a single pass over the IL code (and we do bblock
4984 * splitting/merging in the few cases when it's required: a back jump to an IL
4985 * address that was not already seen as bblock starting point).
4986 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4987 * Complex operations are decomposed in simpler ones right away. We need to let the
4988 * arch-specific code peek and poke inside this process somehow (except when the
4989 * optimizations can take advantage of the full semantic info of coarse opcodes).
4990 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4991 * MonoInst->opcode initially is the IL opcode or some simplification of that
4992 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4993 * opcode with value bigger than OP_LAST.
4994 * At this point the IR can be handed over to an interpreter, a dumb code generator
4995 * or to the optimizing code generator that will translate it to SSA form.
4997 * Profiling directed optimizations.
4998 * We may compile by default with few or no optimizations and instrument the code
4999 * or the user may indicate what methods to optimize the most either in a config file
5000 * or through repeated runs where the compiler applies offline the optimizations to
5001 * each method and then decides if it was worth it.
5004 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
5005 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
5006 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
5007 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
5008 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
5009 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
5010 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
5011 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; LOAD_ERROR;}
5013 /* offset from br.s -> br like opcodes */
5014 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return TRUE when the CIL address IP belongs to basic block BB, i.e. the
 * cil_offset_to_bb map has either no entry for it yet or maps it to BB.
 */
5017 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
5019 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
5021 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   First pass over the CIL code [START, END): decode each opcode and create
 * basic blocks (via GET_BBLOCK) at every branch target and fall-through
 * point, so the main translation loop can split the method correctly.
 * Blocks that end in a throw are marked out_of_line.
 */
5025 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
5027 unsigned char *ip = start;
5028 unsigned char *target;
5031 MonoBasicBlock *bblock;
5032 const MonoOpcode *opcode;
5035 cli_addr = ip - start;
5036 i = mono_opcode_value ((const guint8 **)&ip, end);
5039 opcode = &mono_opcodes [i];
/* Advance IP according to the operand kind; only branches create bblocks */
5040 switch (opcode->argument) {
5041 case MonoInlineNone:
5044 case MonoInlineString:
5045 case MonoInlineType:
5046 case MonoInlineField:
5047 case MonoInlineMethod:
5050 case MonoShortInlineR:
5057 case MonoShortInlineVar:
5058 case MonoShortInlineI:
5061 case MonoShortInlineBrTarget:
/* 1-byte signed offset, relative to the end of the 2-byte instruction */
5062 target = start + cli_addr + 2 + (signed char)ip [1];
5063 GET_BBLOCK (cfg, bblock, target);
5066 GET_BBLOCK (cfg, bblock, ip);
5068 case MonoInlineBrTarget:
/* 4-byte signed offset, relative to the end of the 5-byte instruction */
5069 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
5070 GET_BBLOCK (cfg, bblock, target);
5073 GET_BBLOCK (cfg, bblock, ip);
5075 case MonoInlineSwitch: {
5076 guint32 n = read32 (ip + 1);
/* switch: N 32-bit targets follow the count; each is a new bblock */
5079 cli_addr += 5 + 4 * n;
5080 target = start + cli_addr;
5081 GET_BBLOCK (cfg, bblock, target);
5083 for (j = 0; j < n; ++j) {
5084 target = start + cli_addr + (gint32)read32 (ip);
5085 GET_BBLOCK (cfg, bblock, target);
5095 g_assert_not_reached ();
5098 if (i == CEE_THROW) {
5099 unsigned char *bb_start = ip - 1;
5101 /* Find the start of the bblock containing the throw */
5103 while ((bb_start >= start) && !bblock) {
5104 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
5108 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN to a MonoMethod in the context of M. For wrapper methods
 * the token indexes the wrapper's own data table; otherwise it is looked up
 * in the image's metadata, allowing open constructed types.
 */
5117 static inline MonoMethod *
5118 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5122 if (m->wrapper_type != MONO_WRAPPER_NONE)
5123 return mono_method_get_wrapper_data (m, token);
5125 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when not compiling with generic
 * sharing, methods on open constructed types are rejected (the elided branch
 * presumably returns NULL/an error — TODO confirm against the full source).
 */
5130 static inline MonoMethod *
5131 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5133 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
5135 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass in the context of METHOD: wrapper data for
 * wrappers, metadata lookup otherwise. The class is initialized before being
 * returned.
 */
5141 static inline MonoClass*
5142 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
5146 if (method->wrapper_type != MONO_WRAPPER_NONE)
5147 klass = mono_method_get_wrapper_data (method, token);
5149 klass = mono_class_get_full (method->klass->image, token, context);
5151 mono_class_init (klass);
5156 * Returns TRUE if the JIT should abort inlining because "callee"
5157 * is influenced by security attributes.
/*
 * check_linkdemand:
 *
 *   Evaluate declarative-security link demands for CALLER invoking CALLEE.
 * For an ECMA link demand, code throwing a SecurityException is emitted
 * before the call; for other failures the compile is flagged with
 * MONO_EXCEPTION_SECURITY_LINKDEMAND (unless an earlier exception is already
 * recorded).
 */
5160 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
5164 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
5168 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
5169 if (result == MONO_JIT_SECURITY_OK)
5172 if (result == MONO_JIT_LINKDEMAND_ECMA) {
5173 /* Generate code to throw a SecurityException before the actual call/link */
5174 MonoSecurityManager *secman = mono_security_manager_get_methods ();
/* args: (4, caller) — the meaning of the constant 4 is defined by the
 * managed LinkDemandSecurityException helper; confirm against secman */
5177 NEW_ICONST (cfg, args [0], 4);
5178 NEW_METHODCONST (cfg, args [1], caller);
5179 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
5180 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
5181 /* don't hide previous results */
5182 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
5183 cfg->exception_data = result;
/*
 * throw_exception:
 *
 *   Return the managed SecurityManager.ThrowException(exception) method,
 * resolving and caching it in a static on first use.
 */
5191 throw_exception (void)
5193 static MonoMethod *method = NULL;
5196 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5197 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit a call to SecurityManager.ThrowException (EX) at the current
 * emission point, so the exception object EX is raised at runtime.
 */
5204 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
5206 MonoMethod *thrower = throw_exception ();
5209 EMIT_NEW_PCONST (cfg, args [0], ex);
5210 mono_emit_method_call (cfg, thrower, args, NULL);
5214 * Return the original method if a wrapper is specified. We can only access
5215 * the custom attributes from the original method.
/*
 * get_original_method:
 *
 *   Map a wrapper back to the method it wraps so its custom attributes can
 * be inspected. Non-wrappers (and native-to-managed wrappers, per the note
 * below) are handled by the elided early-return branches.
 */
5218 get_original_method (MonoMethod *method)
5220 if (method->wrapper_type == MONO_WRAPPER_NONE)
5223 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
5224 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
5227 /* in other cases we need to find the original method */
5228 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: if CALLER (unwrapped via get_original_method)
 * may not access FIELD, emit code raising the returned exception.
 */
5232 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
5233 MonoBasicBlock *bblock, unsigned char *ip)
5235 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5236 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
5238 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: if CALLER (unwrapped via get_original_method)
 * may not call CALLEE, emit code raising the returned exception.
 */
5242 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
5243 MonoBasicBlock *bblock, unsigned char *ip)
5245 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5246 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
5248 emit_throw_exception (cfg, ex);
5252 * Check that the IL instructions at ip are the array initialization
5253 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *
 *   Recognize the dup/ldtoken/call RuntimeHelpers.InitializeArray IL pattern
 * that follows a newarr, and return a pointer to the static field's raw data
 * (or, for AOT, the field RVA as a pointer) so the array initialization can
 * be done directly. Returns NULL when the pattern or element type does not
 * qualify (e.g. multi-byte element types on big-endian targets).
 */
5256 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
5259 * newarr[System.Int32]
5261 * ldtoken field valuetype ...
5262 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip [5] == 0x4 checks the ldtoken token table byte; see pattern above */
5264 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
5265 guint32 token = read32 (ip + 7);
5266 guint32 field_token = read32 (ip + 2);
5267 guint32 field_index = field_token & 0xffffff;
5269 const char *data_ptr;
5271 MonoMethod *cmethod;
5272 MonoClass *dummy_class;
5273 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
5279 *out_field_token = field_token;
5281 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Only the corlib RuntimeHelpers.InitializeArray call qualifies */
5284 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
5286 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
5287 case MONO_TYPE_BOOLEAN:
5291 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
5292 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
5293 case MONO_TYPE_CHAR:
5303 return NULL; /* stupid ARM FP swapped format */
/* The computed blob size must not exceed the field's declared size */
5313 if (size > mono_type_size (field->type, &dummy_align))
5316 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
5317 if (!method->klass->image->dynamic) {
5318 field_index = read32 (ip + 2) & 0xffffff;
5319 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
5320 data_ptr = mono_image_rva_map (method->klass->image, rva);
5321 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
5322 /* for aot code we do the lookup on load */
5323 if (aot && data_ptr)
5324 return GUINT_TO_POINTER (rva);
5326 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
5328 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG describing the invalid IL at
 * IP, including a disassembly of the offending instruction (or a note that
 * the body is empty). The header is queued on headers_to_free for later
 * release.
 */
5336 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
5338 char *method_fname = mono_method_full_name (method, TRUE);
5340 MonoMethodHeader *header = mono_method_get_header (method);
5342 if (header->code_size == 0)
5343 method_code = g_strdup ("method body is empty.");
5345 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
5346 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
5347 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
5348 g_free (method_fname);
5349 g_free (method_code);
5350 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *
 *   Record a pre-built managed exception object on CFG. The pointer is
 * registered as a GC root since a managed object is stored in native state.
 */
5354 set_exception_object (MonoCompile *cfg, MonoException *exception)
5356 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
5357 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
5358 cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 *
 *   Return TRUE if KLASS is a reference type, resolving generic type
 * parameters through the generic sharing context when one is active.
 */
5362 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5366 if (cfg->generic_sharing_context)
5367 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
5369 type = &klass->byval_arg;
5370 return MONO_TYPE_IS_REFERENCE (type);
/*
 * emit_stloc_ir:
 *
 *   Emit a store of the stack value *SP into local N. When the value is a
 * freshly emitted constant and the store would be a plain register move, the
 * constant's destination register is retargeted instead of emitting a move.
 */
5374 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5377 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5378 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5379 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5380 /* Optimize reg-reg moves away */
5382 * Can't optimize other opcodes, since sp[0] might point to
5383 * the last ins of a decomposed opcode.
5385 sp [0]->dreg = (cfg)->locals [n]->dreg;
5387 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5392 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *
 *   Peephole for the common "ldloca <n>; initobj <type>" sequence: instead
 * of taking the local's address, initialize the local directly (NULL for
 * reference types, vzero for structs). Returns the advanced IP when the
 * pattern was consumed — the elided tail presumably returns NULL otherwise;
 * TODO confirm against the full source.
 */
5395 static inline unsigned char *
5396 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
5405 local = read16 (ip + 2);
5409 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5410 gboolean skip = FALSE;
5412 /* From the INITOBJ case */
5413 token = read32 (ip + 2);
5414 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5415 CHECK_TYPELOAD (klass);
5416 if (generic_class_is_reference_type (cfg, klass)) {
5417 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5418 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5419 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5420 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5421 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/*
 * is_exception_class:
 *
 *   Return TRUE if CLASS derives (directly or transitively) from
 * System.Exception, by walking the parent chain.
 */
5434 is_exception_class (MonoClass *class)
5437 if (class == mono_defaults.exception_class)
5439 class = class->parent;
5445 * is_jit_optimizer_disabled:
5447 * Determine whenever M's assembly has a DebuggableAttribute with the
5448 * IsJITOptimizerDisabled flag set.
/*
 * is_jit_optimizer_disabled:
 *
 *   Determine whether M's assembly carries a DebuggableAttribute whose
 * IsJITOptimizerDisabled flag is set, decoding the attribute blob by hand.
 * The result is cached on the assembly; the memory barrier orders the cached
 * value before the inited flag for lock-free readers.
 */
5451 is_jit_optimizer_disabled (MonoMethod *m)
5453 MonoAssembly *ass = m->klass->image->assembly;
5454 MonoCustomAttrInfo* attrs;
5455 static MonoClass *klass;
5457 gboolean val = FALSE;
5460 if (ass->jit_optimizer_disabled_inited)
5461 return ass->jit_optimizer_disabled;
5463 klass = mono_class_from_name_cached (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
5465 attrs = mono_custom_attrs_from_assembly (ass);
5467 for (i = 0; i < attrs->num_attrs; ++i) {
5468 MonoCustomAttrEntry *attr = &attrs->attrs [i];
5471 MonoMethodSignature *sig;
5473 if (!attr->ctor || attr->ctor->klass != klass)
5475 /* Decode the attribute. See reflection.c */
5476 len = attr->data_size;
5477 p = (const char*)attr->data;
/* Custom attribute blobs start with the 0x0001 prolog */
5478 g_assert (read16 (p) == 0x0001);
5481 // FIXME: Support named parameters
5482 sig = mono_method_signature (attr->ctor);
/* Only the DebuggableAttribute(bool, bool) constructor is decoded */
5483 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
5485 /* Two boolean arguments */
5489 mono_custom_attrs_free (attrs);
5492 ass->jit_optimizer_disabled = val;
5493 mono_memory_barrier ();
5494 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Decide whether a tail. call from METHOD to CMETHOD can be honored.
 * Starts from an arch/signature compatibility check, then vetoes the tail
 * call for anything that could reference the current frame's stack (byref,
 * pointer or fnptr parameters, valuetype 'this'), for pinvokes, methods that
 * save the LMF, and most wrappers.
 */
5500 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig)
5502 gboolean supported_tail_call;
5505 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
5506 supported_tail_call = MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
5508 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
5511 for (i = 0; i < fsig->param_count; ++i) {
5512 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
5513 /* These can point to the current method's stack */
5514 supported_tail_call = FALSE;
5516 if (fsig->hasthis && cmethod->klass->valuetype)
5517 /* this might point to the current method's stack */
5518 supported_tail_call = FALSE;
5519 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
5520 supported_tail_call = FALSE;
5521 if (cfg->method->save_lmf)
5522 supported_tail_call = FALSE;
5523 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
5524 supported_tail_call = FALSE;
5526 /* Debugging support */
/* COUNT env var: bisect which tail call causes a failure by disabling
 * all tail calls past the COUNT-th one */
5528 if (supported_tail_call) {
5529 static int count = 0;
5531 if (getenv ("COUNT")) {
5532 if (count == atoi (getenv ("COUNT")))
5533 printf ("LAST: %s\n", mono_method_full_name (cmethod, TRUE));
5534 if (count > atoi (getenv ("COUNT")))
5535 supported_tail_call = FALSE;
5540 return supported_tail_call;
5544 * mono_method_to_ir:
5546 * Translate the .net IL into linear IR.
5549 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5550 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5551 guint inline_offset, gboolean is_virtual_call)
5554 MonoInst *ins, **sp, **stack_start;
5555 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5556 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
5557 MonoMethod *cmethod, *method_definition;
5558 MonoInst **arg_array;
5559 MonoMethodHeader *header;
5561 guint32 token, ins_flag;
5563 MonoClass *constrained_call = NULL;
5564 unsigned char *ip, *end, *target, *err_pos;
5565 static double r8_0 = 0.0;
5566 MonoMethodSignature *sig;
5567 MonoGenericContext *generic_context = NULL;
5568 MonoGenericContainer *generic_container = NULL;
5569 MonoType **param_types;
5570 int i, n, start_new_bblock, dreg;
5571 int num_calls = 0, inline_costs = 0;
5572 int breakpoint_id = 0;
5574 MonoBoolean security, pinvoke;
5575 MonoSecurityManager* secman = NULL;
5576 MonoDeclSecurityActions actions;
5577 GSList *class_inits = NULL;
5578 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5580 gboolean init_locals, seq_points, skip_dead_blocks;
5581 gboolean disable_inline;
5583 disable_inline = is_jit_optimizer_disabled (method);
5585 /* serialization and xdomain stuff may need access to private fields and methods */
5586 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5587 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5588 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5589 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5590 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5591 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5593 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5595 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5596 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5597 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5598 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5600 image = method->klass->image;
5601 header = mono_method_get_header (method);
5603 MonoLoaderError *error;
5605 if ((error = mono_loader_get_last_error ())) {
5606 mono_cfg_set_exception (cfg, error->exception_type);
5608 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
5609 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
5611 goto exception_exit;
5613 generic_container = mono_method_get_generic_container (method);
5614 sig = mono_method_signature (method);
5615 num_args = sig->hasthis + sig->param_count;
5616 ip = (unsigned char*)header->code;
5617 cfg->cil_start = ip;
5618 end = ip + header->code_size;
5619 mono_jit_stats.cil_code_size += header->code_size;
5620 init_locals = header->init_locals;
5622 seq_points = cfg->gen_seq_points && cfg->method == method;
5625 * Methods without init_locals set could cause asserts in various passes
5630 method_definition = method;
5631 while (method_definition->is_inflated) {
5632 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5633 method_definition = imethod->declaring;
5636 /* SkipVerification is not allowed if core-clr is enabled */
5637 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5639 dont_verify_stloc = TRUE;
5642 if (mono_debug_using_mono_debugger ())
5643 cfg->keep_cil_nops = TRUE;
5645 if (sig->is_inflated)
5646 generic_context = mono_method_get_context (method);
5647 else if (generic_container)
5648 generic_context = &generic_container->context;
5649 cfg->generic_context = generic_context;
5651 if (!cfg->generic_sharing_context)
5652 g_assert (!sig->has_type_parameters);
5654 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5655 g_assert (method->is_inflated);
5656 g_assert (mono_method_get_context (method)->method_inst);
5658 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5659 g_assert (sig->generic_param_count);
5661 if (cfg->method == method) {
5662 cfg->real_offset = 0;
5664 cfg->real_offset = inline_offset;
5667 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5668 cfg->cil_offset_to_bb_len = header->code_size;
5670 cfg->current_method = method;
5672 if (cfg->verbose_level > 2)
5673 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5675 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5677 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5678 for (n = 0; n < sig->param_count; ++n)
5679 param_types [n + sig->hasthis] = sig->params [n];
5680 cfg->arg_types = param_types;
5682 dont_inline = g_list_prepend (dont_inline, method);
5683 if (cfg->method == method) {
5685 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5686 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5689 NEW_BBLOCK (cfg, start_bblock);
5690 cfg->bb_entry = start_bblock;
5691 start_bblock->cil_code = NULL;
5692 start_bblock->cil_length = 0;
5695 NEW_BBLOCK (cfg, end_bblock);
5696 cfg->bb_exit = end_bblock;
5697 end_bblock->cil_code = NULL;
5698 end_bblock->cil_length = 0;
5699 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
5700 g_assert (cfg->num_bblocks == 2);
5702 arg_array = cfg->args;
5704 if (header->num_clauses) {
5705 cfg->spvars = g_hash_table_new (NULL, NULL);
5706 cfg->exvars = g_hash_table_new (NULL, NULL);
5708 /* handle exception clauses */
5709 for (i = 0; i < header->num_clauses; ++i) {
5710 MonoBasicBlock *try_bb;
5711 MonoExceptionClause *clause = &header->clauses [i];
5712 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5713 try_bb->real_offset = clause->try_offset;
5714 try_bb->try_start = TRUE;
5715 try_bb->region = ((i + 1) << 8) | clause->flags;
5716 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5717 tblock->real_offset = clause->handler_offset;
5718 tblock->flags |= BB_EXCEPTION_HANDLER;
5720 link_bblock (cfg, try_bb, tblock);
5722 if (*(ip + clause->handler_offset) == CEE_POP)
5723 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5725 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5726 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5727 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5728 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5729 MONO_ADD_INS (tblock, ins);
5732 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
5733 MONO_ADD_INS (tblock, ins);
5736 /* todo: is a fault block unsafe to optimize? */
5737 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5738 tblock->flags |= BB_EXCEPTION_UNSAFE;
5742 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5744 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5746 /* catch and filter blocks get the exception object on the stack */
5747 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5748 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5749 MonoInst *dummy_use;
5751 /* mostly like handle_stack_args (), but just sets the input args */
5752 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5753 tblock->in_scount = 1;
5754 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5755 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5758 * Add a dummy use for the exvar so its liveness info will be
5762 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5764 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5765 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5766 tblock->flags |= BB_EXCEPTION_HANDLER;
5767 tblock->real_offset = clause->data.filter_offset;
5768 tblock->in_scount = 1;
5769 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5770 /* The filter block shares the exvar with the handler block */
5771 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5772 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5773 MONO_ADD_INS (tblock, ins);
5777 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5778 clause->data.catch_class &&
5779 cfg->generic_sharing_context &&
5780 mono_class_check_context_used (clause->data.catch_class)) {
5782 * In shared generic code with catch
5783 * clauses containing type variables
5784 * the exception handling code has to
5785 * be able to get to the rgctx.
5786 * Therefore we have to make sure that
5787 * the vtable/mrgctx argument (for
5788 * static or generic methods) or the
5789 * "this" argument (for non-static
5790 * methods) are live.
5792 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5793 mini_method_get_context (method)->method_inst ||
5794 method->klass->valuetype) {
5795 mono_get_vtable_var (cfg);
5797 MonoInst *dummy_use;
5799 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5804 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5805 cfg->cbb = start_bblock;
5806 cfg->args = arg_array;
5807 mono_save_args (cfg, sig, inline_args);
5810 /* FIRST CODE BLOCK */
5811 NEW_BBLOCK (cfg, bblock);
5812 bblock->cil_code = ip;
5816 ADD_BBLOCK (cfg, bblock);
5818 if (cfg->method == method) {
5819 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5820 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5821 MONO_INST_NEW (cfg, ins, OP_BREAK);
5822 MONO_ADD_INS (bblock, ins);
5826 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5827 secman = mono_security_manager_get_methods ();
5829 security = (secman && mono_method_has_declsec (method));
5830 /* at this point having security doesn't mean we have any code to generate */
5831 if (security && (cfg->method == method)) {
5832 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5833 * And we do not want to enter the next section (with allocation) if we
5834 * have nothing to generate */
5835 security = mono_declsec_get_demands (method, &actions);
5838 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5839 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5841 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5842 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5843 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5845 /* unless the method or its class has the [SuppressUnmanagedCodeSecurity] attribute */
5846 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5850 mono_custom_attrs_free (custom);
5853 custom = mono_custom_attrs_from_class (wrapped->klass);
5854 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5858 mono_custom_attrs_free (custom);
5861 /* not a P/Invoke after all */
5866 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5867 /* we use a separate basic block for the initialization code */
5868 NEW_BBLOCK (cfg, init_localsbb);
5869 cfg->bb_init = init_localsbb;
5870 init_localsbb->real_offset = cfg->real_offset;
5871 start_bblock->next_bb = init_localsbb;
5872 init_localsbb->next_bb = bblock;
5873 link_bblock (cfg, start_bblock, init_localsbb);
5874 link_bblock (cfg, init_localsbb, bblock);
5876 cfg->cbb = init_localsbb;
5878 start_bblock->next_bb = bblock;
5879 link_bblock (cfg, start_bblock, bblock);
5882 /* at this point we know, if security is TRUE, that some code needs to be generated */
5883 if (security && (cfg->method == method)) {
5886 mono_jit_stats.cas_demand_generation++;
5888 if (actions.demand.blob) {
5889 /* Add code for SecurityAction.Demand */
5890 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5891 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5892 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5893 mono_emit_method_call (cfg, secman->demand, args, NULL);
5895 if (actions.noncasdemand.blob) {
5896 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5897 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5898 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5899 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5900 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5901 mono_emit_method_call (cfg, secman->demand, args, NULL);
5903 if (actions.demandchoice.blob) {
5904 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5905 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5906 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5907 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5908 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5912 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5914 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5917 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5918 /* check if this is native code, e.g. an icall or a p/invoke */
5919 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5920 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5922 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
5923 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
5925 /* if this is a native call then it can only be JITted from platform code */
5926 if ((icall || pinvk) && method->klass && method->klass->image) {
5927 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
5928 MonoException *ex = icall ? mono_get_exception_security () :
5929 mono_get_exception_method_access ();
5930 emit_throw_exception (cfg, ex);
5937 if (header->code_size == 0)
5940 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5945 if (cfg->method == method)
5946 mono_debug_init_method (cfg, bblock, breakpoint_id);
5948 for (n = 0; n < header->num_locals; ++n) {
5949 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5954 /* We force the vtable variable here for all shared methods
5955 for the possibility that they might show up in a stack
5956 trace where their exact instantiation is needed. */
5957 if (cfg->generic_sharing_context && method == cfg->method) {
5958 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5959 mini_method_get_context (method)->method_inst ||
5960 method->klass->valuetype) {
5961 mono_get_vtable_var (cfg);
5963 /* FIXME: Is there a better way to do this?
5964 We need the variable live for the duration
5965 of the whole method. */
5966 cfg->args [0]->flags |= MONO_INST_INDIRECT;
5970 /* add a check for this != NULL to inlined methods */
5971 if (is_virtual_call) {
5974 NEW_ARGLOAD (cfg, arg_ins, 0);
5975 MONO_ADD_INS (cfg->cbb, arg_ins);
5976 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
5979 skip_dead_blocks = !dont_verify;
5980 if (skip_dead_blocks) {
5981 original_bb = bb = mono_basic_block_split (method, &error);
5982 if (!mono_error_ok (&error)) {
5983 mono_error_cleanup (&error);
5989 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5990 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5993 start_new_bblock = 0;
5996 if (cfg->method == method)
5997 cfg->real_offset = ip - header->code;
5999 cfg->real_offset = inline_offset;
6004 if (start_new_bblock) {
6005 bblock->cil_length = ip - bblock->cil_code;
6006 if (start_new_bblock == 2) {
6007 g_assert (ip == tblock->cil_code);
6009 GET_BBLOCK (cfg, tblock, ip);
6011 bblock->next_bb = tblock;
6014 start_new_bblock = 0;
6015 for (i = 0; i < bblock->in_scount; ++i) {
6016 if (cfg->verbose_level > 3)
6017 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
6018 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
6022 g_slist_free (class_inits);
6025 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
6026 link_bblock (cfg, bblock, tblock);
6027 if (sp != stack_start) {
6028 handle_stack_args (cfg, stack_start, sp - stack_start);
6030 CHECK_UNVERIFIABLE (cfg);
6032 bblock->next_bb = tblock;
6035 for (i = 0; i < bblock->in_scount; ++i) {
6036 if (cfg->verbose_level > 3)
6037 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
6038 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
6041 g_slist_free (class_inits);
6046 if (skip_dead_blocks) {
6047 int ip_offset = ip - header->code;
6049 if (ip_offset == bb->end)
6053 int op_size = mono_opcode_size (ip, end);
6054 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
6056 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
6058 if (ip_offset + op_size == bb->end) {
6059 MONO_INST_NEW (cfg, ins, OP_NOP);
6060 MONO_ADD_INS (bblock, ins);
6061 start_new_bblock = 1;
6069 * Sequence points are points where the debugger can place a breakpoint.
6070 * Currently, we generate these automatically at points where the IL
6073 if (seq_points && sp == stack_start) {
6074 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
6075 MONO_ADD_INS (cfg->cbb, ins);
6078 bblock->real_offset = cfg->real_offset;
6080 if ((cfg->method == method) && cfg->coverage_info) {
6081 guint32 cil_offset = ip - header->code;
6082 cfg->coverage_info->data [cil_offset].cil_code = ip;
6084 /* TODO: Use an increment here */
6085 #if defined(TARGET_X86)
6086 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
6087 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
6089 MONO_ADD_INS (cfg->cbb, ins);
6091 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
6092 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
6096 if (cfg->verbose_level > 3)
6097 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
6101 if (cfg->keep_cil_nops)
6102 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
6104 MONO_INST_NEW (cfg, ins, OP_NOP);
6106 MONO_ADD_INS (bblock, ins);
6109 if (should_insert_brekpoint (cfg->method))
6110 MONO_INST_NEW (cfg, ins, OP_BREAK);
6112 MONO_INST_NEW (cfg, ins, OP_NOP);
6114 MONO_ADD_INS (bblock, ins);
6120 CHECK_STACK_OVF (1);
6121 n = (*ip)-CEE_LDARG_0;
6123 EMIT_NEW_ARGLOAD (cfg, ins, n);
6131 CHECK_STACK_OVF (1);
6132 n = (*ip)-CEE_LDLOC_0;
6134 EMIT_NEW_LOCLOAD (cfg, ins, n);
6143 n = (*ip)-CEE_STLOC_0;
6146 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
6148 emit_stloc_ir (cfg, sp, header, n);
6155 CHECK_STACK_OVF (1);
6158 EMIT_NEW_ARGLOAD (cfg, ins, n);
6164 CHECK_STACK_OVF (1);
6167 NEW_ARGLOADA (cfg, ins, n);
6168 MONO_ADD_INS (cfg->cbb, ins);
6178 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
6180 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
6185 CHECK_STACK_OVF (1);
6188 EMIT_NEW_LOCLOAD (cfg, ins, n);
6192 case CEE_LDLOCA_S: {
6193 unsigned char *tmp_ip;
6195 CHECK_STACK_OVF (1);
6196 CHECK_LOCAL (ip [1]);
6198 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
6204 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
6213 CHECK_LOCAL (ip [1]);
6214 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
6216 emit_stloc_ir (cfg, sp, header, ip [1]);
6221 CHECK_STACK_OVF (1);
6222 EMIT_NEW_PCONST (cfg, ins, NULL);
6223 ins->type = STACK_OBJ;
6228 CHECK_STACK_OVF (1);
6229 EMIT_NEW_ICONST (cfg, ins, -1);
6242 CHECK_STACK_OVF (1);
6243 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
6249 CHECK_STACK_OVF (1);
6251 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
6257 CHECK_STACK_OVF (1);
6258 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
6264 CHECK_STACK_OVF (1);
6265 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6266 ins->type = STACK_I8;
6267 ins->dreg = alloc_dreg (cfg, STACK_I8);
6269 ins->inst_l = (gint64)read64 (ip);
6270 MONO_ADD_INS (bblock, ins);
6276 gboolean use_aotconst = FALSE;
6278 #ifdef TARGET_POWERPC
6279 /* FIXME: Clean this up */
6280 if (cfg->compile_aot)
6281 use_aotconst = TRUE;
6284 /* FIXME: we should really allocate this only late in the compilation process */
6285 f = mono_domain_alloc (cfg->domain, sizeof (float));
6287 CHECK_STACK_OVF (1);
6293 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
6295 dreg = alloc_freg (cfg);
6296 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
6297 ins->type = STACK_R8;
6299 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6300 ins->type = STACK_R8;
6301 ins->dreg = alloc_dreg (cfg, STACK_R8);
6303 MONO_ADD_INS (bblock, ins);
6313 gboolean use_aotconst = FALSE;
6315 #ifdef TARGET_POWERPC
6316 /* FIXME: Clean this up */
6317 if (cfg->compile_aot)
6318 use_aotconst = TRUE;
6321 /* FIXME: we should really allocate this only late in the compilation process */
6322 d = mono_domain_alloc (cfg->domain, sizeof (double));
6324 CHECK_STACK_OVF (1);
6330 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
6332 dreg = alloc_freg (cfg);
6333 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
6334 ins->type = STACK_R8;
6336 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6337 ins->type = STACK_R8;
6338 ins->dreg = alloc_dreg (cfg, STACK_R8);
6340 MONO_ADD_INS (bblock, ins);
6349 MonoInst *temp, *store;
6351 CHECK_STACK_OVF (1);
6355 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
6356 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
6358 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6361 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6374 if (sp [0]->type == STACK_R8)
6375 /* we need to pop the value from the x86 FP stack */
6376 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
6385 if (stack_start != sp)
6387 token = read32 (ip + 1);
6388 /* FIXME: check the signature matches */
6389 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6391 if (!cmethod || mono_loader_get_last_error ())
6394 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
6395 GENERIC_SHARING_FAILURE (CEE_JMP);
6397 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
6398 CHECK_CFG_EXCEPTION;
6400 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6402 MonoMethodSignature *fsig = mono_method_signature (cmethod);
6405 /* Handle tail calls similarly to calls */
6406 n = fsig->param_count + fsig->hasthis;
6408 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
6409 call->method = cmethod;
6410 call->tail_call = TRUE;
6411 call->signature = mono_method_signature (cmethod);
6412 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6413 call->inst.inst_p0 = cmethod;
6414 for (i = 0; i < n; ++i)
6415 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
6417 mono_arch_emit_call (cfg, call);
6418 MONO_ADD_INS (bblock, (MonoInst*)call);
6421 for (i = 0; i < num_args; ++i)
6422 /* Prevent arguments from being optimized away */
6423 arg_array [i]->flags |= MONO_INST_VOLATILE;
6425 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6426 ins = (MonoInst*)call;
6427 ins->inst_p0 = cmethod;
6428 MONO_ADD_INS (bblock, ins);
6432 start_new_bblock = 1;
6437 case CEE_CALLVIRT: {
6438 MonoInst *addr = NULL;
6439 MonoMethodSignature *fsig = NULL;
6441 int virtual = *ip == CEE_CALLVIRT;
6442 int calli = *ip == CEE_CALLI;
6443 gboolean pass_imt_from_rgctx = FALSE;
6444 MonoInst *imt_arg = NULL;
6445 gboolean pass_vtable = FALSE;
6446 gboolean pass_mrgctx = FALSE;
6447 MonoInst *vtable_arg = NULL;
6448 gboolean check_this = FALSE;
6449 gboolean supported_tail_call = FALSE;
6452 token = read32 (ip + 1);
6459 if (method->wrapper_type != MONO_WRAPPER_NONE)
6460 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6462 fsig = mono_metadata_parse_signature (image, token);
6464 n = fsig->param_count + fsig->hasthis;
6466 if (method->dynamic && fsig->pinvoke) {
6470 * This is a call through a function pointer using a pinvoke
6471 * signature. Have to create a wrapper and call that instead.
6472 * FIXME: This is very slow, need to create a wrapper at JIT time
6473 * instead based on the signature.
6475 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
6476 EMIT_NEW_PCONST (cfg, args [1], fsig);
6478 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6481 MonoMethod *cil_method;
6483 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6484 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6485 cil_method = cmethod;
6486 } else if (constrained_call) {
6487 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6489 * This is needed since get_method_constrained can't find
6490 * the method in klass representing a type var.
6491 * The type var is guaranteed to be a reference type in this
6494 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6495 cil_method = cmethod;
6496 g_assert (!cmethod->klass->valuetype);
6498 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6501 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6502 cil_method = cmethod;
6505 if (!cmethod || mono_loader_get_last_error ())
6507 if (!dont_verify && !cfg->skip_visibility) {
6508 MonoMethod *target_method = cil_method;
6509 if (method->is_inflated) {
6510 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6512 if (!mono_method_can_access_method (method_definition, target_method) &&
6513 !mono_method_can_access_method (method, cil_method))
6514 METHOD_ACCESS_FAILURE;
6517 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6518 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6520 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6521 /* MS.NET seems to silently convert this to a callvirt */
6526 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
6527 * converts to a callvirt.
6529 * tests/bug-515884.il is an example of this behavior
6531 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
6532 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
6533 if (!virtual && cmethod->klass->marshalbyref && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
6537 if (!cmethod->klass->inited)
6538 if (!mono_class_init (cmethod->klass))
6541 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6542 mini_class_is_system_array (cmethod->klass)) {
6543 array_rank = cmethod->klass->rank;
6544 fsig = mono_method_signature (cmethod);
6546 fsig = mono_method_signature (cmethod);
6551 if (fsig->pinvoke) {
6552 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6553 check_for_pending_exc, FALSE);
6554 fsig = mono_method_signature (wrapper);
6555 } else if (constrained_call) {
6556 fsig = mono_method_signature (cmethod);
6558 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6562 mono_save_token_info (cfg, image, token, cil_method);
6564 n = fsig->param_count + fsig->hasthis;
6566 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6567 if (check_linkdemand (cfg, method, cmethod))
6569 CHECK_CFG_EXCEPTION;
6572 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6573 g_assert_not_reached ();
6576 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6579 if (!cfg->generic_sharing_context && cmethod)
6580 g_assert (!mono_method_check_context_used (cmethod));
6584 //g_assert (!virtual || fsig->hasthis);
6588 if (constrained_call) {
6590 * We have the `constrained.' prefix opcode.
6592 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6594 * The type parameter is instantiated as a valuetype,
6595 * but that type doesn't override the method we're
6596 * calling, so we need to box `this'.
6598 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6599 ins->klass = constrained_call;
6600 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
6601 CHECK_CFG_EXCEPTION;
6602 } else if (!constrained_call->valuetype) {
6603 int dreg = alloc_preg (cfg);
6606 * The type parameter is instantiated as a reference
6607 * type. We have a managed pointer on the stack, so
6608 * we need to dereference it here.
6610 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6611 ins->type = STACK_OBJ;
6613 } else if (cmethod->klass->valuetype)
6615 constrained_call = NULL;
6618 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6622 * If the callee is a shared method, then its static cctor
6623 * might not get called after the call was patched.
6625 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6626 emit_generic_class_init (cfg, cmethod->klass);
6627 CHECK_TYPELOAD (cmethod->klass);
6630 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6631 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6632 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6633 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6634 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6637 * Pass vtable iff target method might
6638 * be shared, which means that sharing
6639 * is enabled for its class and its
6640 * context is sharable (and it's not a
6643 if (sharing_enabled && context_sharable &&
6644 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6648 if (cmethod && mini_method_get_context (cmethod) &&
6649 mini_method_get_context (cmethod)->method_inst) {
6650 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6651 MonoGenericContext *context = mini_method_get_context (cmethod);
6652 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6654 g_assert (!pass_vtable);
6656 if (sharing_enabled && context_sharable)
6660 if (cfg->generic_sharing_context && cmethod) {
6661 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6663 context_used = mono_method_check_context_used (cmethod);
6665 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6666 /* Generic method interface
6667 calls are resolved via a
6668 helper function and don't
6670 if (!cmethod_context || !cmethod_context->method_inst)
6671 pass_imt_from_rgctx = TRUE;
6675 * If a shared method calls another
6676 * shared method then the caller must
6677 * have a generic sharing context
6678 * because the magic trampoline
6679 * requires it. FIXME: We shouldn't
6680 * have to force the vtable/mrgctx
6681 * variable here. Instead there
6682 * should be a flag in the cfg to
6683 * request a generic sharing context.
6686 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6687 mono_get_vtable_var (cfg);
6692 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6694 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6696 CHECK_TYPELOAD (cmethod->klass);
6697 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6702 g_assert (!vtable_arg);
6704 if (!cfg->compile_aot) {
6706 * emit_get_rgctx_method () calls mono_class_vtable () so check
6707 * for type load errors before.
6709 mono_class_setup_vtable (cmethod->klass);
6710 CHECK_TYPELOAD (cmethod->klass);
6713 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6715 /* !marshalbyref is needed to properly handle generic methods + remoting */
6716 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6717 MONO_METHOD_IS_FINAL (cmethod)) &&
6718 !cmethod->klass->marshalbyref) {
6725 if (pass_imt_from_rgctx) {
6726 g_assert (!pass_vtable);
6729 imt_arg = emit_get_rgctx_method (cfg, context_used,
6730 cmethod, MONO_RGCTX_INFO_METHOD);
6734 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
6736 /* Calling virtual generic methods */
6737 if (cmethod && virtual &&
6738 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6739 !(MONO_METHOD_IS_FINAL (cmethod) &&
6740 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6741 mono_method_signature (cmethod)->generic_param_count) {
6742 MonoInst *this_temp, *this_arg_temp, *store;
6743 MonoInst *iargs [4];
6745 g_assert (mono_method_signature (cmethod)->is_inflated);
6747 /* Prevent inlining of methods that contain indirect calls */
6750 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
6751 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt) {
6752 g_assert (!imt_arg);
6754 g_assert (cmethod->is_inflated);
6755 imt_arg = emit_get_rgctx_method (cfg, context_used,
6756 cmethod, MONO_RGCTX_INFO_METHOD);
6757 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg, NULL);
6761 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6762 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6763 MONO_ADD_INS (bblock, store);
6765 /* FIXME: This should be a managed pointer */
6766 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6768 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6769 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6770 cmethod, MONO_RGCTX_INFO_METHOD);
6771 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6772 addr = mono_emit_jit_icall (cfg,
6773 mono_helper_compile_generic_method, iargs);
6775 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6777 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL);
6780 if (!MONO_TYPE_IS_VOID (fsig->ret))
6781 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6783 CHECK_CFG_EXCEPTION;
6791 * Implement a workaround for the inherent races involved in locking:
6797 * If a thread abort happens between the call to Monitor.Enter () and the start of the
6798 * try block, the Exit () won't be executed, see:
6799 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
6800 * To work around this, we extend such try blocks to include the last x bytes
6801 * of the Monitor.Enter () call.
6803 if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
6804 MonoBasicBlock *tbb;
6806 GET_BBLOCK (cfg, tbb, ip + 5);
6808 * Only extend try blocks with a finally, to avoid catching exceptions thrown
6809 * from Monitor.Enter like ArgumentNullException.
6811 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
6812 /* Mark this bblock as needing to be extended */
6813 tbb->extend_try_block = TRUE;
6817 /* Conversion to a JIT intrinsic */
6818 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6820 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6821 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6826 CHECK_CFG_EXCEPTION;
6834 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6835 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6836 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
6837 !g_list_find (dont_inline, cmethod)) {
6839 gboolean always = FALSE;
6841 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6842 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6843 /* Prevent inlining of methods that call wrappers */
6845 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6849 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, always))) {
6851 cfg->real_offset += 5;
6854 if (!MONO_TYPE_IS_VOID (fsig->ret))
6855 /* *sp is already set by inline_method */
6858 inline_costs += costs;
6864 inline_costs += 10 * num_calls++;
6866 /* Tail recursion elimination */
6867 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6868 gboolean has_vtargs = FALSE;
6871 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6874 /* keep it simple */
6875 for (i = fsig->param_count - 1; i >= 0; i--) {
6876 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6881 for (i = 0; i < n; ++i)
6882 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6883 MONO_INST_NEW (cfg, ins, OP_BR);
6884 MONO_ADD_INS (bblock, ins);
6885 tblock = start_bblock->out_bb [0];
6886 link_bblock (cfg, bblock, tblock);
6887 ins->inst_target_bb = tblock;
6888 start_new_bblock = 1;
6890 /* skip the CEE_RET, too */
6891 if (ip_in_bb (cfg, bblock, ip + 5))
6901 /* Generic sharing */
6902 /* FIXME: only do this for generic methods if
6903 they are not shared! */
6904 if (context_used && !imt_arg && !array_rank &&
6905 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6906 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6907 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6908 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6911 g_assert (cfg->generic_sharing_context && cmethod);
6915 * We are compiling a call to a
6916 * generic method from shared code,
6917 * which means that we have to look up
6918 * the method in the rgctx and do an
6921 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6924 /* Indirect calls */
6926 g_assert (!imt_arg);
6928 if (*ip == CEE_CALL)
6929 g_assert (context_used);
6930 else if (*ip == CEE_CALLI)
6931 g_assert (!vtable_arg);
6933 /* FIXME: what the hell is this??? */
6934 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6935 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6937 /* Prevent inlining of methods with indirect calls */
6943 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, vtable_arg);
6944 call = (MonoCallInst*)ins;
6946 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6948 * Instead of emitting an indirect call, emit a direct call
6949 * with the contents of the aotconst as the patch info.
6951 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6953 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6954 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
6957 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL);
6960 if (!MONO_TYPE_IS_VOID (fsig->ret))
6961 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6963 CHECK_CFG_EXCEPTION;
6974 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6975 MonoInst *val = sp [fsig->param_count];
6977 if (val->type == STACK_OBJ) {
6978 MonoInst *iargs [2];
6983 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6986 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6987 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
6988 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
6989 emit_write_barrier (cfg, addr, val, 0);
6990 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6991 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6993 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6996 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6997 if (!cmethod->klass->element_class->valuetype && !readonly)
6998 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6999 CHECK_TYPELOAD (cmethod->klass);
7002 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
7005 g_assert_not_reached ();
7008 CHECK_CFG_EXCEPTION;
7015 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
7017 if (!MONO_TYPE_IS_VOID (fsig->ret))
7018 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7020 CHECK_CFG_EXCEPTION;
7027 /* Tail prefix / tail call optimization */
7029 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
7030 /* FIXME: runtime generic context pointer for jumps? */
7031 /* FIXME: handle this for generic sharing eventually */
7032 supported_tail_call = cmethod &&
7033 ((((ins_flag & MONO_INST_TAILCALL) && (*ip == CEE_CALL))
7034 ))//|| ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && ip [5] == CEE_RET))
7035 && !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig);
7037 if (supported_tail_call) {
7040 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
7043 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
7045 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
7046 /* Handle tail calls similarly to calls */
7047 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE, FALSE);
7049 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
7050 call->tail_call = TRUE;
7051 call->method = cmethod;
7052 call->signature = mono_method_signature (cmethod);
7055 * We implement tail calls by storing the actual arguments into the
7056 * argument variables, then emitting a CEE_JMP.
7058 for (i = 0; i < n; ++i) {
7059 /* Prevent argument from being register allocated */
7060 arg_array [i]->flags |= MONO_INST_VOLATILE;
7061 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
7065 ins = (MonoInst*)call;
7066 ins->inst_p0 = cmethod;
7067 ins->inst_p1 = arg_array [0];
7068 MONO_ADD_INS (bblock, ins);
7069 link_bblock (cfg, bblock, end_bblock);
7070 start_new_bblock = 1;
7072 CHECK_CFG_EXCEPTION;
7077 // FIXME: Eliminate unreachable epilogs
7080 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
7081 * only reachable from this call.
7083 GET_BBLOCK (cfg, tblock, ip);
7084 if (tblock == bblock || tblock->in_count == 0)
7091 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
7092 imt_arg, vtable_arg);
7094 if (!MONO_TYPE_IS_VOID (fsig->ret))
7095 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7097 CHECK_CFG_EXCEPTION;
7104 if (cfg->method != method) {
7105 /* return from inlined method */
7107 * If in_count == 0, that means the ret is unreachable due to
7108 * being preceded by a throw. In that case, inline_method () will
7109 * handle setting the return value
7110 * (test case: test_0_inline_throw ()).
7112 if (return_var && cfg->cbb->in_count) {
7116 //g_assert (returnvar != -1);
7117 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
7118 cfg->ret_var_set = TRUE;
7122 MonoType *ret_type = mono_method_signature (method)->ret;
7126 * Place a seq point here too even though the IL stack is not
7127 * empty, so a step over on
7130 * will work correctly.
7132 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
7133 MONO_ADD_INS (cfg->cbb, ins);
7136 g_assert (!return_var);
7140 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
7143 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
7146 if (!cfg->vret_addr) {
7149 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
7151 EMIT_NEW_RETLOADA (cfg, ret_addr);
7153 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
7154 ins->klass = mono_class_from_mono_type (ret_type);
7157 #ifdef MONO_ARCH_SOFT_FLOAT
7158 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
7159 MonoInst *iargs [1];
7163 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
7164 mono_arch_emit_setret (cfg, method, conv);
7166 mono_arch_emit_setret (cfg, method, *sp);
7169 mono_arch_emit_setret (cfg, method, *sp);
7174 if (sp != stack_start)
7176 MONO_INST_NEW (cfg, ins, OP_BR);
7178 ins->inst_target_bb = end_bblock;
7179 MONO_ADD_INS (bblock, ins);
7180 link_bblock (cfg, bblock, end_bblock);
7181 start_new_bblock = 1;
7185 MONO_INST_NEW (cfg, ins, OP_BR);
7187 target = ip + 1 + (signed char)(*ip);
7189 GET_BBLOCK (cfg, tblock, target);
7190 link_bblock (cfg, bblock, tblock);
7191 ins->inst_target_bb = tblock;
7192 if (sp != stack_start) {
7193 handle_stack_args (cfg, stack_start, sp - stack_start);
7195 CHECK_UNVERIFIABLE (cfg);
7197 MONO_ADD_INS (bblock, ins);
7198 start_new_bblock = 1;
7199 inline_costs += BRANCH_COST;
7213 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
7215 target = ip + 1 + *(signed char*)ip;
7221 inline_costs += BRANCH_COST;
7225 MONO_INST_NEW (cfg, ins, OP_BR);
7228 target = ip + 4 + (gint32)read32(ip);
7230 GET_BBLOCK (cfg, tblock, target);
7231 link_bblock (cfg, bblock, tblock);
7232 ins->inst_target_bb = tblock;
7233 if (sp != stack_start) {
7234 handle_stack_args (cfg, stack_start, sp - stack_start);
7236 CHECK_UNVERIFIABLE (cfg);
7239 MONO_ADD_INS (bblock, ins);
7241 start_new_bblock = 1;
7242 inline_costs += BRANCH_COST;
7249 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
7250 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
7251 guint32 opsize = is_short ? 1 : 4;
7253 CHECK_OPSIZE (opsize);
7255 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
7258 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
7263 GET_BBLOCK (cfg, tblock, target);
7264 link_bblock (cfg, bblock, tblock);
7265 GET_BBLOCK (cfg, tblock, ip);
7266 link_bblock (cfg, bblock, tblock);
7268 if (sp != stack_start) {
7269 handle_stack_args (cfg, stack_start, sp - stack_start);
7270 CHECK_UNVERIFIABLE (cfg);
7273 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
7274 cmp->sreg1 = sp [0]->dreg;
7275 type_from_op (cmp, sp [0], NULL);
7278 #if SIZEOF_REGISTER == 4
7279 if (cmp->opcode == OP_LCOMPARE_IMM) {
7280 /* Convert it to OP_LCOMPARE */
7281 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7282 ins->type = STACK_I8;
7283 ins->dreg = alloc_dreg (cfg, STACK_I8);
7285 MONO_ADD_INS (bblock, ins);
7286 cmp->opcode = OP_LCOMPARE;
7287 cmp->sreg2 = ins->dreg;
7290 MONO_ADD_INS (bblock, cmp);
7292 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
7293 type_from_op (ins, sp [0], NULL);
7294 MONO_ADD_INS (bblock, ins);
7295 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
7296 GET_BBLOCK (cfg, tblock, target);
7297 ins->inst_true_bb = tblock;
7298 GET_BBLOCK (cfg, tblock, ip);
7299 ins->inst_false_bb = tblock;
7300 start_new_bblock = 2;
7303 inline_costs += BRANCH_COST;
7318 MONO_INST_NEW (cfg, ins, *ip);
7320 target = ip + 4 + (gint32)read32(ip);
7326 inline_costs += BRANCH_COST;
7330 MonoBasicBlock **targets;
7331 MonoBasicBlock *default_bblock;
7332 MonoJumpInfoBBTable *table;
7333 int offset_reg = alloc_preg (cfg);
7334 int target_reg = alloc_preg (cfg);
7335 int table_reg = alloc_preg (cfg);
7336 int sum_reg = alloc_preg (cfg);
7337 gboolean use_op_switch;
7341 n = read32 (ip + 1);
7344 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
7348 CHECK_OPSIZE (n * sizeof (guint32));
7349 target = ip + n * sizeof (guint32);
7351 GET_BBLOCK (cfg, default_bblock, target);
7352 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7354 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
7355 for (i = 0; i < n; ++i) {
7356 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
7357 targets [i] = tblock;
7358 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
7362 if (sp != stack_start) {
7364 * Link the current bb with the targets as well, so handle_stack_args
7365 * will set their in_stack correctly.
7367 link_bblock (cfg, bblock, default_bblock);
7368 for (i = 0; i < n; ++i)
7369 link_bblock (cfg, bblock, targets [i]);
7371 handle_stack_args (cfg, stack_start, sp - stack_start);
7373 CHECK_UNVERIFIABLE (cfg);
7376 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
7377 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
7380 for (i = 0; i < n; ++i)
7381 link_bblock (cfg, bblock, targets [i]);
7383 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
7384 table->table = targets;
7385 table->table_size = n;
7387 use_op_switch = FALSE;
7389 /* ARM implements SWITCH statements differently */
7390 /* FIXME: Make it use the generic implementation */
7391 if (!cfg->compile_aot)
7392 use_op_switch = TRUE;
7395 if (COMPILE_LLVM (cfg))
7396 use_op_switch = TRUE;
7398 cfg->cbb->has_jump_table = 1;
7400 if (use_op_switch) {
7401 MONO_INST_NEW (cfg, ins, OP_SWITCH);
7402 ins->sreg1 = src1->dreg;
7403 ins->inst_p0 = table;
7404 ins->inst_many_bb = targets;
7405 ins->klass = GUINT_TO_POINTER (n);
7406 MONO_ADD_INS (cfg->cbb, ins);
7408 if (sizeof (gpointer) == 8)
7409 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
7411 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
7413 #if SIZEOF_REGISTER == 8
7414 /* The upper word might not be zero, and we add it to a 64 bit address later */
7415 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
7418 if (cfg->compile_aot) {
7419 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
7421 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
7422 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
7423 ins->inst_p0 = table;
7424 ins->dreg = table_reg;
7425 MONO_ADD_INS (cfg->cbb, ins);
7428 /* FIXME: Use load_memindex */
7429 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
7430 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
7431 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
7433 start_new_bblock = 1;
7434 inline_costs += (BRANCH_COST * 2);
7454 dreg = alloc_freg (cfg);
7457 dreg = alloc_lreg (cfg);
7460 dreg = alloc_preg (cfg);
7463 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
7464 ins->type = ldind_type [*ip - CEE_LDIND_I1];
7465 ins->flags |= ins_flag;
7467 MONO_ADD_INS (bblock, ins);
7482 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
7483 ins->flags |= ins_flag;
7485 MONO_ADD_INS (bblock, ins);
7487 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
7488 emit_write_barrier (cfg, sp [0], sp [1], -1);
7497 MONO_INST_NEW (cfg, ins, (*ip));
7499 ins->sreg1 = sp [0]->dreg;
7500 ins->sreg2 = sp [1]->dreg;
7501 type_from_op (ins, sp [0], sp [1]);
7503 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7505 /* Use the immediate opcodes if possible */
7506 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
7507 int imm_opcode = mono_op_to_op_imm (ins->opcode);
7508 if (imm_opcode != -1) {
7509 ins->opcode = imm_opcode;
7510 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7513 sp [1]->opcode = OP_NOP;
7517 MONO_ADD_INS ((cfg)->cbb, (ins));
7519 *sp++ = mono_decompose_opcode (cfg, ins);
7536 MONO_INST_NEW (cfg, ins, (*ip));
7538 ins->sreg1 = sp [0]->dreg;
7539 ins->sreg2 = sp [1]->dreg;
7540 type_from_op (ins, sp [0], sp [1]);
7542 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7543 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7545 /* FIXME: Pass opcode to is_inst_imm */
7547 /* Use the immediate opcodes if possible */
7548 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7551 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7552 if (imm_opcode != -1) {
7553 ins->opcode = imm_opcode;
7554 if (sp [1]->opcode == OP_I8CONST) {
7555 #if SIZEOF_REGISTER == 8
7556 ins->inst_imm = sp [1]->inst_l;
7558 ins->inst_ls_word = sp [1]->inst_ls_word;
7559 ins->inst_ms_word = sp [1]->inst_ms_word;
7563 ins->inst_imm = (gssize)(sp [1]->inst_c0);
7566 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7567 if (sp [1]->next == NULL)
7568 sp [1]->opcode = OP_NOP;
7571 MONO_ADD_INS ((cfg)->cbb, (ins));
7573 *sp++ = mono_decompose_opcode (cfg, ins);
7586 case CEE_CONV_OVF_I8:
7587 case CEE_CONV_OVF_U8:
7591 /* Special case this earlier so we have long constants in the IR */
7592 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7593 int data = sp [-1]->inst_c0;
7594 sp [-1]->opcode = OP_I8CONST;
7595 sp [-1]->type = STACK_I8;
7596 #if SIZEOF_REGISTER == 8
7597 if ((*ip) == CEE_CONV_U8)
7598 sp [-1]->inst_c0 = (guint32)data;
7600 sp [-1]->inst_c0 = data;
7602 sp [-1]->inst_ls_word = data;
7603 if ((*ip) == CEE_CONV_U8)
7604 sp [-1]->inst_ms_word = 0;
7606 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7608 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7615 case CEE_CONV_OVF_I4:
7616 case CEE_CONV_OVF_I1:
7617 case CEE_CONV_OVF_I2:
7618 case CEE_CONV_OVF_I:
7619 case CEE_CONV_OVF_U:
7622 if (sp [-1]->type == STACK_R8) {
7623 ADD_UNOP (CEE_CONV_OVF_I8);
7630 case CEE_CONV_OVF_U1:
7631 case CEE_CONV_OVF_U2:
7632 case CEE_CONV_OVF_U4:
7635 if (sp [-1]->type == STACK_R8) {
7636 ADD_UNOP (CEE_CONV_OVF_U8);
7643 case CEE_CONV_OVF_I1_UN:
7644 case CEE_CONV_OVF_I2_UN:
7645 case CEE_CONV_OVF_I4_UN:
7646 case CEE_CONV_OVF_I8_UN:
7647 case CEE_CONV_OVF_U1_UN:
7648 case CEE_CONV_OVF_U2_UN:
7649 case CEE_CONV_OVF_U4_UN:
7650 case CEE_CONV_OVF_U8_UN:
7651 case CEE_CONV_OVF_I_UN:
7652 case CEE_CONV_OVF_U_UN:
7659 CHECK_CFG_EXCEPTION;
7663 case CEE_ADD_OVF_UN:
7665 case CEE_MUL_OVF_UN:
7667 case CEE_SUB_OVF_UN:
7675 token = read32 (ip + 1);
7676 klass = mini_get_class (method, token, generic_context);
7677 CHECK_TYPELOAD (klass);
7679 if (generic_class_is_reference_type (cfg, klass)) {
7680 MonoInst *store, *load;
7681 int dreg = alloc_preg (cfg);
7683 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7684 load->flags |= ins_flag;
7685 MONO_ADD_INS (cfg->cbb, load);
7687 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7688 store->flags |= ins_flag;
7689 MONO_ADD_INS (cfg->cbb, store);
7691 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
7692 emit_write_barrier (cfg, sp [0], sp [1], -1);
7694 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7706 token = read32 (ip + 1);
7707 klass = mini_get_class (method, token, generic_context);
7708 CHECK_TYPELOAD (klass);
7710 /* Optimize the common ldobj+stloc combination */
7720 loc_index = ip [5] - CEE_STLOC_0;
7727 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7728 CHECK_LOCAL (loc_index);
7730 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7731 ins->dreg = cfg->locals [loc_index]->dreg;
7737 /* Optimize the ldobj+stobj combination */
7738 /* The reference case ends up being a load+store anyway */
7739 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7744 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7751 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7760 CHECK_STACK_OVF (1);
7762 n = read32 (ip + 1);
7764 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7765 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7766 ins->type = STACK_OBJ;
7769 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7770 MonoInst *iargs [1];
7772 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7773 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7775 if (cfg->opt & MONO_OPT_SHARED) {
7776 MonoInst *iargs [3];
7778 if (cfg->compile_aot) {
7779 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7781 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7782 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7783 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7784 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7785 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7787 if (bblock->out_of_line) {
7788 MonoInst *iargs [2];
7790 if (image == mono_defaults.corlib) {
7792 * Avoid relocations in AOT and save some space by using a
7793 * version of helper_ldstr specialized to mscorlib.
7795 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7796 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7798 /* Avoid creating the string object */
7799 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7800 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7801 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7805 if (cfg->compile_aot) {
7806 NEW_LDSTRCONST (cfg, ins, image, n);
7808 MONO_ADD_INS (bblock, ins);
7811 NEW_PCONST (cfg, ins, NULL);
7812 ins->type = STACK_OBJ;
7813 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7815 OUT_OF_MEMORY_FAILURE;
7818 MONO_ADD_INS (bblock, ins);
7827 MonoInst *iargs [2];
7828 MonoMethodSignature *fsig;
7831 MonoInst *vtable_arg = NULL;
7834 token = read32 (ip + 1);
7835 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7836 if (!cmethod || mono_loader_get_last_error ())
7838 fsig = mono_method_get_signature (cmethod, image, token);
7842 mono_save_token_info (cfg, image, token, cmethod);
7844 if (!mono_class_init (cmethod->klass))
7847 if (cfg->generic_sharing_context)
7848 context_used = mono_method_check_context_used (cmethod);
7850 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7851 if (check_linkdemand (cfg, method, cmethod))
7853 CHECK_CFG_EXCEPTION;
7854 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7855 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7858 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
7859 emit_generic_class_init (cfg, cmethod->klass);
7860 CHECK_TYPELOAD (cmethod->klass);
7863 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7864 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7865 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7866 mono_class_vtable (cfg->domain, cmethod->klass);
7867 CHECK_TYPELOAD (cmethod->klass);
7869 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7870 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7873 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7874 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7876 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7878 CHECK_TYPELOAD (cmethod->klass);
7879 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7884 n = fsig->param_count;
7888 * Generate smaller code for the common newobj <exception> instruction in
7889 * argument checking code.
7891 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7892 is_exception_class (cmethod->klass) && n <= 2 &&
7893 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7894 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7895 MonoInst *iargs [3];
7897 g_assert (!vtable_arg);
7901 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7904 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7908 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7913 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7916 g_assert_not_reached ();
7924 /* move the args to allow room for 'this' in the first position */
7930 /* check_call_signature () requires sp[0] to be set */
7931 this_ins.type = STACK_OBJ;
7933 if (check_call_signature (cfg, fsig, sp))
7938 if (mini_class_is_system_array (cmethod->klass)) {
7939 g_assert (!vtable_arg);
7941 *sp = emit_get_rgctx_method (cfg, context_used,
7942 cmethod, MONO_RGCTX_INFO_METHOD);
7944 /* Avoid varargs in the common case */
7945 if (fsig->param_count == 1)
7946 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7947 else if (fsig->param_count == 2)
7948 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7949 else if (fsig->param_count == 3)
7950 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
7952 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7953 } else if (cmethod->string_ctor) {
7954 g_assert (!context_used);
7955 g_assert (!vtable_arg);
7956 /* we simply pass a null pointer */
7957 EMIT_NEW_PCONST (cfg, *sp, NULL);
7958 /* now call the string ctor */
7959 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL, NULL);
7961 MonoInst* callvirt_this_arg = NULL;
7963 if (cmethod->klass->valuetype) {
7964 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7965 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7966 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7971 * The code generated by mini_emit_virtual_call () expects
7972 * iargs [0] to be a boxed instance, but luckily the vcall
7973 * will be transformed into a normal call there.
7975 } else if (context_used) {
7976 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
7979 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7981 CHECK_TYPELOAD (cmethod->klass);
7984 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7985 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7986 * As a workaround, we call class cctors before allocating objects.
7988 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7989 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7990 if (cfg->verbose_level > 2)
7991 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7992 class_inits = g_slist_prepend (class_inits, vtable);
7995 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
7998 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
8001 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
8003 /* Now call the actual ctor */
8004 /* Avoid virtual calls to ctors if possible */
8005 if (cmethod->klass->marshalbyref)
8006 callvirt_this_arg = sp [0];
8009 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
8010 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8011 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8016 CHECK_CFG_EXCEPTION;
8017 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
8018 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
8019 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
8020 !g_list_find (dont_inline, cmethod)) {
8023 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
8024 cfg->real_offset += 5;
8027 inline_costs += costs - 5;
8030 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL, NULL);
8032 } else if (context_used &&
8033 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
8034 !mono_class_generic_sharing_enabled (cmethod->klass))) {
8035 MonoInst *cmethod_addr;
8037 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
8038 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8040 mono_emit_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
8043 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp,
8044 callvirt_this_arg, NULL, vtable_arg);
8048 if (alloc == NULL) {
8050 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
8051 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
8065 token = read32 (ip + 1);
8066 klass = mini_get_class (method, token, generic_context);
8067 CHECK_TYPELOAD (klass);
8068 if (sp [0]->type != STACK_OBJ)
8071 if (cfg->generic_sharing_context)
8072 context_used = mono_class_check_context_used (klass);
8074 if (!context_used && mini_class_has_reference_variant_generic_argument (klass, context_used)) {
8075 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
8082 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8085 /*FIXME AOT support*/
8086 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8088 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
8089 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
8092 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8093 MonoMethod *mono_castclass;
8094 MonoInst *iargs [1];
8097 mono_castclass = mono_marshal_get_castclass (klass);
8100 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
8101 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8102 CHECK_CFG_EXCEPTION;
8103 g_assert (costs > 0);
8106 cfg->real_offset += 5;
8111 inline_costs += costs;
8114 ins = handle_castclass (cfg, klass, *sp, context_used);
8115 CHECK_CFG_EXCEPTION;
8125 token = read32 (ip + 1);
8126 klass = mini_get_class (method, token, generic_context);
8127 CHECK_TYPELOAD (klass);
8128 if (sp [0]->type != STACK_OBJ)
8131 if (cfg->generic_sharing_context)
8132 context_used = mono_class_check_context_used (klass);
8134 if (!context_used && mini_class_has_reference_variant_generic_argument (klass, context_used)) {
8135 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
8142 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8145 /*FIXME AOT support*/
8146 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8148 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
8151 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8152 MonoMethod *mono_isinst;
8153 MonoInst *iargs [1];
8156 mono_isinst = mono_marshal_get_isinst (klass);
8159 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
8160 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8161 CHECK_CFG_EXCEPTION;
8162 g_assert (costs > 0);
8165 cfg->real_offset += 5;
8170 inline_costs += costs;
8173 ins = handle_isinst (cfg, klass, *sp, context_used);
8174 CHECK_CFG_EXCEPTION;
8181 case CEE_UNBOX_ANY: {
8185 token = read32 (ip + 1);
8186 klass = mini_get_class (method, token, generic_context);
8187 CHECK_TYPELOAD (klass);
8189 mono_save_token_info (cfg, image, token, klass);
8191 if (cfg->generic_sharing_context)
8192 context_used = mono_class_check_context_used (klass);
8194 if (generic_class_is_reference_type (cfg, klass)) {
8195 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
8196 if (!context_used && mini_class_has_reference_variant_generic_argument (klass, context_used)) {
8197 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
8204 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8207 /*FIXME AOT support*/
8208 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8210 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
8211 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
8214 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8215 MonoMethod *mono_castclass;
8216 MonoInst *iargs [1];
8219 mono_castclass = mono_marshal_get_castclass (klass);
8222 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
8223 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8224 CHECK_CFG_EXCEPTION;
8225 g_assert (costs > 0);
8228 cfg->real_offset += 5;
8232 inline_costs += costs;
8234 ins = handle_castclass (cfg, klass, *sp, context_used);
8235 CHECK_CFG_EXCEPTION;
8243 if (mono_class_is_nullable (klass)) {
8244 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
8251 ins = handle_unbox (cfg, klass, sp, context_used);
8257 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
8270 token = read32 (ip + 1);
8271 klass = mini_get_class (method, token, generic_context);
8272 CHECK_TYPELOAD (klass);
8274 mono_save_token_info (cfg, image, token, klass);
8276 if (cfg->generic_sharing_context)
8277 context_used = mono_class_check_context_used (klass);
8279 if (generic_class_is_reference_type (cfg, klass)) {
8285 if (klass == mono_defaults.void_class)
8287 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
8289 /* frequent check in generic code: box (struct), brtrue */
8291 // FIXME: LLVM can't handle the inconsistent bb linking
8292 if (!mono_class_is_nullable (klass) &&
8293 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
8294 (ip [5] == CEE_BRTRUE ||
8295 ip [5] == CEE_BRTRUE_S ||
8296 ip [5] == CEE_BRFALSE ||
8297 ip [5] == CEE_BRFALSE_S)) {
8298 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
8300 MonoBasicBlock *true_bb, *false_bb;
8304 if (cfg->verbose_level > 3) {
8305 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8306 printf ("<box+brtrue opt>\n");
8314 target = ip + 1 + (signed char)(*ip);
8321 target = ip + 4 + (gint)(read32 (ip));
8325 g_assert_not_reached ();
8329 * We need to link both bblocks, since it is needed for handling stack
8330 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
8331 * Branching to only one of them would lead to inconsistencies, so
8332 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
8334 GET_BBLOCK (cfg, true_bb, target);
8335 GET_BBLOCK (cfg, false_bb, ip);
8337 mono_link_bblock (cfg, cfg->cbb, true_bb);
8338 mono_link_bblock (cfg, cfg->cbb, false_bb);
8340 if (sp != stack_start) {
8341 handle_stack_args (cfg, stack_start, sp - stack_start);
8343 CHECK_UNVERIFIABLE (cfg);
8346 if (COMPILE_LLVM (cfg)) {
8347 dreg = alloc_ireg (cfg);
8348 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
8349 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
8351 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
8353 /* The JIT can't eliminate the iconst+compare */
8354 MONO_INST_NEW (cfg, ins, OP_BR);
8355 ins->inst_target_bb = is_true ? true_bb : false_bb;
8356 MONO_ADD_INS (cfg->cbb, ins);
8359 start_new_bblock = 1;
8363 *sp++ = handle_box (cfg, val, klass, context_used);
8365 CHECK_CFG_EXCEPTION;
8374 token = read32 (ip + 1);
8375 klass = mini_get_class (method, token, generic_context);
8376 CHECK_TYPELOAD (klass);
8378 mono_save_token_info (cfg, image, token, klass);
8380 if (cfg->generic_sharing_context)
8381 context_used = mono_class_check_context_used (klass);
8383 if (mono_class_is_nullable (klass)) {
8386 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
8387 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
8391 ins = handle_unbox (cfg, klass, sp, context_used);
8401 MonoClassField *field;
8405 if (*ip == CEE_STFLD) {
8412 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
8414 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
8417 token = read32 (ip + 1);
8418 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8419 field = mono_method_get_wrapper_data (method, token);
8420 klass = field->parent;
8423 field = mono_field_from_token (image, token, &klass, generic_context);
8427 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8428 FIELD_ACCESS_FAILURE;
8429 mono_class_init (klass);
8431 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
8432 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
8433 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8434 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8437 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
8438 if (*ip == CEE_STFLD) {
8439 if (target_type_is_incompatible (cfg, field->type, sp [1]))
8441 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8442 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
8443 MonoInst *iargs [5];
8446 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8447 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8448 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
8452 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8453 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
8454 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8455 CHECK_CFG_EXCEPTION;
8456 g_assert (costs > 0);
8458 cfg->real_offset += 5;
8461 inline_costs += costs;
8463 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
8468 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8470 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
8471 if (sp [0]->opcode != OP_LDADDR)
8472 store->flags |= MONO_INST_FAULT;
8474 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
8475 /* insert call to write barrier */
8479 dreg = alloc_preg (cfg);
8480 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8481 emit_write_barrier (cfg, ptr, sp [1], -1);
8484 store->flags |= ins_flag;
8491 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8492 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
8493 MonoInst *iargs [4];
8496 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8497 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8498 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
8499 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8500 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
8501 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8502 CHECK_CFG_EXCEPTION;
8504 g_assert (costs > 0);
8506 cfg->real_offset += 5;
8510 inline_costs += costs;
8512 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
8516 if (sp [0]->type == STACK_VTYPE) {
8519 /* Have to compute the address of the variable */
8521 var = get_vreg_to_inst (cfg, sp [0]->dreg);
8523 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
8525 g_assert (var->klass == klass);
8527 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
8531 if (*ip == CEE_LDFLDA) {
8532 if (sp [0]->type == STACK_OBJ) {
8533 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
8534 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
8537 dreg = alloc_preg (cfg);
8539 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8540 ins->klass = mono_class_from_mono_type (field->type);
8541 ins->type = STACK_MP;
8546 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8548 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
8549 load->flags |= ins_flag;
8550 if (sp [0]->opcode != OP_LDADDR)
8551 load->flags |= MONO_INST_FAULT;
8562 MonoClassField *field;
8563 gpointer addr = NULL;
8564 gboolean is_special_static;
8568 token = read32 (ip + 1);
8570 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8571 field = mono_method_get_wrapper_data (method, token);
8572 klass = field->parent;
8575 field = mono_field_from_token (image, token, &klass, generic_context);
8578 mono_class_init (klass);
8579 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8580 FIELD_ACCESS_FAILURE;
8582 /* if the class is Critical then transparent code cannot access its fields */
8583 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8584 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8587 * We can only support shared generic static
8588 * field access on architectures where the
8589 * trampoline code has been extended to handle
8590 * the generic class init.
8592 #ifndef MONO_ARCH_VTABLE_REG
8593 GENERIC_SHARING_FAILURE (*ip);
8596 if (cfg->generic_sharing_context)
8597 context_used = mono_class_check_context_used (klass);
8599 ftype = mono_field_get_type (field);
8601 g_assert (!(ftype->attrs & FIELD_ATTRIBUTE_LITERAL));
8603 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8604 * to be called here.
8606 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8607 mono_class_vtable (cfg->domain, klass);
8608 CHECK_TYPELOAD (klass);
8610 mono_domain_lock (cfg->domain);
8611 if (cfg->domain->special_static_fields)
8612 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8613 mono_domain_unlock (cfg->domain);
8615 is_special_static = mono_class_field_is_special_static (field);
8617 /* Generate IR to compute the field address */
8618 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
8620 * Fast access to TLS data
8621 * Inline version of get_thread_static_data () in
8625 int idx, static_data_reg, array_reg, dreg;
8626 MonoInst *thread_ins;
8628 // offset &= 0x7fffffff;
8629 // idx = (offset >> 24) - 1;
8630 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
8632 thread_ins = mono_get_thread_intrinsic (cfg);
8633 MONO_ADD_INS (cfg->cbb, thread_ins);
8634 static_data_reg = alloc_ireg (cfg);
8635 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
8637 if (cfg->compile_aot) {
8638 int offset_reg, offset2_reg, idx_reg;
8640 /* For TLS variables, this will return the TLS offset */
8641 EMIT_NEW_SFLDACONST (cfg, ins, field);
8642 offset_reg = ins->dreg;
8643 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
8644 idx_reg = alloc_ireg (cfg);
8645 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
8646 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
8647 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
8648 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
8649 array_reg = alloc_ireg (cfg);
8650 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
8651 offset2_reg = alloc_ireg (cfg);
8652 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
8653 dreg = alloc_ireg (cfg);
8654 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
8656 offset = (gsize)addr & 0x7fffffff;
8657 idx = (offset >> 24) - 1;
8659 array_reg = alloc_ireg (cfg);
8660 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
8661 dreg = alloc_ireg (cfg);
8662 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
8664 } else if ((cfg->opt & MONO_OPT_SHARED) ||
8665 (cfg->compile_aot && is_special_static) ||
8666 (context_used && is_special_static)) {
8667 MonoInst *iargs [2];
8669 g_assert (field->parent);
8670 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8672 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8673 field, MONO_RGCTX_INFO_CLASS_FIELD);
8675 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8677 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8678 } else if (context_used) {
8679 MonoInst *static_data;
8682 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8683 method->klass->name_space, method->klass->name, method->name,
8684 depth, field->offset);
8687 if (mono_class_needs_cctor_run (klass, method))
8688 emit_generic_class_init (cfg, klass);
8691 * The pointer we're computing here is
8693 * super_info.static_data + field->offset
8695 static_data = emit_get_rgctx_klass (cfg, context_used,
8696 klass, MONO_RGCTX_INFO_STATIC_DATA);
8698 if (field->offset == 0) {
8701 int addr_reg = mono_alloc_preg (cfg);
8702 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8704 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8705 MonoInst *iargs [2];
8707 g_assert (field->parent);
8708 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8709 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8710 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8712 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8714 CHECK_TYPELOAD (klass);
8716 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8717 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8718 if (cfg->verbose_level > 2)
8719 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8720 class_inits = g_slist_prepend (class_inits, vtable);
8722 if (cfg->run_cctors) {
8724 /* This makes so that inline cannot trigger */
8725 /* .cctors: too many apps depend on them */
8726 /* running with a specific order... */
8727 if (! vtable->initialized)
8729 ex = mono_runtime_class_init_full (vtable, FALSE);
8731 set_exception_object (cfg, ex);
8732 goto exception_exit;
8736 addr = (char*)vtable->data + field->offset;
8738 if (cfg->compile_aot)
8739 EMIT_NEW_SFLDACONST (cfg, ins, field);
8741 EMIT_NEW_PCONST (cfg, ins, addr);
8743 MonoInst *iargs [1];
8744 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8745 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8749 /* Generate IR to do the actual load/store operation */
8751 if (*ip == CEE_LDSFLDA) {
8752 ins->klass = mono_class_from_mono_type (ftype);
8753 ins->type = STACK_PTR;
8755 } else if (*ip == CEE_STSFLD) {
8760 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, sp [0]->dreg);
8761 store->flags |= ins_flag;
8763 gboolean is_const = FALSE;
8764 MonoVTable *vtable = NULL;
8766 if (!context_used) {
8767 vtable = mono_class_vtable (cfg->domain, klass);
8768 CHECK_TYPELOAD (klass);
8770 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8771 vtable->initialized && (ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8772 gpointer addr = (char*)vtable->data + field->offset;
8773 int ro_type = ftype->type;
8774 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
8775 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
8777 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8780 case MONO_TYPE_BOOLEAN:
8782 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8786 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8789 case MONO_TYPE_CHAR:
8791 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8795 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8800 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8804 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8810 case MONO_TYPE_FNPTR:
8811 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8812 type_to_eval_stack_type ((cfg), field->type, *sp);
8815 case MONO_TYPE_STRING:
8816 case MONO_TYPE_OBJECT:
8817 case MONO_TYPE_CLASS:
8818 case MONO_TYPE_SZARRAY:
8819 case MONO_TYPE_ARRAY:
8820 if (!mono_gc_is_moving ()) {
8821 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8822 type_to_eval_stack_type ((cfg), field->type, *sp);
8830 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8835 case MONO_TYPE_VALUETYPE:
8845 CHECK_STACK_OVF (1);
8847 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8848 load->flags |= ins_flag;
8861 token = read32 (ip + 1);
8862 klass = mini_get_class (method, token, generic_context);
8863 CHECK_TYPELOAD (klass);
8864 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8865 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8866 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
8867 generic_class_is_reference_type (cfg, klass)) {
8868 /* insert call to write barrier */
8869 emit_write_barrier (cfg, sp [0], sp [1], -1);
8881 const char *data_ptr;
8883 guint32 field_token;
8889 token = read32 (ip + 1);
8891 klass = mini_get_class (method, token, generic_context);
8892 CHECK_TYPELOAD (klass);
8894 if (cfg->generic_sharing_context)
8895 context_used = mono_class_check_context_used (klass);
8897 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
8898 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
8899 ins->sreg1 = sp [0]->dreg;
8900 ins->type = STACK_I4;
8901 ins->dreg = alloc_ireg (cfg);
8902 MONO_ADD_INS (cfg->cbb, ins);
8903 *sp = mono_decompose_opcode (cfg, ins);
8908 MonoClass *array_class = mono_array_class_get (klass, 1);
8909 /* FIXME: we cannot get a managed
8910 allocator because we can't get the
8911 open generic class's vtable. We
8912 have the same problem in
8913 handle_alloc(). This
8914 needs to be solved so that we can
8915 have managed allocs of shared
8918 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
8919 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
8921 MonoMethod *managed_alloc = NULL;
8923 /* FIXME: Decompose later to help abcrem */
8926 args [0] = emit_get_rgctx_klass (cfg, context_used,
8927 array_class, MONO_RGCTX_INFO_VTABLE);
8932 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
8934 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8936 if (cfg->opt & MONO_OPT_SHARED) {
8937 /* Decompose now to avoid problems with references to the domainvar */
8938 MonoInst *iargs [3];
8940 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8941 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8944 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8946 /* Decompose later since it is needed by abcrem */
8947 MonoClass *array_type = mono_array_class_get (klass, 1);
8948 mono_class_vtable (cfg->domain, array_type);
8949 CHECK_TYPELOAD (array_type);
8951 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8952 ins->dreg = alloc_preg (cfg);
8953 ins->sreg1 = sp [0]->dreg;
8954 ins->inst_newa_class = klass;
8955 ins->type = STACK_OBJ;
8957 MONO_ADD_INS (cfg->cbb, ins);
8958 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8959 cfg->cbb->has_array_access = TRUE;
8961 /* Needed so mono_emit_load_get_addr () gets called */
8962 mono_get_got_var (cfg);
8972 * we inline/optimize the initialization sequence if possible.
8973 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8974 * for small sizes open code the memcpy
8975 * ensure the rva field is big enough
8977 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8978 MonoMethod *memcpy_method = get_memcpy_method ();
8979 MonoInst *iargs [3];
8980 int add_reg = alloc_preg (cfg);
8982 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8983 if (cfg->compile_aot) {
8984 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8986 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8988 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8989 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8998 if (sp [0]->type != STACK_OBJ)
9001 dreg = alloc_preg (cfg);
9002 MONO_INST_NEW (cfg, ins, OP_LDLEN);
9003 ins->dreg = alloc_preg (cfg);
9004 ins->sreg1 = sp [0]->dreg;
9005 ins->type = STACK_I4;
9006 /* This flag will be inherited by the decomposition */
9007 ins->flags |= MONO_INST_FAULT;
9008 MONO_ADD_INS (cfg->cbb, ins);
9009 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
9010 cfg->cbb->has_array_access = TRUE;
9018 if (sp [0]->type != STACK_OBJ)
9021 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9023 klass = mini_get_class (method, read32 (ip + 1), generic_context);
9024 CHECK_TYPELOAD (klass);
9025 /* we need to make sure that this array is exactly the type it needs
9026 * to be for correctness. the wrappers are lax with their usage
9027 * so we need to ignore them here
9029 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
9030 MonoClass *array_class = mono_array_class_get (klass, 1);
9031 mini_emit_check_array_type (cfg, sp [0], array_class);
9032 CHECK_TYPELOAD (array_class);
9036 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9051 case CEE_LDELEM_REF: {
9057 if (*ip == CEE_LDELEM) {
9059 token = read32 (ip + 1);
9060 klass = mini_get_class (method, token, generic_context);
9061 CHECK_TYPELOAD (klass);
9062 mono_class_init (klass);
9065 klass = array_access_to_klass (*ip);
9067 if (sp [0]->type != STACK_OBJ)
9070 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9072 if (sp [1]->opcode == OP_ICONST) {
9073 int array_reg = sp [0]->dreg;
9074 int index_reg = sp [1]->dreg;
9075 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
9077 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
9078 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
9080 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9081 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
9084 if (*ip == CEE_LDELEM)
9097 case CEE_STELEM_REF:
9104 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9106 if (*ip == CEE_STELEM) {
9108 token = read32 (ip + 1);
9109 klass = mini_get_class (method, token, generic_context);
9110 CHECK_TYPELOAD (klass);
9111 mono_class_init (klass);
9114 klass = array_access_to_klass (*ip);
9116 if (sp [0]->type != STACK_OBJ)
9119 /* storing a NULL doesn't need any of the complex checks in stelemref */
9120 if (generic_class_is_reference_type (cfg, klass) &&
9121 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
9122 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
9123 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
9124 MonoInst *iargs [3];
9127 mono_class_setup_vtable (obj_array);
9128 g_assert (helper->slot);
9130 if (sp [0]->type != STACK_OBJ)
9132 if (sp [2]->type != STACK_OBJ)
9139 mono_emit_method_call (cfg, helper, iargs, sp [0]);
9141 if (sp [1]->opcode == OP_ICONST) {
9142 int array_reg = sp [0]->dreg;
9143 int index_reg = sp [1]->dreg;
9144 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
9146 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
9147 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
9149 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9150 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
9154 if (*ip == CEE_STELEM)
9161 case CEE_CKFINITE: {
9165 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
9166 ins->sreg1 = sp [0]->dreg;
9167 ins->dreg = alloc_freg (cfg);
9168 ins->type = STACK_R8;
9169 MONO_ADD_INS (bblock, ins);
9171 *sp++ = mono_decompose_opcode (cfg, ins);
9176 case CEE_REFANYVAL: {
9177 MonoInst *src_var, *src;
9179 int klass_reg = alloc_preg (cfg);
9180 int dreg = alloc_preg (cfg);
9183 MONO_INST_NEW (cfg, ins, *ip);
9186 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
9187 CHECK_TYPELOAD (klass);
9188 mono_class_init (klass);
9190 if (cfg->generic_sharing_context)
9191 context_used = mono_class_check_context_used (klass);
9194 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9196 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9197 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9198 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
9201 MonoInst *klass_ins;
9203 klass_ins = emit_get_rgctx_klass (cfg, context_used,
9204 klass, MONO_RGCTX_INFO_KLASS);
9207 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
9208 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
9210 mini_emit_class_check (cfg, klass_reg, klass);
9212 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
9213 ins->type = STACK_MP;
9218 case CEE_MKREFANY: {
9219 MonoInst *loc, *addr;
9222 MONO_INST_NEW (cfg, ins, *ip);
9225 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
9226 CHECK_TYPELOAD (klass);
9227 mono_class_init (klass);
9229 if (cfg->generic_sharing_context)
9230 context_used = mono_class_check_context_used (klass);
9232 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
9233 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
9236 MonoInst *const_ins;
9237 int type_reg = alloc_preg (cfg);
9239 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
9240 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
9241 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
9242 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
9243 } else if (cfg->compile_aot) {
9244 int const_reg = alloc_preg (cfg);
9245 int type_reg = alloc_preg (cfg);
9247 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
9248 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
9249 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
9250 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
9252 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
9253 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
9255 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
9257 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
9258 ins->type = STACK_VTYPE;
9259 ins->klass = mono_defaults.typed_reference_class;
9266 MonoClass *handle_class;
9268 CHECK_STACK_OVF (1);
9271 n = read32 (ip + 1);
9273 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
9274 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9275 handle = mono_method_get_wrapper_data (method, n);
9276 handle_class = mono_method_get_wrapper_data (method, n + 1);
9277 if (handle_class == mono_defaults.typehandle_class)
9278 handle = &((MonoClass*)handle)->byval_arg;
9281 handle = mono_ldtoken (image, n, &handle_class, generic_context);
9285 mono_class_init (handle_class);
9286 if (cfg->generic_sharing_context) {
9287 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
9288 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
9289 /* This case handles ldtoken
9290 of an open type, like for
9293 } else if (handle_class == mono_defaults.typehandle_class) {
9294 /* If we get a MONO_TYPE_CLASS
9295 then we need to provide the
9297 instantiation of it. */
9298 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
9301 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
9302 } else if (handle_class == mono_defaults.fieldhandle_class)
9303 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
9304 else if (handle_class == mono_defaults.methodhandle_class)
9305 context_used = mono_method_check_context_used (handle);
9307 g_assert_not_reached ();
9310 if ((cfg->opt & MONO_OPT_SHARED) &&
9311 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
9312 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
9313 MonoInst *addr, *vtvar, *iargs [3];
9314 int method_context_used;
9316 if (cfg->generic_sharing_context)
9317 method_context_used = mono_method_check_context_used (method);
9319 method_context_used = 0;
9321 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9323 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9324 EMIT_NEW_ICONST (cfg, iargs [1], n);
9325 if (method_context_used) {
9326 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
9327 method, MONO_RGCTX_INFO_METHOD);
9328 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
9330 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
9331 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
9333 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9335 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9337 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9339 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
9340 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
9341 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
9342 (cmethod->klass == mono_defaults.monotype_class->parent) &&
9343 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
9344 MonoClass *tclass = mono_class_from_mono_type (handle);
9346 mono_class_init (tclass);
9348 ins = emit_get_rgctx_klass (cfg, context_used,
9349 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
9350 } else if (cfg->compile_aot) {
9351 if (method->wrapper_type) {
9352 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
9353 /* Special case for static synchronized wrappers */
9354 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
9356 /* FIXME: n is not a normal token */
9357 cfg->disable_aot = TRUE;
9358 EMIT_NEW_PCONST (cfg, ins, NULL);
9361 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
9364 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
9366 ins->type = STACK_OBJ;
9367 ins->klass = cmethod->klass;
9370 MonoInst *addr, *vtvar;
9372 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9375 if (handle_class == mono_defaults.typehandle_class) {
9376 ins = emit_get_rgctx_klass (cfg, context_used,
9377 mono_class_from_mono_type (handle),
9378 MONO_RGCTX_INFO_TYPE);
9379 } else if (handle_class == mono_defaults.methodhandle_class) {
9380 ins = emit_get_rgctx_method (cfg, context_used,
9381 handle, MONO_RGCTX_INFO_METHOD);
9382 } else if (handle_class == mono_defaults.fieldhandle_class) {
9383 ins = emit_get_rgctx_field (cfg, context_used,
9384 handle, MONO_RGCTX_INFO_CLASS_FIELD);
9386 g_assert_not_reached ();
9388 } else if (cfg->compile_aot) {
9389 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
9391 EMIT_NEW_PCONST (cfg, ins, handle);
9393 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9394 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9395 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9405 MONO_INST_NEW (cfg, ins, OP_THROW);
9407 ins->sreg1 = sp [0]->dreg;
9409 bblock->out_of_line = TRUE;
9410 MONO_ADD_INS (bblock, ins);
9411 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
9412 MONO_ADD_INS (bblock, ins);
9415 link_bblock (cfg, bblock, end_bblock);
9416 start_new_bblock = 1;
9418 case CEE_ENDFINALLY:
9419 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
9420 MONO_ADD_INS (bblock, ins);
9422 start_new_bblock = 1;
9425 * Control will leave the method so empty the stack, otherwise
9426 * the next basic block will start with a nonempty stack.
9428 while (sp != stack_start) {
9436 if (*ip == CEE_LEAVE) {
9438 target = ip + 5 + (gint32)read32(ip + 1);
9441 target = ip + 2 + (signed char)(ip [1]);
9444 /* empty the stack */
9445 while (sp != stack_start) {
9450 * If this leave statement is in a catch block, check for a
9451 * pending exception, and rethrow it if necessary.
9452 * We avoid doing this in runtime invoke wrappers, since those are called
9453 * by native code which excepts the wrapper to catch all exceptions.
9455 for (i = 0; i < header->num_clauses; ++i) {
9456 MonoExceptionClause *clause = &header->clauses [i];
9459 * Use <= in the final comparison to handle clauses with multiple
9460 * leave statements, like in bug #78024.
9461 * The ordering of the exception clauses guarantees that we find the
9464 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
9466 MonoBasicBlock *dont_throw;
9471 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
9474 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
9476 NEW_BBLOCK (cfg, dont_throw);
9479 * Currently, we always rethrow the abort exception, despite the
9480 * fact that this is not correct. See thread6.cs for an example.
9481 * But propagating the abort exception is more important than
9482 * getting the sematics right.
9484 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
9485 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
9486 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
9488 MONO_START_BB (cfg, dont_throw);
9493 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
9495 MonoExceptionClause *clause;
9497 for (tmp = handlers; tmp; tmp = tmp->next) {
9499 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
9501 link_bblock (cfg, bblock, tblock);
9502 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
9503 ins->inst_target_bb = tblock;
9504 ins->inst_eh_block = clause;
9505 MONO_ADD_INS (bblock, ins);
9506 bblock->has_call_handler = 1;
9507 if (COMPILE_LLVM (cfg)) {
9508 MonoBasicBlock *target_bb;
9511 * Link the finally bblock with the target, since it will
9512 * conceptually branch there.
9513 * FIXME: Have to link the bblock containing the endfinally.
9515 GET_BBLOCK (cfg, target_bb, target);
9516 link_bblock (cfg, tblock, target_bb);
9519 g_list_free (handlers);
9522 MONO_INST_NEW (cfg, ins, OP_BR);
9523 MONO_ADD_INS (bblock, ins);
9524 GET_BBLOCK (cfg, tblock, target);
9525 link_bblock (cfg, bblock, tblock);
9526 ins->inst_target_bb = tblock;
9527 start_new_bblock = 1;
9529 if (*ip == CEE_LEAVE)
9538 * Mono specific opcodes
9540 case MONO_CUSTOM_PREFIX: {
9542 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
9546 case CEE_MONO_ICALL: {
9548 MonoJitICallInfo *info;
9550 token = read32 (ip + 2);
9551 func = mono_method_get_wrapper_data (method, token);
9552 info = mono_find_jit_icall_by_addr (func);
9555 CHECK_STACK (info->sig->param_count);
9556 sp -= info->sig->param_count;
9558 ins = mono_emit_jit_icall (cfg, info->func, sp);
9559 if (!MONO_TYPE_IS_VOID (info->sig->ret))
9563 inline_costs += 10 * num_calls++;
9567 case CEE_MONO_LDPTR: {
9570 CHECK_STACK_OVF (1);
9572 token = read32 (ip + 2);
9574 ptr = mono_method_get_wrapper_data (method, token);
9575 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
9576 MonoJitICallInfo *callinfo;
9577 const char *icall_name;
9579 icall_name = method->name + strlen ("__icall_wrapper_");
9580 g_assert (icall_name);
9581 callinfo = mono_find_jit_icall_by_name (icall_name);
9582 g_assert (callinfo);
9584 if (ptr == callinfo->func) {
9585 /* Will be transformed into an AOTCONST later */
9586 EMIT_NEW_PCONST (cfg, ins, ptr);
9592 /* FIXME: Generalize this */
9593 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
9594 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
9599 EMIT_NEW_PCONST (cfg, ins, ptr);
9602 inline_costs += 10 * num_calls++;
9603 /* Can't embed random pointers into AOT code */
9604 cfg->disable_aot = 1;
9607 case CEE_MONO_ICALL_ADDR: {
9608 MonoMethod *cmethod;
9611 CHECK_STACK_OVF (1);
9613 token = read32 (ip + 2);
9615 cmethod = mono_method_get_wrapper_data (method, token);
9617 if (cfg->compile_aot) {
9618 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
9620 ptr = mono_lookup_internal_call (cmethod);
9622 EMIT_NEW_PCONST (cfg, ins, ptr);
9628 case CEE_MONO_VTADDR: {
9629 MonoInst *src_var, *src;
9635 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9636 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
9641 case CEE_MONO_NEWOBJ: {
9642 MonoInst *iargs [2];
9644 CHECK_STACK_OVF (1);
9646 token = read32 (ip + 2);
9647 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9648 mono_class_init (klass);
9649 NEW_DOMAINCONST (cfg, iargs [0]);
9650 MONO_ADD_INS (cfg->cbb, iargs [0]);
9651 NEW_CLASSCONST (cfg, iargs [1], klass);
9652 MONO_ADD_INS (cfg->cbb, iargs [1]);
9653 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9655 inline_costs += 10 * num_calls++;
9658 case CEE_MONO_OBJADDR:
9661 MONO_INST_NEW (cfg, ins, OP_MOVE);
9662 ins->dreg = alloc_preg (cfg);
9663 ins->sreg1 = sp [0]->dreg;
9664 ins->type = STACK_MP;
9665 MONO_ADD_INS (cfg->cbb, ins);
9669 case CEE_MONO_LDNATIVEOBJ:
9671 * Similar to LDOBJ, but instead load the unmanaged
9672 * representation of the vtype to the stack.
9677 token = read32 (ip + 2);
9678 klass = mono_method_get_wrapper_data (method, token);
9679 g_assert (klass->valuetype);
9680 mono_class_init (klass);
9683 MonoInst *src, *dest, *temp;
9686 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9687 temp->backend.is_pinvoke = 1;
9688 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9689 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9691 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9692 dest->type = STACK_VTYPE;
9693 dest->klass = klass;
9699 case CEE_MONO_RETOBJ: {
9701 * Same as RET, but return the native representation of a vtype
9704 g_assert (cfg->ret);
9705 g_assert (mono_method_signature (method)->pinvoke);
9710 token = read32 (ip + 2);
9711 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9713 if (!cfg->vret_addr) {
9714 g_assert (cfg->ret_var_is_local);
9716 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9718 EMIT_NEW_RETLOADA (cfg, ins);
9720 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9722 if (sp != stack_start)
9725 MONO_INST_NEW (cfg, ins, OP_BR);
9726 ins->inst_target_bb = end_bblock;
9727 MONO_ADD_INS (bblock, ins);
9728 link_bblock (cfg, bblock, end_bblock);
9729 start_new_bblock = 1;
9733 case CEE_MONO_CISINST:
9734 case CEE_MONO_CCASTCLASS: {
9739 token = read32 (ip + 2);
9740 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9741 if (ip [1] == CEE_MONO_CISINST)
9742 ins = handle_cisinst (cfg, klass, sp [0]);
9744 ins = handle_ccastclass (cfg, klass, sp [0]);
9750 case CEE_MONO_SAVE_LMF:
9751 case CEE_MONO_RESTORE_LMF:
9752 #ifdef MONO_ARCH_HAVE_LMF_OPS
9753 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9754 MONO_ADD_INS (bblock, ins);
9755 cfg->need_lmf_area = TRUE;
9759 case CEE_MONO_CLASSCONST:
9760 CHECK_STACK_OVF (1);
9762 token = read32 (ip + 2);
9763 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9766 inline_costs += 10 * num_calls++;
9768 case CEE_MONO_NOT_TAKEN:
9769 bblock->out_of_line = TRUE;
9773 CHECK_STACK_OVF (1);
9775 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9776 ins->dreg = alloc_preg (cfg);
9777 ins->inst_offset = (gint32)read32 (ip + 2);
9778 ins->type = STACK_PTR;
9779 MONO_ADD_INS (bblock, ins);
9783 case CEE_MONO_DYN_CALL: {
9786 /* It would be easier to call a trampoline, but that would put an
9787 * extra frame on the stack, confusing exception handling. So
9788 * implement it inline using an opcode for now.
9791 if (!cfg->dyn_call_var) {
9792 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9793 /* prevent it from being register allocated */
9794 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
9797 /* Has to use a call inst since it local regalloc expects it */
9798 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
9799 ins = (MonoInst*)call;
9801 ins->sreg1 = sp [0]->dreg;
9802 ins->sreg2 = sp [1]->dreg;
9803 MONO_ADD_INS (bblock, ins);
9805 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
9806 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
9810 inline_costs += 10 * num_calls++;
9815 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9825 /* somewhat similar to LDTOKEN */
9826 MonoInst *addr, *vtvar;
9827 CHECK_STACK_OVF (1);
9828 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9830 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9831 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9833 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9834 ins->type = STACK_VTYPE;
9835 ins->klass = mono_defaults.argumenthandle_class;
9848 * The following transforms:
9849 * CEE_CEQ into OP_CEQ
9850 * CEE_CGT into OP_CGT
9851 * CEE_CGT_UN into OP_CGT_UN
9852 * CEE_CLT into OP_CLT
9853 * CEE_CLT_UN into OP_CLT_UN
9855 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9857 MONO_INST_NEW (cfg, ins, cmp->opcode);
9859 cmp->sreg1 = sp [0]->dreg;
9860 cmp->sreg2 = sp [1]->dreg;
9861 type_from_op (cmp, sp [0], sp [1]);
9863 if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9864 cmp->opcode = OP_LCOMPARE;
9865 else if (sp [0]->type == STACK_R8)
9866 cmp->opcode = OP_FCOMPARE;
9868 cmp->opcode = OP_ICOMPARE;
9869 MONO_ADD_INS (bblock, cmp);
9870 ins->type = STACK_I4;
9871 ins->dreg = alloc_dreg (cfg, ins->type);
9872 type_from_op (ins, sp [0], sp [1]);
9874 if (cmp->opcode == OP_FCOMPARE) {
9876 * The backends expect the fceq opcodes to do the
9879 cmp->opcode = OP_NOP;
9880 ins->sreg1 = cmp->sreg1;
9881 ins->sreg2 = cmp->sreg2;
9883 MONO_ADD_INS (bblock, ins);
9890 MonoMethod *cil_method;
9891 gboolean needs_static_rgctx_invoke;
9893 CHECK_STACK_OVF (1);
9895 n = read32 (ip + 2);
9896 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9897 if (!cmethod || mono_loader_get_last_error ())
9899 mono_class_init (cmethod->klass);
9901 mono_save_token_info (cfg, image, n, cmethod);
9903 if (cfg->generic_sharing_context)
9904 context_used = mono_method_check_context_used (cmethod);
9906 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9908 cil_method = cmethod;
9909 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9910 METHOD_ACCESS_FAILURE;
9912 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9913 if (check_linkdemand (cfg, method, cmethod))
9915 CHECK_CFG_EXCEPTION;
9916 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9917 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9921 * Optimize the common case of ldftn+delegate creation
9923 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9924 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9925 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9926 MonoInst *target_ins;
9928 int invoke_context_used = 0;
9930 invoke = mono_get_delegate_invoke (ctor_method->klass);
9931 if (!invoke || !mono_method_signature (invoke))
9934 if (cfg->generic_sharing_context)
9935 invoke_context_used = mono_method_check_context_used (invoke);
9937 target_ins = sp [-1];
9939 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
9940 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
9941 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
9942 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
9943 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
9947 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
9948 /* FIXME: SGEN support */
9949 if (invoke_context_used == 0) {
9951 if (cfg->verbose_level > 3)
9952 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9954 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
9955 CHECK_CFG_EXCEPTION;
9964 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9965 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9969 inline_costs += 10 * num_calls++;
9972 case CEE_LDVIRTFTN: {
9977 n = read32 (ip + 2);
9978 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9979 if (!cmethod || mono_loader_get_last_error ())
9981 mono_class_init (cmethod->klass);
9983 if (cfg->generic_sharing_context)
9984 context_used = mono_method_check_context_used (cmethod);
9986 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9987 if (check_linkdemand (cfg, method, cmethod))
9989 CHECK_CFG_EXCEPTION;
9990 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9991 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9997 args [1] = emit_get_rgctx_method (cfg, context_used,
9998 cmethod, MONO_RGCTX_INFO_METHOD);
10001 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
10003 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
10006 inline_costs += 10 * num_calls++;
10010 CHECK_STACK_OVF (1);
10012 n = read16 (ip + 2);
10014 EMIT_NEW_ARGLOAD (cfg, ins, n);
10019 CHECK_STACK_OVF (1);
10021 n = read16 (ip + 2);
10023 NEW_ARGLOADA (cfg, ins, n);
10024 MONO_ADD_INS (cfg->cbb, ins);
10032 n = read16 (ip + 2);
10034 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
10036 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
10040 CHECK_STACK_OVF (1);
10042 n = read16 (ip + 2);
10044 EMIT_NEW_LOCLOAD (cfg, ins, n);
10049 unsigned char *tmp_ip;
10050 CHECK_STACK_OVF (1);
10052 n = read16 (ip + 2);
10055 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
10061 EMIT_NEW_LOCLOADA (cfg, ins, n);
10070 n = read16 (ip + 2);
10072 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
10074 emit_stloc_ir (cfg, sp, header, n);
10081 if (sp != stack_start)
10083 if (cfg->method != method)
10085 * Inlining this into a loop in a parent could lead to
10086 * stack overflows which is different behavior than the
10087 * non-inlined case, thus disable inlining in this case.
10089 goto inline_failure;
10091 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
10092 ins->dreg = alloc_preg (cfg);
10093 ins->sreg1 = sp [0]->dreg;
10094 ins->type = STACK_PTR;
10095 MONO_ADD_INS (cfg->cbb, ins);
10097 cfg->flags |= MONO_CFG_HAS_ALLOCA;
10099 ins->flags |= MONO_INST_INIT;
10104 case CEE_ENDFILTER: {
10105 MonoExceptionClause *clause, *nearest;
10106 int cc, nearest_num;
10110 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
10112 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
10113 ins->sreg1 = (*sp)->dreg;
10114 MONO_ADD_INS (bblock, ins);
10115 start_new_bblock = 1;
10120 for (cc = 0; cc < header->num_clauses; ++cc) {
10121 clause = &header->clauses [cc];
10122 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
10123 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
10124 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
10129 g_assert (nearest);
10130 if ((ip - header->code) != nearest->handler_offset)
10135 case CEE_UNALIGNED_:
10136 ins_flag |= MONO_INST_UNALIGNED;
10137 /* FIXME: record alignment? we can assume 1 for now */
10141 case CEE_VOLATILE_:
10142 ins_flag |= MONO_INST_VOLATILE;
10146 ins_flag |= MONO_INST_TAILCALL;
10147 cfg->flags |= MONO_CFG_HAS_TAIL;
10148 /* Can't inline tail calls at this time */
10149 inline_costs += 100000;
10156 token = read32 (ip + 2);
10157 klass = mini_get_class (method, token, generic_context);
10158 CHECK_TYPELOAD (klass);
10159 if (generic_class_is_reference_type (cfg, klass))
10160 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
10162 mini_emit_initobj (cfg, *sp, NULL, klass);
10166 case CEE_CONSTRAINED_:
10168 token = read32 (ip + 2);
10169 if (method->wrapper_type != MONO_WRAPPER_NONE)
10170 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
10172 constrained_call = mono_class_get_full (image, token, generic_context);
10173 CHECK_TYPELOAD (constrained_call);
10177 case CEE_INITBLK: {
10178 MonoInst *iargs [3];
10182 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
10183 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
10184 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
10185 /* emit_memset only works when val == 0 */
10186 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
10188 iargs [0] = sp [0];
10189 iargs [1] = sp [1];
10190 iargs [2] = sp [2];
10191 if (ip [1] == CEE_CPBLK) {
10192 MonoMethod *memcpy_method = get_memcpy_method ();
10193 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
10195 MonoMethod *memset_method = get_memset_method ();
10196 mono_emit_method_call (cfg, memset_method, iargs, NULL);
10206 ins_flag |= MONO_INST_NOTYPECHECK;
10208 ins_flag |= MONO_INST_NORANGECHECK;
10209 /* we ignore the no-nullcheck for now since we
10210 * really do it explicitly only when doing callvirt->call
10214 case CEE_RETHROW: {
10216 int handler_offset = -1;
10218 for (i = 0; i < header->num_clauses; ++i) {
10219 MonoExceptionClause *clause = &header->clauses [i];
10220 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
10221 handler_offset = clause->handler_offset;
10226 bblock->flags |= BB_EXCEPTION_UNSAFE;
10228 g_assert (handler_offset != -1);
10230 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
10231 MONO_INST_NEW (cfg, ins, OP_RETHROW);
10232 ins->sreg1 = load->dreg;
10233 MONO_ADD_INS (bblock, ins);
10235 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
10236 MONO_ADD_INS (bblock, ins);
10239 link_bblock (cfg, bblock, end_bblock);
10240 start_new_bblock = 1;
10248 CHECK_STACK_OVF (1);
10250 token = read32 (ip + 2);
10251 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic) {
10252 MonoType *type = mono_type_create_from_typespec (image, token);
10253 token = mono_type_size (type, &ialign);
10255 MonoClass *klass = mono_class_get_full (image, token, generic_context);
10256 CHECK_TYPELOAD (klass);
10257 mono_class_init (klass);
10258 token = mono_class_value_size (klass, &align);
10260 EMIT_NEW_ICONST (cfg, ins, token);
10265 case CEE_REFANYTYPE: {
10266 MonoInst *src_var, *src;
10272 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10274 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
10275 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
10276 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
10281 case CEE_READONLY_:
10294 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
10304 g_warning ("opcode 0x%02x not handled", *ip);
10308 if (start_new_bblock != 1)
10311 bblock->cil_length = ip - bblock->cil_code;
10312 bblock->next_bb = end_bblock;
10314 if (cfg->method == method && cfg->domainvar) {
10316 MonoInst *get_domain;
10318 cfg->cbb = init_localsbb;
10320 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
10321 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
10324 get_domain->dreg = alloc_preg (cfg);
10325 MONO_ADD_INS (cfg->cbb, get_domain);
10327 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
10328 MONO_ADD_INS (cfg->cbb, store);
10331 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
10332 if (cfg->compile_aot)
10333 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
10334 mono_get_got_var (cfg);
10337 if (cfg->method == method && cfg->got_var)
10338 mono_emit_load_got_addr (cfg);
10343 cfg->cbb = init_localsbb;
10345 for (i = 0; i < header->num_locals; ++i) {
10346 MonoType *ptype = header->locals [i];
10347 int t = ptype->type;
10348 dreg = cfg->locals [i]->dreg;
10350 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
10351 t = mono_class_enum_basetype (ptype->data.klass)->type;
10352 if (ptype->byref) {
10353 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10354 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
10355 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
10356 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
10357 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
10358 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
10359 MONO_INST_NEW (cfg, ins, OP_R8CONST);
10360 ins->type = STACK_R8;
10361 ins->inst_p0 = (void*)&r8_0;
10362 ins->dreg = alloc_dreg (cfg, STACK_R8);
10363 MONO_ADD_INS (init_localsbb, ins);
10364 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
10365 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
10366 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
10367 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
10369 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10374 if (cfg->init_ref_vars && cfg->method == method) {
10375 /* Emit initialization for ref vars */
10376 // FIXME: Avoid duplication initialization for IL locals.
10377 for (i = 0; i < cfg->num_varinfo; ++i) {
10378 MonoInst *ins = cfg->varinfo [i];
10380 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
10381 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
10385 /* Add a sequence point for method entry/exit events */
10387 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
10388 MONO_ADD_INS (init_localsbb, ins);
10389 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
10390 MONO_ADD_INS (cfg->bb_exit, ins);
10395 if (cfg->method == method) {
10396 MonoBasicBlock *bb;
10397 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10398 bb->region = mono_find_block_region (cfg, bb->real_offset);
10400 mono_create_spvar_for_region (cfg, bb->region);
10401 if (cfg->verbose_level > 2)
10402 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
10406 g_slist_free (class_inits);
10407 dont_inline = g_list_remove (dont_inline, method);
10409 if (inline_costs < 0) {
10412 /* Method is too large */
10413 mname = mono_method_full_name (method, TRUE);
10414 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
10415 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
10417 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10418 mono_basic_block_free (original_bb);
10422 if ((cfg->verbose_level > 2) && (cfg->method == method))
10423 mono_print_code (cfg, "AFTER METHOD-TO-IR");
10425 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10426 mono_basic_block_free (original_bb);
10427 return inline_costs;
10430 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
10437 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
10441 set_exception_type_from_invalid_il (cfg, method, ip);
10445 g_slist_free (class_inits);
10446 mono_basic_block_free (original_bb);
10447 dont_inline = g_list_remove (dont_inline, method);
10448 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10453 store_membase_reg_to_store_membase_imm (int opcode)
10456 case OP_STORE_MEMBASE_REG:
10457 return OP_STORE_MEMBASE_IMM;
10458 case OP_STOREI1_MEMBASE_REG:
10459 return OP_STOREI1_MEMBASE_IMM;
10460 case OP_STOREI2_MEMBASE_REG:
10461 return OP_STOREI2_MEMBASE_IMM;
10462 case OP_STOREI4_MEMBASE_REG:
10463 return OP_STOREI4_MEMBASE_IMM;
10464 case OP_STOREI8_MEMBASE_REG:
10465 return OP_STOREI8_MEMBASE_IMM;
10467 g_assert_not_reached ();
10473 #endif /* DISABLE_JIT */
10476 mono_op_to_op_imm (int opcode)
10480 return OP_IADD_IMM;
10482 return OP_ISUB_IMM;
10484 return OP_IDIV_IMM;
10486 return OP_IDIV_UN_IMM;
10488 return OP_IREM_IMM;
10490 return OP_IREM_UN_IMM;
10492 return OP_IMUL_IMM;
10494 return OP_IAND_IMM;
10498 return OP_IXOR_IMM;
10500 return OP_ISHL_IMM;
10502 return OP_ISHR_IMM;
10504 return OP_ISHR_UN_IMM;
10507 return OP_LADD_IMM;
10509 return OP_LSUB_IMM;
10511 return OP_LAND_IMM;
10515 return OP_LXOR_IMM;
10517 return OP_LSHL_IMM;
10519 return OP_LSHR_IMM;
10521 return OP_LSHR_UN_IMM;
10524 return OP_COMPARE_IMM;
10526 return OP_ICOMPARE_IMM;
10528 return OP_LCOMPARE_IMM;
10530 case OP_STORE_MEMBASE_REG:
10531 return OP_STORE_MEMBASE_IMM;
10532 case OP_STOREI1_MEMBASE_REG:
10533 return OP_STOREI1_MEMBASE_IMM;
10534 case OP_STOREI2_MEMBASE_REG:
10535 return OP_STOREI2_MEMBASE_IMM;
10536 case OP_STOREI4_MEMBASE_REG:
10537 return OP_STOREI4_MEMBASE_IMM;
10539 #if defined(TARGET_X86) || defined (TARGET_AMD64)
10541 return OP_X86_PUSH_IMM;
10542 case OP_X86_COMPARE_MEMBASE_REG:
10543 return OP_X86_COMPARE_MEMBASE_IMM;
10545 #if defined(TARGET_AMD64)
10546 case OP_AMD64_ICOMPARE_MEMBASE_REG:
10547 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10549 case OP_VOIDCALL_REG:
10550 return OP_VOIDCALL;
10558 return OP_LOCALLOC_IMM;
10565 ldind_to_load_membase (int opcode)
10569 return OP_LOADI1_MEMBASE;
10571 return OP_LOADU1_MEMBASE;
10573 return OP_LOADI2_MEMBASE;
10575 return OP_LOADU2_MEMBASE;
10577 return OP_LOADI4_MEMBASE;
10579 return OP_LOADU4_MEMBASE;
10581 return OP_LOAD_MEMBASE;
10582 case CEE_LDIND_REF:
10583 return OP_LOAD_MEMBASE;
10585 return OP_LOADI8_MEMBASE;
10587 return OP_LOADR4_MEMBASE;
10589 return OP_LOADR8_MEMBASE;
10591 g_assert_not_reached ();
10598 stind_to_store_membase (int opcode)
10602 return OP_STOREI1_MEMBASE_REG;
10604 return OP_STOREI2_MEMBASE_REG;
10606 return OP_STOREI4_MEMBASE_REG;
10608 case CEE_STIND_REF:
10609 return OP_STORE_MEMBASE_REG;
10611 return OP_STOREI8_MEMBASE_REG;
10613 return OP_STORER4_MEMBASE_REG;
10615 return OP_STORER8_MEMBASE_REG;
10617 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Return the absolute-address (LOAD_MEM) form of a membase load opcode,
 * used when the base register is known to hold a constant address, or -1
 * if the target does not support it.
 */
static int
mono_load_membase_to_load_mem (int opcode)
{
	// FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_LOAD_MEMBASE:
		return OP_LOAD_MEM;
	case OP_LOADU1_MEMBASE:
		return OP_LOADU1_MEM;
	case OP_LOADU2_MEMBASE:
		return OP_LOADU2_MEM;
	case OP_LOADI4_MEMBASE:
		return OP_LOADI4_MEM;
	case OP_LOADU4_MEMBASE:
		return OP_LOADU4_MEM;
#if SIZEOF_REGISTER == 8
	case OP_LOADI8_MEMBASE:
		return OP_LOADI8_MEM;
#endif
	}
#endif

	return -1;
}
/*
 * op_to_op_dest_membase:
 *
 *   Return a read-modify-write opcode which fuses OPCODE with the store
 * described by STORE_OPCODE (x86/amd64 ALU-on-memory forms), or -1 if the
 * combination is not supported on this target.
 * NOTE(review): reconstructed from an elided extraction; the elided case
 * labels were inferred from the visible returns — confirm against upstream.
 */
static int
op_to_op_dest_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86)
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
		return -1;

	switch (opcode) {
	case OP_IADD:
		return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB:
		return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND:
		return OP_X86_AND_MEMBASE_REG;
	case OP_IOR:
		return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR:
		return OP_X86_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_IADD_IMM:
		return OP_X86_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_ISUB_IMM:
		return OP_X86_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_IAND_IMM:
		return OP_X86_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_IOR_IMM:
		return OP_X86_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_IXOR_IMM:
		return OP_X86_XOR_MEMBASE_IMM;
	case OP_MOVE:
		/* the fused store makes the move itself redundant */
		return OP_NOP;
	}
#endif

#if defined(TARGET_AMD64)
	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
		return -1;

	switch (opcode) {
	case OP_IADD:
		return OP_X86_ADD_MEMBASE_REG;
	case OP_ISUB:
		return OP_X86_SUB_MEMBASE_REG;
	case OP_IAND:
		return OP_X86_AND_MEMBASE_REG;
	case OP_IOR:
		return OP_X86_OR_MEMBASE_REG;
	case OP_IXOR:
		return OP_X86_XOR_MEMBASE_REG;
	case OP_IADD_IMM:
		return OP_X86_ADD_MEMBASE_IMM;
	case OP_ISUB_IMM:
		return OP_X86_SUB_MEMBASE_IMM;
	case OP_IAND_IMM:
		return OP_X86_AND_MEMBASE_IMM;
	case OP_IOR_IMM:
		return OP_X86_OR_MEMBASE_IMM;
	case OP_IXOR_IMM:
		return OP_X86_XOR_MEMBASE_IMM;
	case OP_LADD:
		return OP_AMD64_ADD_MEMBASE_REG;
	case OP_LSUB:
		return OP_AMD64_SUB_MEMBASE_REG;
	case OP_LAND:
		return OP_AMD64_AND_MEMBASE_REG;
	case OP_LOR:
		return OP_AMD64_OR_MEMBASE_REG;
	case OP_LXOR:
		return OP_AMD64_XOR_MEMBASE_REG;
	case OP_ADD_IMM:
	case OP_LADD_IMM:
		return OP_AMD64_ADD_MEMBASE_IMM;
	case OP_SUB_IMM:
	case OP_LSUB_IMM:
		return OP_AMD64_SUB_MEMBASE_IMM;
	case OP_AND_IMM:
	case OP_LAND_IMM:
		return OP_AMD64_AND_MEMBASE_IMM;
	case OP_OR_IMM:
	case OP_LOR_IMM:
		return OP_AMD64_OR_MEMBASE_IMM;
	case OP_XOR_IMM:
	case OP_LXOR_IMM:
		return OP_AMD64_XOR_MEMBASE_IMM;
	case OP_MOVE:
		/* the fused store makes the move itself redundant */
		return OP_NOP;
	}
#endif

	return -1;
}
/*
 * op_to_op_store_membase:
 *
 *   Fuse a compare-result opcode with a following byte store: on x86/amd64
 * a SETcc instruction can write its result directly to memory. Returns the
 * fused opcode, or -1 if the pair cannot be fused.
 * NOTE(review): the case labels were elided in this extraction and are
 * inferred — confirm against upstream.
 */
static int
op_to_op_store_membase (int store_opcode, int opcode)
{
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_ICEQ:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETEQ_MEMBASE;
		/* fallthrough: harmless, the next guard fails the same way */
	case OP_CNE:
		if (store_opcode == OP_STOREI1_MEMBASE_REG)
			return OP_X86_SETNE_MEMBASE;
	}
#endif

	return -1;
}
/*
 * op_to_op_src1_membase:
 *
 *   If OPCODE can take its first source operand directly from memory when
 * that operand was produced by LOAD_OPCODE, return the combined membase
 * opcode, otherwise return -1.
 */
static int
op_to_op_src1_membase (int load_opcode, int opcode)
{
#ifdef TARGET_X86
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return -1;

	switch (opcode) {
	case OP_X86_PUSH:
		return OP_X86_PUSH_MEMBASE;
	case OP_COMPARE_IMM:
	case OP_ICOMPARE_IMM:
		return OP_X86_COMPARE_MEMBASE_IMM;
	case OP_COMPARE:
	case OP_ICOMPARE:
		return OP_X86_COMPARE_MEMBASE_REG;
	}
#endif

#ifdef TARGET_AMD64
	/* FIXME: This has sign extension issues */
	/*
	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
		return OP_X86_COMPARE_MEMBASE8_IMM;
	*/

	switch (opcode) {
	case OP_X86_PUSH:
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_X86_PUSH_MEMBASE;
		break;
		/* FIXME: This only works for 32 bit immediates
	case OP_COMPARE_IMM:
	case OP_LCOMPARE_IMM:
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_AMD64_COMPARE_MEMBASE_IMM;
		*/
	case OP_ICOMPARE_IMM:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_IMM;
		break;
	case OP_COMPARE:
	case OP_LCOMPARE:
		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
			return OP_AMD64_COMPARE_MEMBASE_REG;
		break;
	case OP_ICOMPARE:
		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
			return OP_AMD64_ICOMPARE_MEMBASE_REG;
		break;
	}
#endif

	return -1;
}
/*
 * op_to_op_src2_membase:
 *
 *   If OPCODE can take its second source operand directly from memory when
 * that operand was produced by LOAD_OPCODE, return the combined reg-membase
 * opcode, otherwise return -1. On amd64 the 32 bit and 64 bit load widths
 * select between the x86 and the amd64 reg-membase forms.
 */
static int
op_to_op_src2_membase (int load_opcode, int opcode)
{
#ifdef TARGET_X86
	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
		return -1;

	switch (opcode) {
	case OP_COMPARE:
	case OP_ICOMPARE:
		return OP_X86_COMPARE_REG_MEMBASE;
	case OP_IADD:
		return OP_X86_ADD_REG_MEMBASE;
	case OP_ISUB:
		return OP_X86_SUB_REG_MEMBASE;
	case OP_IAND:
		return OP_X86_AND_REG_MEMBASE;
	case OP_IOR:
		return OP_X86_OR_REG_MEMBASE;
	case OP_IXOR:
		return OP_X86_XOR_REG_MEMBASE;
	}
#endif

#ifdef TARGET_AMD64
	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
		switch (opcode) {
		case OP_ICOMPARE:
			return OP_AMD64_ICOMPARE_REG_MEMBASE;
		case OP_IADD:
			return OP_X86_ADD_REG_MEMBASE;
		case OP_ISUB:
			return OP_X86_SUB_REG_MEMBASE;
		case OP_IAND:
			return OP_X86_AND_REG_MEMBASE;
		case OP_IOR:
			return OP_X86_OR_REG_MEMBASE;
		case OP_IXOR:
			return OP_X86_XOR_REG_MEMBASE;
		}
	} else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
		switch (opcode) {
		case OP_COMPARE:
		case OP_LCOMPARE:
			return OP_AMD64_COMPARE_REG_MEMBASE;
		case OP_LADD:
			return OP_AMD64_ADD_REG_MEMBASE;
		case OP_LSUB:
			return OP_AMD64_SUB_REG_MEMBASE;
		case OP_LAND:
			return OP_AMD64_AND_REG_MEMBASE;
		case OP_LOR:
			return OP_AMD64_OR_REG_MEMBASE;
		case OP_LXOR:
			return OP_AMD64_XOR_REG_MEMBASE;
		}
	}
#endif

	return -1;
}
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Wrapper around mono_op_to_op_imm () which — judging by the #if guards
 * below — refuses the conversion for opcodes that are software-emulated on
 * this target (long shifts on 32-bit registers, mul/div when emulated).
 * NOTE(review): the case labels between the #if lines are missing from this
 * extract, so the exact excluded opcode set cannot be confirmed here.
 */
10887 mono_op_to_op_imm_noemul (int opcode)
10890 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
10896 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10904 return mono_op_to_op_imm (opcode);
10908 #ifndef DISABLE_JIT
10911 * mono_handle_global_vregs:
10913 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/*
 * NOTE(review): many structural lines (braces, continue/break statements,
 * switch labels) are elided from this extract; the comments added below
 * only describe what the visible code demonstrates.
 */
10917 mono_handle_global_vregs (MonoCompile *cfg)
10919 gint32 *vreg_to_bb;
10920 MonoBasicBlock *bb;
/*
 * vreg_to_bb [vreg] records the (block_num + 1) of the single bblock using
 * the vreg, or -1 once it has been seen in more than one bblock.
 * NOTE(review): element size is sizeof (gint32*) for a gint32 array, and
 * the "+ 1" adds one byte rather than one element — over-allocation is
 * harmless but looks unintended; confirm against upstream.
 */
10923 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
10925 #ifdef MONO_ARCH_SIMD_INTRINSICS
10926 if (cfg->uses_simd_intrinsics)
10927 mono_simd_simplify_indirection (cfg);
10930 /* Find local vregs used in more than one bb */
10931 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10932 MonoInst *ins = bb->code;
10933 int block_num = bb->block_num;
10935 if (cfg->verbose_level > 2)
10936 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
10939 for (; ins; ins = ins->next) {
10940 const char *spec = INS_INFO (ins->opcode);
10941 int regtype = 0, regindex;
10944 if (G_UNLIKELY (cfg->verbose_level > 2))
10945 mono_print_ins (ins);
/* Only low-level (post-CIL) opcodes are expected at this point. */
10947 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Visit each register slot of the ins: 0 = dest, 1-3 = src1..src3. */
10949 for (regindex = 0; regindex < 4; regindex ++) {
10952 if (regindex == 0) {
10953 regtype = spec [MONO_INST_DEST];
10954 if (regtype == ' ')
10957 } else if (regindex == 1) {
10958 regtype = spec [MONO_INST_SRC1];
10959 if (regtype == ' ')
10962 } else if (regindex == 2) {
10963 regtype = spec [MONO_INST_SRC2];
10964 if (regtype == ' ')
10967 } else if (regindex == 3) {
10968 regtype = spec [MONO_INST_SRC3];
10969 if (regtype == ' ')
10974 #if SIZEOF_REGISTER == 4
10975 /* In the LLVM case, the long opcodes are not decomposed */
10976 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
10978 * Since some instructions reference the original long vreg,
10979 * and some reference the two component vregs, it is quite hard
10980 * to determine when it needs to be global. So be conservative.
10982 if (!get_vreg_to_inst (cfg, vreg)) {
10983 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10985 if (cfg->verbose_level > 2)
10986 printf ("LONG VREG R%d made global.\n", vreg);
10990 * Make the component vregs volatile since the optimizations can
10991 * get confused otherwise.
10993 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
10994 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
10998 g_assert (vreg != -1);
11000 prev_bb = vreg_to_bb [vreg];
11001 if (prev_bb == 0) {
11002 /* 0 is a valid block num */
11003 vreg_to_bb [vreg] = block_num + 1;
11004 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hard registers are skipped; they are not subject to this pass. */
11005 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
11008 if (!get_vreg_to_inst (cfg, vreg)) {
11009 if (G_UNLIKELY (cfg->verbose_level > 2))
11010 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Create an OP_LOCAL var typed by regtype (int/long/double/vtype). */
11014 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
11017 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
11020 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
11023 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
11026 g_assert_not_reached ();
11030 /* Flag as having been used in more than one bb */
11031 vreg_to_bb [vreg] = -1;
11037 /* If a variable is used in only one bblock, convert it into a local vreg */
11038 for (i = 0; i < cfg->num_varinfo; i++) {
11039 MonoInst *var = cfg->varinfo [i];
11040 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
11042 switch (var->type) {
11048 #if SIZEOF_REGISTER == 8
11051 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
11052 /* Enabling this screws up the fp stack on x86 */
11055 /* Arguments are implicitly global */
11056 /* Putting R4 vars into registers doesn't work currently */
11057 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
11059 * Make that the variable's liveness interval doesn't contain a call, since
11060 * that would cause the lvreg to be spilled, making the whole optimization
11063 /* This is too slow for JIT compilation */
11065 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
11067 int def_index, call_index, ins_index;
11068 gboolean spilled = FALSE;
11073 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
11074 const char *spec = INS_INFO (ins->opcode);
11076 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
11077 def_index = ins_index;
/*
 * NOTE(review): the second clause below duplicates the SRC1/sreg1 test;
 * it almost certainly was meant to check SRC2/sreg2, so as written a use
 * of the variable through sreg2 is never detected here — confirm upstream.
 */
11079 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
11080 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
11081 if (call_index > def_index) {
11087 if (MONO_IS_CALL (ins))
11088 call_index = ins_index;
11098 if (G_UNLIKELY (cfg->verbose_level > 2))
11099 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* Kill the variable; its vreg becomes a plain bblock-local vreg. */
11100 var->flags |= MONO_INST_IS_DEAD;
11101 cfg->vreg_to_inst [var->dreg] = NULL;
11108 * Compress the varinfo and vars tables so the liveness computation is faster and
11109 * takes up less space.
11112 for (i = 0; i < cfg->num_varinfo; ++i) {
11113 MonoInst *var = cfg->varinfo [i];
11114 if (pos < i && cfg->locals_start == i)
11115 cfg->locals_start = pos;
11116 if (!(var->flags & MONO_INST_IS_DEAD)) {
/* Compact live entries downward, keeping inst_c0/idx in sync with pos. */
11118 cfg->varinfo [pos] = cfg->varinfo [i];
11119 cfg->varinfo [pos]->inst_c0 = pos;
11120 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
11121 cfg->vars [pos].idx = pos;
11122 #if SIZEOF_REGISTER == 4
11123 if (cfg->varinfo [pos]->type == STACK_I8) {
11124 /* Modify the two component vars too */
11127 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
11128 var1->inst_c0 = pos;
11129 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
11130 var1->inst_c0 = pos;
11137 cfg->num_varinfo = pos;
11138 if (cfg->locals_start > cfg->num_varinfo)
11139 cfg->locals_start = cfg->num_varinfo;
11143 * mono_spill_global_vars:
11145 * Generate spill code for variables which are not allocated to registers,
11146 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
11147 * code is generated which could be optimized by the local optimization passes.
/*
 * NOTE(review): many structural lines (braces, continue statements, some
 * declarations) are elided from this extract; the added comments describe
 * only what the visible code demonstrates.
 */
11150 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
11152 MonoBasicBlock *bb;
11154 int orig_next_vreg;
11155 guint32 *vreg_to_lvreg;
11157 guint32 i, lvregs_len;
11158 gboolean dest_has_lvreg = FALSE;
11159 guint32 stacktypes [128];
11160 MonoInst **live_range_start, **live_range_end;
11161 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
11163 *need_local_opts = FALSE;
11165 memset (spec2, 0, sizeof (spec2));
11167 /* FIXME: Move this function to mini.c */
/* Map ins spec regtype characters to stack types for alloc_dreg (). */
11168 stacktypes ['i'] = STACK_PTR;
11169 stacktypes ['l'] = STACK_I8;
11170 stacktypes ['f'] = STACK_R8;
11171 #ifdef MONO_ARCH_SIMD_INTRINSICS
11172 stacktypes ['x'] = STACK_VTYPE;
11175 #if SIZEOF_REGISTER == 4
11176 /* Create MonoInsts for longs */
11177 for (i = 0; i < cfg->num_varinfo; i++) {
11178 MonoInst *ins = cfg->varinfo [i];
11180 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
11181 switch (ins->type) {
11186 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
11189 g_assert (ins->opcode == OP_REGOFFSET);
/* Give the low/high component vregs stack slots overlapping the long's. */
11191 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
11193 tree->opcode = OP_REGOFFSET;
11194 tree->inst_basereg = ins->inst_basereg;
11195 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
11197 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
11199 tree->opcode = OP_REGOFFSET;
11200 tree->inst_basereg = ins->inst_basereg;
11201 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
11211 /* FIXME: widening and truncation */
11214 * As an optimization, when a variable allocated to the stack is first loaded into
11215 * an lvreg, we will remember the lvreg and use it the next time instead of loading
11216 * the variable again.
11218 orig_next_vreg = cfg->next_vreg;
11219 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
/*
 * NOTE(review): lvregs has a fixed capacity of 1024 entries, enforced only
 * by g_assert (lvregs_len < 1024) further down — very large methods would
 * hit the assert rather than grow the buffer; confirm this limit is safe.
 */
11220 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
11224 * These arrays contain the first and last instructions accessing a given
11226 * Since we emit bblocks in the same order we process them here, and we
11227 * don't split live ranges, these will precisely describe the live range of
11228 * the variable, i.e. the instruction range where a valid value can be found
11229 * in the variables location.
11230 * The live range is computed using the liveness info computed by the liveness pass.
11231 * We can't use vmv->range, since that is an abstract live range, and we need
11232 * one which is instruction precise.
11233 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
11235 /* FIXME: Only do this if debugging info is requested */
11236 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
11237 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
11238 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
11239 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
11241 /* Add spill loads/stores */
11242 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
11245 if (cfg->verbose_level > 2)
11246 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
11248 /* Clear vreg_to_lvreg array */
11249 for (i = 0; i < lvregs_len; i++)
11250 vreg_to_lvreg [lvregs [i]] = 0;
11254 MONO_BB_FOR_EACH_INS (bb, ins) {
11255 const char *spec = INS_INFO (ins->opcode);
11256 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
11257 gboolean store, no_lvreg;
11258 int sregs [MONO_MAX_SRC_REGS];
11260 if (G_UNLIKELY (cfg->verbose_level > 2))
11261 mono_print_ins (ins);
11263 if (ins->opcode == OP_NOP)
11267 * We handle LDADDR here as well, since it can only be decomposed
11268 * when variable addresses are known.
11270 if (ins->opcode == OP_LDADDR) {
11271 MonoInst *var = ins->inst_p0;
11273 if (var->opcode == OP_VTARG_ADDR) {
11274 /* Happens on SPARC/S390 where vtypes are passed by reference */
11275 MonoInst *vtaddr = var->inst_left;
11276 if (vtaddr->opcode == OP_REGVAR) {
11277 ins->opcode = OP_MOVE;
11278 ins->sreg1 = vtaddr->dreg;
11280 else if (var->inst_left->opcode == OP_REGOFFSET) {
11281 ins->opcode = OP_LOAD_MEMBASE;
11282 ins->inst_basereg = vtaddr->inst_basereg;
11283 ins->inst_offset = vtaddr->inst_offset;
11287 g_assert (var->opcode == OP_REGOFFSET);
/* Plain local: address = frame base + offset. */
11289 ins->opcode = OP_ADD_IMM;
11290 ins->sreg1 = var->inst_basereg;
11291 ins->inst_imm = var->inst_offset;
11294 *need_local_opts = TRUE;
11295 spec = INS_INFO (ins->opcode);
/* High-level CIL opcodes must have been lowered before this pass. */
11298 if (ins->opcode < MONO_CEE_LAST) {
11299 mono_print_ins (ins);
11300 g_assert_not_reached ();
11304 * Store opcodes have destbasereg in the dreg, but in reality, it is an
11308 if (MONO_IS_STORE_MEMBASE (ins)) {
/* Temporarily swap dreg/sreg2 and build a spec treating dreg as a src. */
11309 tmp_reg = ins->dreg;
11310 ins->dreg = ins->sreg2;
11311 ins->sreg2 = tmp_reg;
11314 spec2 [MONO_INST_DEST] = ' ';
11315 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
11316 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
11317 spec2 [MONO_INST_SRC3] = ' ';
11319 } else if (MONO_IS_STORE_MEMINDEX (ins))
11320 g_assert_not_reached ();
11325 if (G_UNLIKELY (cfg->verbose_level > 2)) {
11326 printf ("\t %.3s %d", spec, ins->dreg);
11327 num_sregs = mono_inst_get_src_registers (ins, sregs);
11328 for (srcindex = 0; srcindex < 3; ++srcindex)
11329 printf (" %d", sregs [srcindex]);
/***************/
/*    DREG     */
/***************/
11336 regtype = spec [MONO_INST_DEST];
11337 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
11340 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
11341 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
11342 MonoInst *store_ins;
11344 MonoInst *def_ins = ins;
11345 int dreg = ins->dreg; /* The original vreg */
11347 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
11349 if (var->opcode == OP_REGVAR) {
11350 ins->dreg = var->dreg;
11351 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
11353 * Instead of emitting a load+store, use a _membase opcode.
11355 g_assert (var->opcode == OP_REGOFFSET);
11356 if (ins->opcode == OP_MOVE) {
11360 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
11361 ins->inst_basereg = var->inst_basereg;
11362 ins->inst_offset = var->inst_offset;
11365 spec = INS_INFO (ins->opcode);
11369 g_assert (var->opcode == OP_REGOFFSET);
11371 prev_dreg = ins->dreg;
11373 /* Invalidate any previous lvreg for this vreg */
11374 vreg_to_lvreg [ins->dreg] = 0;
11378 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
11380 store_opcode = OP_STOREI8_MEMBASE_REG;
/* Redirect the def to a fresh lvreg; a spill store follows below. */
11383 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
11385 if (regtype == 'l') {
11386 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
11387 mono_bblock_insert_after_ins (bb, ins, store_ins);
11388 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
11389 mono_bblock_insert_after_ins (bb, ins, store_ins);
11390 def_ins = store_ins;
11393 g_assert (store_opcode != OP_STOREV_MEMBASE);
11395 /* Try to fuse the store into the instruction itself */
11396 /* FIXME: Add more instructions */
11397 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
11398 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
11399 ins->inst_imm = ins->inst_c0;
11400 ins->inst_destbasereg = var->inst_basereg;
11401 ins->inst_offset = var->inst_offset;
11402 spec = INS_INFO (ins->opcode);
11403 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
11404 ins->opcode = store_opcode;
11405 ins->inst_destbasereg = var->inst_basereg;
11406 ins->inst_offset = var->inst_offset;
/* Converted to a store: re-apply the dreg/sreg2 swap + spec2 trick. */
11410 tmp_reg = ins->dreg;
11411 ins->dreg = ins->sreg2;
11412 ins->sreg2 = tmp_reg;
11415 spec2 [MONO_INST_DEST] = ' ';
11416 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
11417 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
11418 spec2 [MONO_INST_SRC3] = ' ';
11420 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
11421 // FIXME: The backends expect the base reg to be in inst_basereg
11422 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
11424 ins->inst_basereg = var->inst_basereg;
11425 ins->inst_offset = var->inst_offset;
11426 spec = INS_INFO (ins->opcode);
11428 /* printf ("INS: "); mono_print_ins (ins); */
11429 /* Create a store instruction */
11430 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
11432 /* Insert it after the instruction */
11433 mono_bblock_insert_after_ins (bb, ins, store_ins);
11435 def_ins = store_ins;
11438 * We can't assign ins->dreg to var->dreg here, since the
11439 * sregs could use it. So set a flag, and do it after
11442 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
11443 dest_has_lvreg = TRUE;
11448 if (def_ins && !live_range_start [dreg]) {
11449 live_range_start [dreg] = def_ins;
11450 live_range_start_bb [dreg] = bb;
/***************/
/*    SREGS    */
/***************/
11457 num_sregs = mono_inst_get_src_registers (ins, sregs);
11458 for (srcindex = 0; srcindex < 3; ++srcindex) {
11459 regtype = spec [MONO_INST_SRC1 + srcindex];
11460 sreg = sregs [srcindex];
11462 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
11463 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
11464 MonoInst *var = get_vreg_to_inst (cfg, sreg);
11465 MonoInst *use_ins = ins;
11466 MonoInst *load_ins;
11467 guint32 load_opcode;
11469 if (var->opcode == OP_REGVAR) {
11470 sregs [srcindex] = var->dreg;
11471 //mono_inst_set_src_registers (ins, sregs);
11472 live_range_end [sreg] = use_ins;
11473 live_range_end_bb [sreg] = bb;
11477 g_assert (var->opcode == OP_REGOFFSET);
11479 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
11481 g_assert (load_opcode != OP_LOADV_MEMBASE);
11483 if (vreg_to_lvreg [sreg]) {
11484 g_assert (vreg_to_lvreg [sreg] != -1);
11486 /* The variable is already loaded to an lvreg */
11487 if (G_UNLIKELY (cfg->verbose_level > 2))
11488 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
11489 sregs [srcindex] = vreg_to_lvreg [sreg];
11490 //mono_inst_set_src_registers (ins, sregs);
11494 /* Try to fuse the load into the instruction */
11495 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
11496 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
11497 sregs [0] = var->inst_basereg;
11498 //mono_inst_set_src_registers (ins, sregs);
11499 ins->inst_offset = var->inst_offset;
11500 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
11501 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
11502 sregs [1] = var->inst_basereg;
11503 //mono_inst_set_src_registers (ins, sregs);
11504 ins->inst_offset = var->inst_offset;
11506 if (MONO_IS_REAL_MOVE (ins)) {
11507 ins->opcode = OP_NOP;
11510 //printf ("%d ", srcindex); mono_print_ins (ins);
/* Load the variable into a fresh lvreg and remember it for reuse. */
11512 sreg = alloc_dreg (cfg, stacktypes [regtype]);
11514 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
11515 if (var->dreg == prev_dreg) {
11517 * sreg refers to the value loaded by the load
11518 * emitted below, but we need to use ins->dreg
11519 * since it refers to the store emitted earlier.
11523 g_assert (sreg != -1);
11524 vreg_to_lvreg [var->dreg] = sreg;
11525 g_assert (lvregs_len < 1024);
11526 lvregs [lvregs_len ++] = var->dreg;
11530 sregs [srcindex] = sreg;
11531 //mono_inst_set_src_registers (ins, sregs);
11533 if (regtype == 'l') {
11534 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
11535 mono_bblock_insert_before_ins (bb, ins, load_ins);
11536 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
11537 mono_bblock_insert_before_ins (bb, ins, load_ins);
11538 use_ins = load_ins;
11541 #if SIZEOF_REGISTER == 4
11542 g_assert (load_opcode != OP_LOADI8_MEMBASE);
11544 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
11545 mono_bblock_insert_before_ins (bb, ins, load_ins);
11546 use_ins = load_ins;
/* Only original (pre-pass) vregs participate in live-range tracking. */
11550 if (var->dreg < orig_next_vreg) {
11551 live_range_end [var->dreg] = use_ins;
11552 live_range_end_bb [var->dreg] = bb;
11556 mono_inst_set_src_registers (ins, sregs);
/* Deferred from the DREG section: record the lvreg for the def. */
11558 if (dest_has_lvreg) {
11559 g_assert (ins->dreg != -1);
11560 vreg_to_lvreg [prev_dreg] = ins->dreg;
11561 g_assert (lvregs_len < 1024);
11562 lvregs [lvregs_len ++] = prev_dreg;
11563 dest_has_lvreg = FALSE;
/* Undo the earlier dreg/sreg2 swap for store opcodes. */
11567 tmp_reg = ins->dreg;
11568 ins->dreg = ins->sreg2;
11569 ins->sreg2 = tmp_reg;
/* Calls invalidate all cached lvregs (they may be clobbered). */
11572 if (MONO_IS_CALL (ins)) {
11573 /* Clear vreg_to_lvreg array */
11574 for (i = 0; i < lvregs_len; i++)
11575 vreg_to_lvreg [lvregs [i]] = 0;
11577 } else if (ins->opcode == OP_NOP) {
11579 MONO_INST_NULLIFY_SREGS (ins);
11582 if (cfg->verbose_level > 2)
11583 mono_print_ins_index (1, ins);
11586 /* Extend the live range based on the liveness info */
11587 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
11588 for (i = 0; i < cfg->num_varinfo; i ++) {
11589 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
11591 if (vreg_is_volatile (cfg, vi->vreg))
11592 /* The liveness info is incomplete */
11595 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
11596 /* Live from at least the first ins of this bb */
11597 live_range_start [vi->vreg] = bb->code;
11598 live_range_start_bb [vi->vreg] = bb;
11601 if (mono_bitset_test_fast (bb->live_out_set, i)) {
11602 /* Live at least until the last ins of this bb */
11603 live_range_end [vi->vreg] = bb->last_ins;
11604 live_range_end_bb [vi->vreg] = bb;
11610 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
11612 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
11613 * by storing the current native offset into MonoMethodVar->live_range_start/end.
11615 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
11616 for (i = 0; i < cfg->num_varinfo; ++i) {
11617 int vreg = MONO_VARINFO (cfg, i)->vreg;
11620 if (live_range_start [vreg]) {
11621 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
11623 ins->inst_c1 = vreg;
11624 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
11626 if (live_range_end [vreg]) {
11627 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
11629 ins->inst_c1 = vreg;
11630 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
11631 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
11633 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
/* The live-range arrays were g_new'ed above; release them. */
11639 g_free (live_range_start);
11640 g_free (live_range_end);
11641 g_free (live_range_start_bb);
11642 g_free (live_range_end_bb);
11647 * - use 'iadd' instead of 'int_add'
11648 * - handling ovf opcodes: decompose in method_to_ir.
11649 * - unify iregs/fregs
11650 * -> partly done, the missing parts are:
11651 * - a more complete unification would involve unifying the hregs as well, so
11652 * code wouldn't need if (fp) all over the place. but that would mean the hregs
11653 * would no longer map to the machine hregs, so the code generators would need to
11654 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
11655 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
11656 * fp/non-fp branches speeds it up by about 15%.
11657 * - use sext/zext opcodes instead of shifts
11659 * - get rid of TEMPLOADs if possible and use vregs instead
11660 * - clean up usage of OP_P/OP_ opcodes
11661 * - cleanup usage of DUMMY_USE
11662 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
11664 * - set the stack type and allocate a dreg in the EMIT_NEW macros
11665 * - get rid of all the <foo>2 stuff when the new JIT is ready.
11666 * - make sure handle_stack_args () is called before the branch is emitted
11667 * - when the new IR is done, get rid of all unused stuff
11668 * - COMPARE/BEQ as separate instructions or unify them ?
11669 * - keeping them separate allows specialized compare instructions like
11670 * compare_imm, compare_membase
11671 * - most back ends unify fp compare+branch, fp compare+ceq
11672 * - integrate mono_save_args into inline_method
11673 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
11674 * - handle long shift opts on 32 bit platforms somehow: they require
11675 * 3 sregs (2 for arg1 and 1 for arg2)
11676 * - make byref a 'normal' type.
11677 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
11678 * variable if needed.
11679 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11680 * like inline_method.
11681 * - remove inlining restrictions
11682 * - fix LNEG and enable cfold of INEG
11683 * - generalize x86 optimizations like ldelema as a peephole optimization
11684 * - add store_mem_imm for amd64
11685 * - optimize the loading of the interruption flag in the managed->native wrappers
11686 * - avoid special handling of OP_NOP in passes
11687 * - move code inserting instructions into one function/macro.
11688 * - try a coalescing phase after liveness analysis
11689 * - add float -> vreg conversion + local optimizations on !x86
11690 * - figure out how to handle decomposed branches during optimizations, ie.
11691 * compare+branch, op_jump_table+op_br etc.
11692 * - promote RuntimeXHandles to vregs
11693 * - vtype cleanups:
11694 * - add a NEW_VARLOADA_VREG macro
11695 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11696 * accessing vtype fields.
11697 * - get rid of I8CONST on 64 bit platforms
11698 * - dealing with the increase in code size due to branches created during opcode
11700 * - use extended basic blocks
11701 * - all parts of the JIT
11702 * - handle_global_vregs () && local regalloc
11703 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11704 * - sources of increase in code size:
11707 * - isinst and castclass
11708 * - lvregs not allocated to global registers even if used multiple times
11709 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11711 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11712 * - add all micro optimizations from the old JIT
11713 * - put tree optimizations into the deadce pass
11714 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11715 * specific function.
11716 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11717 * fcompare + branchCC.
11718 * - create a helper function for allocating a stack slot, taking into account
11719 * MONO_CFG_HAS_SPILLUP.
11721 * - merge the ia64 switch changes.
11722 * - optimize mono_regstate2_alloc_int/float.
11723 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11724 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11725 * parts of the tree could be separated by other instructions, killing the tree
11726 * arguments, or stores killing loads etc. Also, should we fold loads into other
11727 * instructions if the result of the load is used multiple times ?
11728 * - make the REM_IMM optimization in mini-x86.c arch-independent.
11729 * - LAST MERGE: 108395.
11730 * - when returning vtypes in registers, generate IR and append it to the end of the
11731 * last bb instead of doing it in the epilog.
11732 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
11740 - When to decompose opcodes:
11741 - earlier: this makes some optimizations hard to implement, since the low level IR
11742 no longer contains the necessary information. But it is easier to do.
11743 - later: harder to implement, enables more optimizations.
11744 - Branches inside bblocks:
11745 - created when decomposing complex opcodes.
11746 - branches to another bblock: harmless, but not tracked by the branch
11747 optimizations, so need to branch to a label at the start of the bblock.
11748 - branches to inside the same bblock: very problematic, trips up the local
11749 reg allocator. Can be fixed by splitting the current bblock, but that is a
11750 complex operation, since some local vregs can become global vregs etc.
11751 - Local/global vregs:
11752 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11753 local register allocator.
11754 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11755 structure, created by mono_create_var (). Assigned to hregs or the stack by
11756 the global register allocator.
11757 - When to do optimizations like alu->alu_imm:
11758 - earlier -> saves work later on since the IR will be smaller/simpler
11759 - later -> can work on more instructions
11760 - Handling of valuetypes:
11761 - When a vtype is pushed on the stack, a new temporary is created, an
11762 instruction computing its address (LDADDR) is emitted and pushed on
11763 the stack. Need to optimize cases when the vtype is used immediately as in
11764 argument passing, stloc etc.
11765 - Instead of the to_end stuff in the old JIT, simply call the function handling
11766 the values on the stack before emitting the last instruction of the bb.
11769 #endif /* DISABLE_JIT */