2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
30 #include <mono/utils/memcheck.h>
32 #include <mono/metadata/assembly.h>
33 #include <mono/metadata/attrdefs.h>
34 #include <mono/metadata/loader.h>
35 #include <mono/metadata/tabledefs.h>
36 #include <mono/metadata/class.h>
37 #include <mono/metadata/object.h>
38 #include <mono/metadata/exception.h>
39 #include <mono/metadata/opcodes.h>
40 #include <mono/metadata/mono-endian.h>
41 #include <mono/metadata/tokentype.h>
42 #include <mono/metadata/tabledefs.h>
43 #include <mono/metadata/marshal.h>
44 #include <mono/metadata/debug-helpers.h>
45 #include <mono/metadata/mono-debug.h>
46 #include <mono/metadata/gc-internal.h>
47 #include <mono/metadata/security-manager.h>
48 #include <mono/metadata/threads-types.h>
49 #include <mono/metadata/security-core-clr.h>
50 #include <mono/metadata/monitor.h>
51 #include <mono/metadata/profiler-private.h>
52 #include <mono/metadata/profiler.h>
53 #include <mono/utils/mono-compiler.h>
54 #include <mono/metadata/mono-basic-block.h>
61 #include "jit-icalls.h"
63 #include "debugger-agent.h"
/* NOTE(review): this listing is elided — lines are missing throughout and each
 * line carries an embedded original-file line number. Comments below are added
 * only at macro boundaries so they cannot be absorbed into a '\'-continued body. */
/* Tuning knobs for the inliner: branch cost and max IL length to inline. */
65 #define BRANCH_COST 10
66 #define INLINE_LENGTH_LIMIT 20
/* Marks an inline attempt as failed (visible body truncated in this view). */
67 #define INLINE_FAILURE do {\
68 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
/* Bails out of IR generation if a pending exception is set on the cfg. */
71 #define CHECK_CFG_EXCEPTION do {\
72 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
/* Records a MethodAccessException on the cfg (message names caller/callee)
 * and jumps to exception_exit; frees the temporary name strings first. */
75 #define METHOD_ACCESS_FAILURE do { \
76 char *method_fname = mono_method_full_name (method, TRUE); \
77 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
78 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS); \
79 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
80 g_free (method_fname); \
81 g_free (cil_method_fname); \
82 goto exception_exit; \
/* Same pattern as METHOD_ACCESS_FAILURE, for inaccessible fields. */
84 #define FIELD_ACCESS_FAILURE do { \
85 char *method_fname = mono_method_full_name (method, TRUE); \
86 char *field_fname = mono_field_full_name (field); \
87 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS); \
88 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
89 g_free (method_fname); \
90 g_free (field_fname); \
91 goto exception_exit; \
/* Aborts generic-code sharing for the current opcode; only acts when the
 * method is actually being compiled with a generic sharing context. */
93 #define GENERIC_SHARING_FAILURE(opcode) do { \
94 if (cfg->generic_sharing_context) { \
95 if (cfg->verbose_level > 2) \
96 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
97 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
98 goto exception_exit; \
/* Records an OutOfMemoryException on the cfg and aborts compilation. */
101 #define OUT_OF_MEMORY_FAILURE do { \
102 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
103 goto exception_exit; \
105 /* Determine whenever 'ins' represents a load of the 'this' argument */
106 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for opcode-translation helpers defined later in the file. */
108 static int ldind_to_load_membase (int opcode);
109 static int stind_to_store_membase (int opcode);
111 int mono_op_to_op_imm (int opcode);
112 int mono_op_to_op_imm_noemul (int opcode);
114 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
115 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
116 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
118 /* helper methods signatures */
/* Lazily-built shared signatures for runtime trampolines; populated once by
 * mono_create_helper_signatures () below. */
119 static MonoMethodSignature *helper_sig_class_init_trampoline = NULL;
120 static MonoMethodSignature *helper_sig_domain_get = NULL;
121 static MonoMethodSignature *helper_sig_generic_class_init_trampoline = NULL;
122 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm = NULL;
123 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline = NULL;
124 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline = NULL;
125 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm = NULL;
128 * Instruction metadata
/* The MINI_OP/MINI_OP3 macros expand the opcode list in mini-ops.h twice:
 * first into per-opcode dest/src register-kind characters, then (redefined
 * below) into per-opcode source-register counts. */
136 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
137 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
143 #if SIZEOF_REGISTER == 8
148 /* keep in sync with the enum in mini.h */
151 #include "mini-ops.h"
/* Second expansion: compute how many source registers each opcode uses. */
156 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
157 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
159 * This should contain the index of the last sreg + 1. This is not the same
160 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
162 const gint8 ins_sreg_counts[] = {
163 #include "mini-ops.h"
/* Initializes live-range bookkeeping for a variable (body elided here). */
168 #define MONO_INIT_VARINFO(vi,id) do { \
169 (vi)->range.first_use.pos.bid = 0xffff; \
/* Copies up to three source virtual registers from 'regs' into 'ins'. */
175 mono_inst_set_src_registers (MonoInst *ins, int *regs)
177 ins->sreg1 = regs [0];
178 ins->sreg2 = regs [1];
179 ins->sreg3 = regs [2];
/* Public wrappers around the internal vreg allocators so other compilation
 * passes can allocate integer/float/pointer-sized virtual registers. */
183 mono_alloc_ireg (MonoCompile *cfg)
185 return alloc_ireg (cfg);
189 mono_alloc_freg (MonoCompile *cfg)
191 return alloc_freg (cfg);
195 mono_alloc_preg (MonoCompile *cfg)
197 return alloc_preg (cfg);
/* Allocates a destination vreg appropriate for the given eval-stack type. */
201 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
203 return alloc_dreg (cfg, stack_type);
/* Maps a managed MonoType to the move opcode used when copying a value of
 * that type between registers. Enums are unwrapped to their base type and
 * generic instances to their container class; the result opcodes for each
 * branch are elided in this view. */
207 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
213 switch (type->type) {
216 case MONO_TYPE_BOOLEAN:
228 case MONO_TYPE_FNPTR:
230 case MONO_TYPE_CLASS:
231 case MONO_TYPE_STRING:
232 case MONO_TYPE_OBJECT:
233 case MONO_TYPE_SZARRAY:
234 case MONO_TYPE_ARRAY:
/* I8 handling differs on 64-bit targets where a long fits in one register. */
238 #if SIZEOF_REGISTER == 8
247 case MONO_TYPE_VALUETYPE:
248 if (type->data.klass->enumtype) {
249 type = mono_class_enum_basetype (type->data.klass);
252 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
255 case MONO_TYPE_TYPEDBYREF:
257 case MONO_TYPE_GENERICINST:
258 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables only reach here under generic sharing. */
262 g_assert (cfg->generic_sharing_context);
265 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/* Debug dump of one basic block: prints its in/out edges (block number and
 * DFS number per neighbor) followed by every instruction in the block. */
271 mono_print_bb (MonoBasicBlock *bb, const char *msg)
276 printf ("\n%s %d: [IN: ", msg, bb->block_num);
277 for (i = 0; i < bb->in_count; ++i)
278 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
280 for (i = 0; i < bb->out_count; ++i)
281 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
283 for (tree = bb->code; tree; tree = tree->next)
284 mono_print_ins_index (-1, tree);
/* One-time initialization of the shared trampoline signatures declared above.
 * Signatures are described as icall strings: "<ret> <args...>". */
288 mono_create_helper_signatures (void)
290 helper_sig_domain_get = mono_create_icall_signature ("ptr");
291 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
292 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
/* LLVM variants take an explicit argument instead of an implicit register. */
293 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
294 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
295 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
296 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
300 * Can't put this at the beginning, since other files reference stuff from this
/* Verification/load failure: optionally break into the debugger, else jump
 * to the corresponding label in mono_method_to_ir (). */
305 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
307 #define LOAD_ERROR do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto load_error; } while (0)
/* Looks up (or creates and registers) the basic block starting at IL 'ip';
 * out-of-range ip is treated as unverifiable code. */
309 #define GET_BBLOCK(cfg,tblock,ip) do { \
310 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
312 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
313 NEW_BBLOCK (cfg, (tblock)); \
314 (tblock)->cil_code = (ip); \
315 ADD_BBLOCK (cfg, (tblock)); \
319 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* x86/amd64 only: emits an OP_X86_LEA computing sr1 + (sr2 << shift) + imm. */
320 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
321 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
322 (dest)->dreg = alloc_preg ((cfg)); \
323 (dest)->sreg1 = (sr1); \
324 (dest)->sreg2 = (sr2); \
325 (dest)->inst_imm = (imm); \
326 (dest)->backend.shift_amount = (shift); \
327 MONO_ADD_INS ((cfg)->cbb, (dest)); \
331 #if SIZEOF_REGISTER == 8
/* On 64-bit, widens an I4 second operand to pointer width (sign-extend)
 * before a mixed PTR/I4 binop; a no-op on 32-bit targets (see #else arm). */
332 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
333 /* FIXME: Need to add many more cases */ \
334 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
336 int dr = alloc_preg (cfg); \
337 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
338 (ins)->sreg2 = widen->dreg; \
342 #define ADD_WIDEN_OP(ins, arg1, arg2)
/* Pops two eval-stack entries, types the binop via type_from_op (), widens
 * if needed, allocates the dreg and pushes the (possibly decomposed) result. */
345 #define ADD_BINOP(op) do { \
346 MONO_INST_NEW (cfg, ins, (op)); \
348 ins->sreg1 = sp [0]->dreg; \
349 ins->sreg2 = sp [1]->dreg; \
350 type_from_op (ins, sp [0], sp [1]); \
352 /* Have to insert a widening op */ \
353 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
354 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
355 MONO_ADD_INS ((cfg)->cbb, (ins)); \
356 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
359 #define ADD_UNOP(op) do { \
360 MONO_INST_NEW (cfg, ins, (op)); \
362 ins->sreg1 = sp [0]->dreg; \
363 type_from_op (ins, sp [0], NULL); \
365 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
366 MONO_ADD_INS ((cfg)->cbb, (ins)); \
367 *sp++ = mono_decompose_opcode (cfg, ins); \
370 #define ADD_BINCOND(next_block) do { \
373 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
374 cmp->sreg1 = sp [0]->dreg; \
375 cmp->sreg2 = sp [1]->dreg; \
376 type_from_op (cmp, sp [0], sp [1]); \
378 type_from_op (ins, sp [0], sp [1]); \
379 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
380 GET_BBLOCK (cfg, tblock, target); \
381 link_bblock (cfg, bblock, tblock); \
382 ins->inst_true_bb = tblock; \
383 if ((next_block)) { \
384 link_bblock (cfg, bblock, (next_block)); \
385 ins->inst_false_bb = (next_block); \
386 start_new_bblock = 1; \
388 GET_BBLOCK (cfg, tblock, ip); \
389 link_bblock (cfg, bblock, tblock); \
390 ins->inst_false_bb = tblock; \
391 start_new_bblock = 2; \
393 if (sp != stack_start) { \
394 handle_stack_args (cfg, stack_start, sp - stack_start); \
395 CHECK_UNVERIFIABLE (cfg); \
397 MONO_ADD_INS (bblock, cmp); \
398 MONO_ADD_INS (bblock, ins); \
402 * link_bblock: Links two basic blocks
404 * links two basic blocks in the control flow graph, the 'from'
405 * argument is the starting block and the 'to' argument is the block
406 * the control flow ends to after 'from'.
/* Adds the edge to both adjacency lists (from->out_bb and to->in_bb),
 * skipping duplicates. Arrays are grown by copy into fresh mempool
 * allocations — the mempool has no realloc/free. */
409 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
411 MonoBasicBlock **newa;
/* Verbose tracing of the edge being added (entry/exit are NULL cil_code). */
415 if (from->cil_code) {
417 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
419 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
422 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
424 printf ("edge from entry to exit\n");
/* Duplicate check, then grow+copy the out-edge array of 'from'. */
429 for (i = 0; i < from->out_count; ++i) {
430 if (to == from->out_bb [i]) {
436 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
437 for (i = 0; i < from->out_count; ++i) {
438 newa [i] = from->out_bb [i];
/* Same for the in-edge array of 'to'. */
446 for (i = 0; i < to->in_count; ++i) {
447 if (from == to->in_bb [i]) {
453 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
454 for (i = 0; i < to->in_count; ++i) {
455 newa [i] = to->in_bb [i];
/* Public wrapper around the static link_bblock (). */
464 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
466 link_bblock (cfg, from, to);
470 * mono_find_block_region:
472 * We mark each basic block with a region ID. We use that to avoid BB
473 * optimizations when blocks are in different regions.
476 * A region token that encodes where this region is, and information
477 * about the clause owner for this block.
479 * The region encodes the try/catch/filter clause that owns this block
480 * as well as the type. -1 is a special value that represents a block
481 * that is in none of try/catch/filter.
/* Region token layout: (clause index + 1) << 8, OR'd with a MONO_REGION_*
 * kind and the clause flags. Filter ranges are checked first since a filter
 * block precedes its handler. */
484 mono_find_block_region (MonoCompile *cfg, int offset)
486 MonoMethodHeader *header = cfg->header;
487 MonoExceptionClause *clause;
490 for (i = 0; i < header->num_clauses; ++i) {
491 clause = &header->clauses [i];
492 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
493 (offset < (clause->handler_offset)))
494 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
496 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
497 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
498 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
499 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
500 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
/* Neither finally nor fault: must be a catch handler. */
502 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Inside the protected (try) range but not the handler. */
505 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
506 return ((i + 1) << 8) | clause->flags;
/* Collects (into a GList) the exception clauses of kind 'type' whose try
 * range covers 'ip' but not 'target' — i.e. the handlers that a branch from
 * ip to target would leave and whose handlers must therefore run. */
513 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
515 MonoMethodHeader *header = cfg->header;
516 MonoExceptionClause *clause;
520 for (i = 0; i < header->num_clauses; ++i) {
521 clause = &header->clauses [i];
522 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
523 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
524 if (clause->flags == type)
525 res = g_list_append (res, clause);
/* Returns the per-region stack-pointer spill variable, creating and caching
 * it in cfg->spvars on first use. MONO_INST_INDIRECT keeps it off registers. */
532 mono_create_spvar_for_region (MonoCompile *cfg, int region)
536 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
540 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
541 /* prevent it from being register allocated */
542 var->flags |= MONO_INST_INDIRECT;
544 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Pure lookup of the exception-object variable for a handler offset. */
548 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
550 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Lookup-or-create variant: same caching pattern as the spvar above, but the
 * variable is object-typed (it holds the in-flight exception). */
554 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
558 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
562 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
563 /* prevent it from being register allocated */
564 var->flags |= MONO_INST_INDIRECT;
566 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
572 * Returns the type used in the eval stack when @type is loaded.
573 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/* Classifies a MonoType into the coarse eval-stack categories (STACK_I4/I8/
 * PTR/R8/OBJ/MP/VTYPE/INV) and stores both category and klass on 'inst'.
 * Enums and generic instances loop back with the underlying type. */
576 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
580 inst->klass = klass = mono_class_from_mono_type (type);
/* byref arguments are managed pointers on the eval stack (elided branch). */
582 inst->type = STACK_MP;
587 switch (type->type) {
589 inst->type = STACK_INV;
593 case MONO_TYPE_BOOLEAN:
599 inst->type = STACK_I4;
604 case MONO_TYPE_FNPTR:
605 inst->type = STACK_PTR;
607 case MONO_TYPE_CLASS:
608 case MONO_TYPE_STRING:
609 case MONO_TYPE_OBJECT:
610 case MONO_TYPE_SZARRAY:
611 case MONO_TYPE_ARRAY:
612 inst->type = STACK_OBJ;
616 inst->type = STACK_I8;
620 inst->type = STACK_R8;
622 case MONO_TYPE_VALUETYPE:
623 if (type->data.klass->enumtype) {
624 type = mono_class_enum_basetype (type->data.klass);
628 inst->type = STACK_VTYPE;
631 case MONO_TYPE_TYPEDBYREF:
632 inst->klass = mono_defaults.typed_reference_class;
633 inst->type = STACK_VTYPE;
635 case MONO_TYPE_GENERICINST:
636 type = &type->data.generic_class->container_class->byval_arg;
639 case MONO_TYPE_MVAR :
640 /* FIXME: all the arguments must be references for now,
641 * later look inside cfg and see if the arg num is
/* Type/method variables only occur under generic sharing. */
644 g_assert (cfg->generic_sharing_context);
645 inst->type = STACK_OBJ;
648 g_error ("unknown type 0x%02x in eval stack type", type->type);
653 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result-type table for numeric binops, indexed [src1->type][src2->type].
 * Row/column order is the STACK_* enum: INV, I4, I8, PTR, R8, MP, OBJ, VTYPE. */
656 bin_num_table [STACK_MAX] [STACK_MAX] = {
657 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
658 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
659 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
660 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
661 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
662 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
663 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
664 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Result types for unary negation, indexed by operand STACK_* type. */
669 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
672 /* reduce the size of this table */
/* Integer-only binops (and/or/xor etc.): no R8/MP/OBJ operands allowed. */
674 bin_int_table [STACK_MAX] [STACK_MAX] = {
675 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
676 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
677 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
678 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
679 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
680 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
681 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
682 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparison legality matrix: 0 = invalid, non-zero codes classify the
 * allowed comparison (see uses in type_from_op below). */
686 bin_comp_table [STACK_MAX] [STACK_MAX] = {
687 /* Inv i L p F & O vt */
689 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
690 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
691 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
692 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
693 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
694 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
695 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
698 /* reduce the size of this table */
/* Shift ops: result takes the type of the value being shifted; the shift
 * amount must be I4 or PTR. */
700 shift_table [STACK_MAX] [STACK_MAX] = {
701 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
702 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
703 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
704 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
705 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
706 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
707 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
708 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
712 * Tables to map from the non-specific opcode to the matching
713 * type-specific opcode.
715 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
/* Each entry is the delta added to the generic CEE_* opcode to obtain the
 * type-specific OP_I*/OP_L*/OP_P*/OP_F* opcode, indexed by STACK_* type. */
717 binops_op_map [STACK_MAX] = {
718 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
721 /* handles from CEE_NEG to CEE_CONV_U8 */
723 unops_op_map [STACK_MAX] = {
724 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
727 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
729 ovfops_op_map [STACK_MAX] = {
730 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
733 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
735 ovf2ops_op_map [STACK_MAX] = {
736 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
739 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
741 ovf3ops_op_map [STACK_MAX] = {
742 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
745 /* handles from CEE_BEQ to CEE_BLT_UN */
747 beqops_op_map [STACK_MAX] = {
748 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
751 /* handles from CEE_CEQ to CEE_CLT_UN */
753 ceqops_op_map [STACK_MAX] = {
754 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
758 * Sets ins->type (the type on the eval stack) according to the
759 * type of the opcode and the arguments to it.
760 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
762 * FIXME: this function sets ins->type unconditionally in some cases, but
763 * it should set it to invalid for some types (a conv.x on an object)
/* Central opcode-typing switch: also rewrites the generic opcode into its
 * type-specific variant using the *_op_map delta tables above. Many case
 * labels and break statements are elided in this view. */
766 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
768 switch (ins->opcode) {
775 /* FIXME: check unverifiable args for STACK_MP */
/* Numeric binops (add/sub/mul/div/rem). */
776 ins->type = bin_num_table [src1->type] [src2->type];
777 ins->opcode += binops_op_map [ins->type];
/* Bitwise/integer binops. */
784 ins->type = bin_int_table [src1->type] [src2->type];
785 ins->opcode += binops_op_map [ins->type];
/* Shifts: result type from the shifted operand. */
790 ins->type = shift_table [src1->type] [src2->type];
791 ins->opcode += binops_op_map [ins->type];
/* OP_COMPARE: pick the long/float/int variant from the operand types;
 * pointer-sized operands take the long path on 64-bit targets. */
796 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
797 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
798 ins->opcode = OP_LCOMPARE;
799 else if (src1->type == STACK_R8)
800 ins->opcode = OP_FCOMPARE;
802 ins->opcode = OP_ICOMPARE;
804 case OP_ICOMPARE_IMM:
/* NOTE(review): indexes bin_comp_table with src1->type twice — likely
 * intentional for the immediate form (one stack operand), but confirm. */
805 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
806 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
807 ins->opcode = OP_LCOMPARE_IMM;
/* Conditional branches (beq..blt.un). */
819 ins->opcode += beqops_op_map [src1->type];
/* ceq: result is an I4 boolean. */
822 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
823 ins->opcode += ceqops_op_map [src1->type];
/* cgt/clt family: only comparison class 1 is valid here. */
829 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
830 ins->opcode += ceqops_op_map [src1->type];
/* neg */
834 ins->type = neg_table [src1->type];
835 ins->opcode += unops_op_map [ins->type];
/* not: only integer-ish operands keep their type. */
838 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
839 ins->type = src1->type;
841 ins->type = STACK_INV;
842 ins->opcode += unops_op_map [ins->type];
/* conv to small ints: result is I4. */
848 ins->type = STACK_I4;
849 ins->opcode += unops_op_map [src1->type];
/* conv.r.un */
852 ins->type = STACK_R8;
853 switch (src1->type) {
856 ins->opcode = OP_ICONV_TO_R_UN;
859 ins->opcode = OP_LCONV_TO_R_UN;
863 case CEE_CONV_OVF_I1:
864 case CEE_CONV_OVF_U1:
865 case CEE_CONV_OVF_I2:
866 case CEE_CONV_OVF_U2:
867 case CEE_CONV_OVF_I4:
868 case CEE_CONV_OVF_U4:
869 ins->type = STACK_I4;
870 ins->opcode += ovf3ops_op_map [src1->type];
872 case CEE_CONV_OVF_I_UN:
873 case CEE_CONV_OVF_U_UN:
874 ins->type = STACK_PTR;
875 ins->opcode += ovf2ops_op_map [src1->type];
877 case CEE_CONV_OVF_I1_UN:
878 case CEE_CONV_OVF_I2_UN:
879 case CEE_CONV_OVF_I4_UN:
880 case CEE_CONV_OVF_U1_UN:
881 case CEE_CONV_OVF_U2_UN:
882 case CEE_CONV_OVF_U4_UN:
883 ins->type = STACK_I4;
884 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: opcode depends on source type and register width. */
887 ins->type = STACK_PTR;
888 switch (src1->type) {
890 ins->opcode = OP_ICONV_TO_U;
894 #if SIZEOF_REGISTER == 8
895 ins->opcode = OP_LCONV_TO_U;
897 ins->opcode = OP_MOVE;
901 ins->opcode = OP_LCONV_TO_U;
904 ins->opcode = OP_FCONV_TO_U;
/* conv to 64-bit. */
910 ins->type = STACK_I8;
911 ins->opcode += unops_op_map [src1->type];
913 case CEE_CONV_OVF_I8:
914 case CEE_CONV_OVF_U8:
915 ins->type = STACK_I8;
916 ins->opcode += ovf3ops_op_map [src1->type];
918 case CEE_CONV_OVF_U8_UN:
919 case CEE_CONV_OVF_I8_UN:
920 ins->type = STACK_I8;
921 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.r4/conv.r8 */
925 ins->type = STACK_R8;
926 ins->opcode += unops_op_map [src1->type];
929 ins->type = STACK_R8;
/* overflow-checked narrowing to I4 / native int. */
933 ins->type = STACK_I4;
934 ins->opcode += ovfops_op_map [src1->type];
939 ins->type = STACK_PTR;
940 ins->opcode += ovfops_op_map [src1->type];
/* add/sub/mul.ovf: no R8 result allowed. */
948 ins->type = bin_num_table [src1->type] [src2->type];
949 ins->opcode += ovfops_op_map [src1->type];
950 if (ins->type == STACK_R8)
951 ins->type = STACK_INV;
953 case OP_LOAD_MEMBASE:
954 ins->type = STACK_PTR;
956 case OP_LOADI1_MEMBASE:
957 case OP_LOADU1_MEMBASE:
958 case OP_LOADI2_MEMBASE:
959 case OP_LOADU2_MEMBASE:
960 case OP_LOADI4_MEMBASE:
961 case OP_LOADU4_MEMBASE:
/* NOTE(review): sub-word loads are typed STACK_PTR here, not STACK_I4 —
 * kept as-is; confirm against upstream before changing. */
962 ins->type = STACK_PTR;
964 case OP_LOADI8_MEMBASE:
965 ins->type = STACK_I8;
967 case OP_LOADR4_MEMBASE:
968 case OP_LOADR8_MEMBASE:
969 ins->type = STACK_R8;
972 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers carry object_class as a placeholder klass. */
976 if (ins->type == STACK_MP)
977 ins->klass = mono_defaults.object_class;
/* Table mapping MONO_TYPE_* (partial view) to STACK_* categories. */
982 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
988 param_table [STACK_MAX] [STACK_MAX] = {
/* Verifies that the eval-stack types of 'args' are compatible with the
 * parameter types of 'sig' (and 'this', when present). Result/early-return
 * lines are elided in this view. */
993 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
997 switch (args->type) {
1007 for (i = 0; i < sig->param_count; ++i) {
1008 switch (args [i].type) {
1012 if (!sig->params [i]->byref)
1016 if (sig->params [i]->byref)
/* Object-typed argument: parameter must be a reference type. */
1018 switch (sig->params [i]->type) {
1019 case MONO_TYPE_CLASS:
1020 case MONO_TYPE_STRING:
1021 case MONO_TYPE_OBJECT:
1022 case MONO_TYPE_SZARRAY:
1023 case MONO_TYPE_ARRAY:
/* R8 argument: parameter must be a non-byref float type. */
1030 if (sig->params [i]->byref)
1032 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1041 /*if (!param_table [args [i].type] [sig->params [i]->type])
1049 * When we need a pointer to the current domain many times in a method, we
1050 * call mono_domain_get() once and we store the result in a local variable.
1051 * This function returns the variable that represents the MonoDomain*.
/* Lazily creates the cached domain variable (pointer-sized local). */
1053 inline static MonoInst *
1054 mono_get_domainvar (MonoCompile *cfg)
1056 if (!cfg->domainvar)
1057 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1058 return cfg->domainvar;
1062 * The got_var contains the address of the Global Offset Table when AOT
/* Lazily creates cfg->got_var; only meaningful when the arch needs an
 * explicit GOT variable and we are compiling AOT. */
1066 mono_get_got_var (MonoCompile *cfg)
1068 #ifdef MONO_ARCH_NEED_GOT_VAR
1069 if (!cfg->compile_aot)
1071 if (!cfg->got_var) {
1072 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1074 return cfg->got_var;
/* Lazily creates the rgctx/vtable variable used by shared generic code;
 * forced onto the stack so the runtime can locate it. */
1081 mono_get_vtable_var (MonoCompile *cfg)
1083 g_assert (cfg->generic_sharing_context);
1085 if (!cfg->rgctx_var) {
1086 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1087 /* force the var to be stack allocated */
1088 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1091 return cfg->rgctx_var;
/* Inverse of type_to_eval_stack_type: maps an instruction's STACK_* category
 * back to a representative MonoType (using ins->klass for MP/VTYPE cases). */
1095 type_from_stack_type (MonoInst *ins) {
1096 switch (ins->type) {
1097 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1098 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1099 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1100 case STACK_R8: return &mono_defaults.double_class->byval_arg;
/* Managed pointer: byref type of the pointed-to class. */
1102 return &ins->klass->this_arg;
1103 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1104 case STACK_VTYPE: return &ins->klass->byval_arg;
1106 g_error ("stack type %d to monotype not handled\n", ins->type);
/* Maps a MonoType (after stripping enum wrappers) to its STACK_* category.
 * Return statements for each case are elided in this view. */
1111 static G_GNUC_UNUSED int
1112 type_to_stack_type (MonoType *t)
1114 t = mono_type_get_underlying_type (t);
1118 case MONO_TYPE_BOOLEAN:
1121 case MONO_TYPE_CHAR:
1128 case MONO_TYPE_FNPTR:
1130 case MONO_TYPE_CLASS:
1131 case MONO_TYPE_STRING:
1132 case MONO_TYPE_OBJECT:
1133 case MONO_TYPE_SZARRAY:
1134 case MONO_TYPE_ARRAY:
1142 case MONO_TYPE_VALUETYPE:
1143 case MONO_TYPE_TYPEDBYREF:
1145 case MONO_TYPE_GENERICINST:
1146 if (mono_type_generic_inst_is_valuetype (t))
1152 g_assert_not_reached ();
/* Returns the element class accessed by a CEE_LDELEM_*/CEE_STELEM_* opcode
 * (case labels for most opcodes are elided in this view). */
1159 array_access_to_klass (int opcode)
1163 return mono_defaults.byte_class;
1165 return mono_defaults.uint16_class;
1168 return mono_defaults.int_class;
1171 return mono_defaults.sbyte_class;
1174 return mono_defaults.int16_class;
1177 return mono_defaults.int32_class;
1179 return mono_defaults.uint32_class;
1182 return mono_defaults.int64_class;
1185 return mono_defaults.single_class;
1188 return mono_defaults.double_class;
1189 case CEE_LDELEM_REF:
1190 case CEE_STELEM_REF:
1191 return mono_defaults.object_class;
1193 g_assert_not_reached ();
1199 * We try to share variables when possible
/* Returns a local variable to hold a value of ins's stack type at eval-stack
 * slot 'slot'. Simple integer/pointer types are cached in cfg->intvars (keyed
 * by type+slot) so join points reuse the same variable; other types always
 * get a fresh local. */
1202 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1207 /* inlining can result in deeper stacks */
1208 if (slot >= cfg->header->max_stack)
1209 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Cache key: one entry per (stack type, slot) pair. */
1211 pos = ins->type - 1 + slot * STACK_MAX;
1213 switch (ins->type) {
1220 if ((vnum = cfg->intvars [pos]))
1221 return cfg->varinfo [vnum];
1222 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1223 cfg->intvars [pos] = res->inst_c0;
/* Non-cacheable stack type: always create a new variable. */
1226 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* For AOT compilation, records which (image, token) pair produced the runtime
 * object 'key', so the AOT compiler can emit a reference to it later. */
1232 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1235 * Don't use this if a generic_context is set, since that means AOT can't
1236 * look up the method using just the image+token.
1237 * table == 0 means this is a reference made from a wrapper.
1239 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1240 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1241 jump_info_token->image = image;
1242 jump_info_token->token = token;
1243 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1248 * This function is called to handle items that are left on the evaluation stack
1249 * at basic block boundaries. What happens is that we save the values to local variables
1250 * and we reload them later when first entering the target basic block (with the
1251 * handle_loaded_temps () function).
1252 * A single joint point will use the same variables (stored in the array bb->out_stack or
1253 * bb->in_stack, if the basic block is before or after the joint point).
1255 * This function needs to be called _before_ emitting the last instruction of
1256 * the bb (i.e. before emitting a branch).
1257 * If the stack merge fails at a join point, cfg->unverifiable is set.
/* Spills the 'count' live eval-stack entries in sp[] into per-slot locals
 * shared with successor blocks, replacing sp[] entries with the locals. */
1260 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1263 MonoBasicBlock *bb = cfg->cbb;
1264 MonoBasicBlock *outb;
1265 MonoInst *inst, **locals;
1270 if (cfg->verbose_level > 3)
1271 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First visit: choose this block's out_stack. Prefer adopting an existing
 * successor in_stack; otherwise allocate fresh (or cached) variables. */
1272 if (!bb->out_scount) {
1273 bb->out_scount = count;
1274 //printf ("bblock %d has out:", bb->block_num);
1276 for (i = 0; i < bb->out_count; ++i) {
1277 outb = bb->out_bb [i];
1278 /* exception handlers are linked, but they should not be considered for stack args */
1279 if (outb->flags & BB_EXCEPTION_HANDLER)
1281 //printf (" %d", outb->block_num);
1282 if (outb->in_stack) {
1284 bb->out_stack = outb->in_stack;
1290 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1291 for (i = 0; i < count; ++i) {
1293 * try to reuse temps already allocated for this purpouse, if they occupy the same
1294 * stack slot and if they are of the same type.
1295 * This won't cause conflicts since if 'local' is used to
1296 * store one of the values in the in_stack of a bblock, then
1297 * the same variable will be used for the same outgoing stack
1299 * This doesn't work when inlining methods, since the bblocks
1300 * in the inlined methods do not inherit their in_stack from
1301 * the bblock they are inlined to. See bug #58863 for an
1304 if (cfg->inlined_method)
1305 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1307 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out_stack into successors that don't have an in_stack yet;
 * a successor with a mismatched in_scount makes the method unverifiable. */
1312 for (i = 0; i < bb->out_count; ++i) {
1313 outb = bb->out_bb [i];
1314 /* exception handlers are linked, but they should not be considered for stack args */
1315 if (outb->flags & BB_EXCEPTION_HANDLER)
1317 if (outb->in_scount) {
1318 if (outb->in_scount != bb->out_scount) {
1319 cfg->unverifiable = TRUE;
1322 continue; /* check they are the same locals */
1324 outb->in_scount = count;
1325 outb->in_stack = bb->out_stack;
1328 locals = bb->out_stack;
/* Emit the stores of the live values into the chosen locals. */
1330 for (i = 0; i < count; ++i) {
1331 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1332 inst->cil_code = sp [i]->cil_code;
1333 sp [i] = locals [i];
1334 if (cfg->verbose_level > 3)
1335 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1339 * It is possible that the out bblocks already have in_stack assigned, and
1340 * the in_stacks differ. In this case, we will store to all the different
1347 /* Find a bblock which has a different in_stack */
1349 while (bindex < bb->out_count) {
1350 outb = bb->out_bb [bindex];
1351 /* exception handlers are linked, but they should not be considered for stack args */
1352 if (outb->flags & BB_EXCEPTION_HANDLER) {
1356 if (outb->in_stack != locals) {
1357 for (i = 0; i < count; ++i) {
1358 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1359 inst->cil_code = sp [i]->cil_code;
1360 sp [i] = locals [i];
1361 if (cfg->verbose_level > 3)
1362 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1364 locals = outb->in_stack;
1373 /* Emit code which loads interface_offsets [klass->interface_id]
1374 * The array is stored in memory before vtable.
1377 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1379 if (cfg->compile_aot) {
/* AOT: the interface id is not known at compile time, so load an
 * "adjusted iid" constant (already scaled/negated for indexing before
 * the vtable) and add it to the vtable pointer. */
1380 int ioffset_reg = alloc_preg (cfg);
1381 int iid_reg = alloc_preg (cfg);
1383 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1384 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1385 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT: index directly at the negative offset before the vtable. */
1388 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_interface_bitmap_check:
 *   Emit code leaving a nonzero value in intf_bit_reg iff the interface
 *   bitmap found at [base_reg + offset] has the bit for klass's
 *   interface_id set.  Three strategies: compressed bitmap via icall,
 *   AOT bit extraction, or a direct JIT-time byte/mask test.
 *   NOTE(review): some original lines are elided in this excerpt.
 */
1393 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1395 int ibitmap_reg = alloc_preg (cfg);
1396 #ifdef COMPRESSED_INTERFACE_BITMAP
/* Compressed bitmap: delegate the bit test to the
 * mono_class_interface_match icall (bitmap pointer + iid as args). */
1398 MonoInst *res, *ins;
1399 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1400 MONO_ADD_INS (cfg->cbb, ins);
1402 if (cfg->compile_aot)
1403 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1405 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1406 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1407 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1409 int ibitmap_byte_reg = alloc_preg (cfg);
1411 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1413 if (cfg->compile_aot) {
/* AOT: iid unknown until load time, so compute byte index (iid >> 3)
 * and bit mask (1 << (iid & 7)) in emitted code. */
1414 int iid_reg = alloc_preg (cfg);
1415 int shifted_iid_reg = alloc_preg (cfg);
1416 int ibitmap_byte_address_reg = alloc_preg (cfg);
1417 int masked_iid_reg = alloc_preg (cfg);
1418 int iid_one_bit_reg = alloc_preg (cfg);
1419 int iid_bit_reg = alloc_preg (cfg);
1420 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1421 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1422 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1423 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1424 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1425 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1426 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1427 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: byte index and mask are compile-time constants. */
1429 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1430 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1436 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1437 * stored in "klass_reg" implements the interface "klass".
1440 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
/* Thin wrapper: test the bitmap at MonoClass.interface_bitmap. */
1442 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1446 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1447 * stored in "vtable_reg" implements the interface "klass".
1450 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
/* Thin wrapper: test the bitmap at MonoVTable.interface_bitmap. */
1452 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1456 * Emit code which checks whether the interface id of @klass is smaller
1457 * than the value given by max_iid_reg.
1460 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1461 MonoBasicBlock *false_target)
1463 if (cfg->compile_aot) {
/* AOT: compare against an iid loaded as a patchable constant. */
1464 int iid_reg = alloc_preg (cfg);
1465 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1466 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1469 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
/* With a false_target, branch there on failure; otherwise throw
 * InvalidCastException.  (Elided lines select between the two.) */
1471 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1473 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1476 /* Same as above, but obtains max_iid from a vtable */
1478 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1479 MonoBasicBlock *false_target)
1481 int max_iid_reg = alloc_preg (cfg);
/* Load MonoVTable.max_interface_id (16-bit, zero-extended) then delegate. */
1483 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1484 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1487 /* Same as above, but obtains max_iid from a klass */
1489 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1490 MonoBasicBlock *false_target)
1492 int max_iid_reg = alloc_preg (cfg);
/* Load MonoClass.max_interface_id (16-bit, zero-extended) then delegate. */
1494 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1495 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 *   Emit an "isinst"-style subtype test of the class in klass_reg
 *   against @klass using the supertypes table: check idepth, load
 *   supertypes[klass->idepth - 1], compare it to the target class
 *   (from klass_ins, a class const, or an immediate) and branch to
 *   true_target on match.  NOTE(review): some lines are elided here.
 */
1499 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1501 int idepth_reg = alloc_preg (cfg);
1502 int stypes_reg = alloc_preg (cfg);
1503 int stype = alloc_preg (cfg);
/* Only emit the depth guard when the candidate's supertable entry may
 * not exist for shallow hierarchies. */
1505 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1506 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1507 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1508 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1510 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1511 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
/* Compare against the target class: a runtime MonoInst, an AOT class
 * constant, or a direct pointer immediate (elided lines pick the case). */
1513 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1514 } else if (cfg->compile_aot) {
1515 int const_reg = alloc_preg (cfg);
1516 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1517 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1519 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1521 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: subtype test with no runtime class instruction. */
1525 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1527 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 *   Emit an interface cast check against a vtable in vtable_reg:
 *   guard the iid range, test the interface bitmap bit, then either
 *   branch to true_target or throw InvalidCastException on failure
 *   (elided lines select between branch and throw forms).
 */
1531 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1533 int intf_reg = alloc_preg (cfg);
1535 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1536 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1537 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1539 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1541 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1545 * Variant of the above that takes a register to the class, not the vtable.
1548 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1550 int intf_bit_reg = alloc_preg (cfg);
1552 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1553 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1554 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
/* Nonzero bit => interface implemented; otherwise throw (elided lines
 * choose branch vs. exception form). */
1556 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1558 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 *   Emit an exact class-equality check of klass_reg against @klass
 *   (runtime inst / AOT class const / pointer immediate), throwing
 *   InvalidCastException on mismatch.
 */
1562 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1565 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1566 } else if (cfg->compile_aot) {
1567 int const_reg = alloc_preg (cfg);
1568 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1569 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1571 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1573 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact class check with no runtime class inst. */
1577 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1579 return mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 *   Compare klass_reg against @klass and branch to target using the
 *   given branch opcode (no exception is thrown here).
 */
1583 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1585 if (cfg->compile_aot) {
1586 int const_reg = alloc_preg (cfg);
1587 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1588 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1590 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1592 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/* Forward declaration: mini_emit_castclass is defined below and the two
 * functions are mutually recursive (arrays of arrays). */
1596 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 *   Emit a full castclass check of the class in klass_reg against
 *   @klass: array casts check rank + element class (recursing for
 *   arrays of arrays), non-array casts walk the supertypes table.
 *   NOTE(review): this excerpt elides some lines (the outer branch on
 *   klass->rank, braces); comments cover only the visible code.
 */
1599 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1602 int rank_reg = alloc_preg (cfg);
1603 int eclass_reg = alloc_preg (cfg);
1605 g_assert (!klass_inst);
/* Array path: the object's rank must match exactly. */
1606 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1607 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1608 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1609 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1610 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Element-class checks; the enum/object special cases mirror the
 * runtime's array covariance rules for enums and their base types. */
1611 if (klass->cast_class == mono_defaults.object_class) {
1612 int parent_reg = alloc_preg (cfg);
1613 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1614 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1615 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1616 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1617 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1618 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1619 } else if (klass->cast_class == mono_defaults.enum_class) {
1620 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1621 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1622 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1624 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1625 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1628 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1629 /* Check that the object is a vector too */
1630 int bounds_reg = alloc_preg (cfg);
1631 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1632 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1633 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: supertypes-table subtype check (same scheme as
 * mini_emit_isninst_cast_inst, but throwing instead of branching). */
1636 int idepth_reg = alloc_preg (cfg);
1637 int stypes_reg = alloc_preg (cfg);
1638 int stype = alloc_preg (cfg);
1640 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1641 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1642 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1643 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1645 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1646 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1647 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass with no runtime class instruction. */
1652 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1654 return mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 *   Emit inline code to set 'size' bytes at [destreg + offset] to 'val'
 *   (only val == 0 is supported, see the assert).  Small aligned sizes
 *   use immediate stores; larger sizes fall back to a register-sourced
 *   store loop, widest stores first, narrowing as 'size' shrinks.
 *   NOTE(review): loop/brace lines are elided in this excerpt.
 */
1658 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1662 g_assert (val == 0);
/* Fast path: a single immediate store when size fits the alignment. */
1667 if ((size <= 4) && (size <= align)) {
1670 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1673 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1676 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1678 #if SIZEOF_REGISTER == 8
1680 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* Slow path: materialize the value in a register once, then store. */
1686 val_reg = alloc_preg (cfg);
1688 if (SIZEOF_REGISTER == 8)
1689 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1691 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned destination: emit byte stores. */
1694 /* This could be optimized further if neccesary */
1696 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1703 #if !NO_UNALIGNED_ACCESS
1704 if (SIZEOF_REGISTER == 8) {
/* Align to 8 with a 4-byte store, then use 8-byte stores. */
1706 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1711 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* Remaining tail: 4-, 2-, then 1-byte stores. */
1719 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1724 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1729 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 *   Emit inline load/store pairs copying 'size' bytes from
 *   [srcreg + soffset] to [destreg + doffset], using the widest
 *   accesses the alignment permits (8/4/2/1), narrowing for the tail.
 *   NOTE(review): loop/brace lines are elided in this excerpt.
 */
1736 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1743 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1744 g_assert (size < 10000);
/* Unaligned: copy byte by byte. */
1747 /* This could be optimized further if necessary */
1749 cur_reg = alloc_preg (cfg);
1750 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1751 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1758 #if !NO_UNALIGNED_ACCESS
1759 if (SIZEOF_REGISTER == 8) {
/* 64-bit registers: copy 8 bytes at a time. */
1761 cur_reg = alloc_preg (cfg);
1762 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1763 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Tail: 4-, 2-, then 1-byte copies. */
1772 cur_reg = alloc_preg (cfg);
1773 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1774 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1780 cur_reg = alloc_preg (cfg);
1781 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1782 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1788 cur_reg = alloc_preg (cfg);
1789 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1790 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * ret_type_to_call_opcode:
 *   Map a method's return type to the matching call opcode, selecting
 *   among direct / indirect (calli) / virtual call variants and the
 *   void/fp/long/vtype families.  Enums and generic insts are reduced
 *   to their underlying type and the switch is re-entered (elided
 *   'goto'/case lines sit between the visible ones in this excerpt).
 */
1798 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
/* Byref returns are pointer-sized => plain CALL family. */
1801 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1804 type = mini_get_basic_type_from_generic (gsctx, type);
1805 switch (type->type) {
1806 case MONO_TYPE_VOID:
1807 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1810 case MONO_TYPE_BOOLEAN:
1813 case MONO_TYPE_CHAR:
1816 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1820 case MONO_TYPE_FNPTR:
1821 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1822 case MONO_TYPE_CLASS:
1823 case MONO_TYPE_STRING:
1824 case MONO_TYPE_OBJECT:
1825 case MONO_TYPE_SZARRAY:
1826 case MONO_TYPE_ARRAY:
1827 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1830 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1833 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1834 case MONO_TYPE_VALUETYPE:
/* Enums collapse to their base type and retry the switch. */
1835 if (type->data.klass->enumtype) {
1836 type = mono_class_enum_basetype (type->data.klass);
1839 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1840 case MONO_TYPE_TYPEDBYREF:
1841 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1842 case MONO_TYPE_GENERICINST:
/* Reduce to the generic container's open type and retry. */
1843 type = &type->data.generic_class->container_class->byval_arg;
1846 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1852 * target_type_is_incompatible:
1853 * @cfg: MonoCompile context
1855 * Check that the item @arg on the evaluation stack can be stored
1856 * in the target type (can be a local, or field, etc).
1857 * The cfg arg can be used to check if we need verification or just
1860 * Returns: non-0 value if arg can't be stored on a target.
1863 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1865 MonoType *simple_type;
/* Byref targets accept managed pointers of the matching class and
 * (per elided lines) plain pointers. */
1868 if (target->byref) {
1869 /* FIXME: check that the pointed to types match */
1870 if (arg->type == STACK_MP)
1871 return arg->klass != mono_class_from_mono_type (target);
1872 if (arg->type == STACK_PTR)
1877 simple_type = mono_type_get_underlying_type (target);
/* Compare the target's CLI type against the verifier stack type of
 * the value (STACK_I4/I8/R8/PTR/MP/OBJ/VTYPE). */
1878 switch (simple_type->type) {
1879 case MONO_TYPE_VOID:
1883 case MONO_TYPE_BOOLEAN:
1886 case MONO_TYPE_CHAR:
1889 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1893 /* STACK_MP is needed when setting pinned locals */
1894 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1899 case MONO_TYPE_FNPTR:
1900 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1903 case MONO_TYPE_CLASS:
1904 case MONO_TYPE_STRING:
1905 case MONO_TYPE_OBJECT:
1906 case MONO_TYPE_SZARRAY:
1907 case MONO_TYPE_ARRAY:
1908 if (arg->type != STACK_OBJ)
1910 /* FIXME: check type compatibility */
1914 if (arg->type != STACK_I8)
1919 if (arg->type != STACK_R8)
/* Value types must match both the stack kind and the exact class. */
1922 case MONO_TYPE_VALUETYPE:
1923 if (arg->type != STACK_VTYPE)
1925 klass = mono_class_from_mono_type (simple_type);
1926 if (klass != arg->klass)
1929 case MONO_TYPE_TYPEDBYREF:
1930 if (arg->type != STACK_VTYPE)
1932 klass = mono_class_from_mono_type (simple_type);
1933 if (klass != arg->klass)
1936 case MONO_TYPE_GENERICINST:
1937 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1938 if (arg->type != STACK_VTYPE)
1940 klass = mono_class_from_mono_type (simple_type);
1941 if (klass != arg->klass)
1945 if (arg->type != STACK_OBJ)
1947 /* FIXME: check type compatibility */
1951 case MONO_TYPE_MVAR:
1952 /* FIXME: all the arguments must be references for now,
1953 * later look inside cfg and see if the arg num is
1954 * really a reference
1956 g_assert (cfg->generic_sharing_context);
1957 if (arg->type != STACK_OBJ)
1961 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1967 * Prepare arguments for passing to a function call.
1968 * Return a non-zero value if the arguments can't be passed to the given
1970 * The type checks are not yet complete and some conversions may need
1971 * casts on 32 or 64 bit architectures.
1973 * FIXME: implement this using target_type_is_incompatible ()
1976 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1978 MonoType *simple_type;
/* 'this' (args[0], when hasthis — guard elided here) must be a
 * reference, managed pointer, or native pointer. */
1982 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1986 for (i = 0; i < sig->param_count; ++i) {
1987 if (sig->params [i]->byref) {
1988 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1992 simple_type = sig->params [i];
1993 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
/* Same stack-kind matching scheme as target_type_is_incompatible;
 * enums and generic insts are reduced and the switch re-entered. */
1995 switch (simple_type->type) {
1996 case MONO_TYPE_VOID:
2001 case MONO_TYPE_BOOLEAN:
2004 case MONO_TYPE_CHAR:
2007 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2013 case MONO_TYPE_FNPTR:
2014 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2017 case MONO_TYPE_CLASS:
2018 case MONO_TYPE_STRING:
2019 case MONO_TYPE_OBJECT:
2020 case MONO_TYPE_SZARRAY:
2021 case MONO_TYPE_ARRAY:
2022 if (args [i]->type != STACK_OBJ)
2027 if (args [i]->type != STACK_I8)
2032 if (args [i]->type != STACK_R8)
2035 case MONO_TYPE_VALUETYPE:
2036 if (simple_type->data.klass->enumtype) {
2037 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2040 if (args [i]->type != STACK_VTYPE)
2043 case MONO_TYPE_TYPEDBYREF:
2044 if (args [i]->type != STACK_VTYPE)
2047 case MONO_TYPE_GENERICINST:
2048 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2052 g_error ("unknown type 0x%02x in check_call_signature",
/* Map an OP_*CALLVIRT opcode to its direct OP_*CALL counterpart
 * (elided case lines handle the other call families). */
2060 callvirt_to_call (int opcode)
2065 case OP_VOIDCALLVIRT:
2074 g_assert_not_reached ();
/* Map an OP_*CALLVIRT opcode to the OP_*CALL_MEMBASE form used for
 * vtable-slot (indirect, membase-addressed) calls. */
2081 callvirt_to_call_membase (int opcode)
2085 return OP_CALL_MEMBASE;
2086 case OP_VOIDCALLVIRT:
2087 return OP_VOIDCALL_MEMBASE;
2089 return OP_FCALL_MEMBASE;
2091 return OP_LCALL_MEMBASE;
2093 return OP_VCALL_MEMBASE;
2095 g_assert_not_reached ();
2101 #ifdef MONO_ARCH_HAVE_IMT
/*
 * emit_imt_argument:
 *   Materialize the IMT "hidden" argument for an interface call: either
 *   the supplied imt_arg, or the target MonoMethod as a (possibly AOT-
 *   patched) constant, and attach it to the call — via a fixed IMT
 *   register, or the arch backend when no MONO_ARCH_IMT_REG exists.
 *   NOTE(review): #else/#endif lines are elided in this excerpt.
 */
2103 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
/* LLVM backend: pass the method in a vreg recorded in imt_arg_reg. */
2107 if (COMPILE_LLVM (cfg)) {
2108 method_reg = alloc_preg (cfg);
2111 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2112 } else if (cfg->compile_aot) {
2113 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2116 MONO_INST_NEW (cfg, ins, OP_PCONST);
2117 ins->inst_p0 = call->method;
2118 ins->dreg = method_reg;
2119 MONO_ADD_INS (cfg->cbb, ins);
2123 call->imt_arg_reg = method_reg;
2125 #ifdef MONO_ARCH_IMT_REG
2126 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2128 /* Need this to keep the IMT arg alive */
2129 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
/* Non-LLVM path: same constant materialization, pinned to the
 * dedicated IMT register, or handed to the arch backend. */
2134 #ifdef MONO_ARCH_IMT_REG
2135 method_reg = alloc_preg (cfg);
2138 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2139 } else if (cfg->compile_aot) {
2140 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2143 MONO_INST_NEW (cfg, ins, OP_PCONST);
2144 ins->inst_p0 = call->method;
2145 ins->dreg = method_reg;
2146 MONO_ADD_INS (cfg->cbb, ins);
2149 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2151 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/* Allocate a MonoJumpInfo from the given mempool and fill in its
 * ip/type/target fields (some assignments elided in this excerpt). */
2156 static MonoJumpInfo *
2157 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2159 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2163 ji->data.target = target;
/*
 * mono_emit_call_args:
 *   Build a MonoCallInst for a call with the given signature: choose the
 *   opcode (tailcall / calli / virtual / direct, per return type), set up
 *   valuetype-return handling, do soft-float r4 argument conversion, and
 *   let the backend (LLVM or arch) lay out the out-args.  The caller is
 *   responsible for adding the instruction to a bblock.
 *   NOTE(review): some lines (braces, #endifs, return) elided here.
 */
2168 inline static MonoCallInst *
2169 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2170 MonoInst **args, int calli, int virtual, int tail)
2173 #ifdef MONO_ARCH_SOFT_FLOAT
2178 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2180 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2183 call->signature = sig;
2185 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
/* Valuetype return: either reuse cfg->vret_addr (hidden-arg ABI path,
 * guard elided) or allocate a temp and reference it via VTRETADDR. */
2188 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2189 call->vret_var = cfg->vret_addr;
2190 //g_assert_not_reached ();
2192 } else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2193 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2196 temp->backend.is_pinvoke = sig->pinvoke;
2199 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2200 * address of return value to increase optimization opportunities.
2201 * Before vtype decomposition, the dreg of the call ins itself represents the
2202 * fact the call modifies the return value. After decomposition, the call will
2203 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2204 * will be transformed into an LDADDR.
2206 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2207 loada->dreg = alloc_preg (cfg);
2208 loada->inst_p0 = temp;
2209 /* We reference the call too since call->dreg could change during optimization */
2210 loada->inst_p1 = call;
2211 MONO_ADD_INS (cfg->cbb, loada);
2213 call->inst.dreg = temp->dreg;
2215 call->vret_var = loada;
2216 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2217 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2219 #ifdef MONO_ARCH_SOFT_FLOAT
2220 if (COMPILE_SOFT_FLOAT (cfg)) {
2222 * If the call has a float argument, we would need to do an r8->r4 conversion using
2223 * an icall, but that cannot be done during the call sequence since it would clobber
2224 * the call registers + the stack. So we do it before emitting the call.
2226 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2228 MonoInst *in = call->args [i];
/* 'this' has no signature entry; treat it as native int. */
2230 if (i >= sig->hasthis)
2231 t = sig->params [i - sig->hasthis];
2233 t = &mono_defaults.int_class->byval_arg;
2234 t = mono_type_get_underlying_type (t);
2236 if (!t->byref && t->type == MONO_TYPE_R4) {
2237 MonoInst *iargs [1];
2241 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2243 /* The result will be in an int vreg */
2244 call->args [i] = conv;
/* Hand the call to the backend to emit out-args / calling convention. */
2251 if (COMPILE_LLVM (cfg))
2252 mono_llvm_emit_call (cfg, call);
2254 mono_arch_emit_call (cfg, call);
2256 mono_arch_emit_call (cfg, call);
/* Track the largest outgoing param area and mark the method as calling. */
2259 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2260 cfg->flags |= MONO_CFG_HAS_CALLS;
/* Emit an indirect call through the address in 'addr'. */
2265 inline static MonoInst*
2266 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2268 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE);
2270 call->inst.sreg1 = addr->dreg;
2272 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2274 return (MonoInst*)call;
/* Attach the runtime-generic-context argument to a call: via the
 * dedicated MONO_ARCH_RGCTX_REG when the arch has one, otherwise
 * by recording the vreg in rgctx_arg_reg. */
2278 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2280 #ifdef MONO_ARCH_RGCTX_REG
2281 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2282 cfg->uses_rgctx_reg = TRUE;
2283 call->rgctx_reg = TRUE;
2285 call->rgctx_arg_reg = rgctx_reg;
/* Indirect call that also carries an rgctx argument: copy the rgctx
 * value into a fresh vreg (so it survives out-arg setup) before the
 * calli, then bind it to the call. */
2292 inline static MonoInst*
2293 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2299 rgctx_reg = mono_alloc_preg (cfg);
2300 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2302 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2304 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2305 return (MonoInst*)call;
/* Forward declarations for the rgctx lookup helpers defined later. */
2309 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
2311 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, int rgctx_type);
/*
 * mono_emit_method_call_full:
 *   Central helper emitting a managed method call.  Handles: string
 *   ctor signature fixup, transparent-proxy/remoting wrappers, direct
 *   dispatch of non-virtual and sealed methods, delegate Invoke fast
 *   path, and full virtual dispatch through the vtable or IMT.
 *   NOTE(review): several lines are elided in this excerpt; comments
 *   describe only the visible code.
 */
2314 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2315 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2317 gboolean might_be_remote;
2318 gboolean virtual = this != NULL;
2319 gboolean enable_for_aot = TRUE;
2323 if (method->string_ctor) {
2324 /* Create the real signature */
2325 /* FIXME: Cache these */
/* String ctors actually return the string, not void. */
2326 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2327 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2332 might_be_remote = this && sig->hasthis &&
2333 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2334 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this);
2336 context_used = mono_method_check_context_used (method);
2337 if (might_be_remote && context_used) {
/* Gshared + possible remoting: resolve the checked remoting-invoke
 * wrapper address through the rgctx and call indirectly. */
2340 g_assert (cfg->generic_sharing_context);
2342 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2344 return mono_emit_calli (cfg, sig, args, addr);
2347 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE);
2349 if (might_be_remote)
2350 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2352 call->method = method;
2353 call->inst.flags |= MONO_INST_HAS_METHOD;
2354 call->inst.inst_left = this;
2357 int vtable_reg, slot_reg, this_reg;
2359 this_reg = this->dreg;
2361 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2362 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2363 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2365 /* Make a call to delegate->invoke_impl */
2366 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2367 call->inst.inst_basereg = this_reg;
2368 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2369 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2371 return (MonoInst*)call;
/* Direct dispatch: method is non-virtual, or final (and not a
 * remoting wrapper), and not a gshared marshalbyref case. */
2375 if ((!cfg->compile_aot || enable_for_aot) &&
2376 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2377 (MONO_METHOD_IS_FINAL (method) &&
2378 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2379 !(method->klass->marshalbyref && context_used)) {
2381 * the method is not virtual, we just need to ensure this is not null
2382 * and then we can call the method directly.
2384 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2386 * The check above ensures method is not gshared, this is needed since
2387 * gshared methods can't have wrappers.
2389 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2392 if (!method->string_ctor)
2393 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2395 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2397 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2399 return (MonoInst*)call;
2402 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2404 * the method is virtual, but we can statically dispatch since either
2405 * it's class or the method itself are sealed.
2406 * But first we need to ensure it's not a null reference.
2408 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2410 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2411 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2413 return (MonoInst*)call;
/* True virtual dispatch: load the vtable (with a null-check fault)
 * and call through the appropriate slot. */
2416 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2418 vtable_reg = alloc_preg (cfg);
2419 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2420 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2422 #ifdef MONO_ARCH_HAVE_IMT
/* Interface call via IMT: slot sits before the vtable. */
2424 guint32 imt_slot = mono_method_get_imt_slot (method);
2425 emit_imt_argument (cfg, call, imt_arg);
2426 slot_reg = vtable_reg;
2427 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2430 if (slot_reg == -1) {
/* No IMT: go through the interface-offsets table. */
2431 slot_reg = alloc_preg (cfg);
2432 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2433 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
/* Class virtual call: plain vtable slot. */
2436 slot_reg = vtable_reg;
2437 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2438 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2439 #ifdef MONO_ARCH_HAVE_IMT
2441 g_assert (mono_method_signature (method)->generic_param_count);
2442 emit_imt_argument (cfg, call, imt_arg);
2447 call->inst.sreg1 = slot_reg;
2448 call->virtual = TRUE;
2451 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2453 return (MonoInst*)call;
/* Method call that also passes a vtable/rgctx argument: stash it in a
 * fresh vreg, emit the call, then bind the rgctx arg to it. */
2457 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2458 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
2465 rgctx_reg = mono_alloc_preg (cfg);
2466 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2468 ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2470 call = (MonoCallInst*)ins;
2472 set_rgctx_arg (cfg, call, rgctx_reg, vtable_arg);
/* Convenience wrapper: call with the method's own signature, no IMT arg. */
2478 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2480 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
/*
 * mono_emit_native_call:
 *
 *   Emit a call to the native function FUNC with signature SIG.  The call is
 * built with mono_emit_call_args () (no virtual dispatch, no tail call) and
 * appended to the current basic block.
 */
2484 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2491 	call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE);
2494 	MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2496 	return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *
 *   Emit a call to the JIT icall identified by its native address FUNC.
 * The icall's registered wrapper and signature are looked up from the
 * global icall table; FUNC must have been registered beforehand.
 */
2500 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2502 	MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2506 	return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2510  * mono_emit_abs_call:
2512  *   Emit a call to the runtime function described by PATCH_TYPE and DATA.
2514 inline static MonoInst*
2515 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2516 					MonoMethodSignature *sig, MonoInst **args)
2518 	MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2522 	 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Lazily create the abs_patches table mapping the patch-info "address" back
 * to its MonoJumpInfo, so the resolver can recognize it later. */
2525 	if (cfg->abs_patches == NULL)
2526 		cfg->abs_patches = g_hash_table_new (NULL, NULL);
2527 	g_hash_table_insert (cfg->abs_patches, ji, ji);
2528 	ins = mono_emit_native_call (cfg, ji, sig, args);
/* Mark the call so later passes know the fptr is a patch, not a real address. */
2529 	((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *
 *   Sign/zero-extend the result INS of a call with signature FSIG when needed.
 * Applies to pinvoke calls (or when LLVM is enabled) returning non-byref,
 * sub-register-sized integers, since native code may leave the upper bits of
 * the return register uninitialized.
 */
2534 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2536 	if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2537 		if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2541 			 * Native code might return non register sized integers
2542 			 * without initializing the upper bits.
/* Pick the widening conversion matching the declared return width/signedness. */
2544 			switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2545 			case OP_LOADI1_MEMBASE:
2546 				widen_op = OP_ICONV_TO_I1;
2548 			case OP_LOADU1_MEMBASE:
2549 				widen_op = OP_ICONV_TO_U1;
2551 			case OP_LOADI2_MEMBASE:
2552 				widen_op = OP_ICONV_TO_I2;
2554 			case OP_LOADU2_MEMBASE:
2555 				widen_op = OP_ICONV_TO_U2;
2561 			if (widen_op != -1) {
2562 				int dreg = alloc_preg (cfg);
/* Emit the widening move into a fresh register, preserving the stack type. */
2565 				EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2566 				widen->type = ins->type;
/*
 * get_memcpy_method:
 *
 *   Return (and cache in a static) the managed String.memcpy(3 args) helper
 * from corlib.  Aborts with g_error if corlib does not provide it.
 */
2576 get_memcpy_method (void)
2578 	static MonoMethod *memcpy_method = NULL;
2579 	if (!memcpy_method) {
2580 		memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2582 			g_error ("Old corlib found. Install a new one");
2584 	return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 *   Fill WB_BITMAP with one bit per pointer-sized word of KLASS (starting at
 * word OFFSET) that holds an object reference, recursing into value-type
 * fields that themselves contain references.  Used to tell the GC which words
 * of a copied value need write barriers.  Callers must keep the total size
 * within 32 words, since the bitmap is a single `unsigned`.
 */
2588 create_write_barrier_bitmap (MonoClass *klass, unsigned *wb_bitmap, int offset)
2590 	MonoClassField *field;
2591 	gpointer iter = NULL;
2593 	while ((field = mono_class_get_fields (klass, &iter))) {
/* Static fields live in the vtable, not in the instance/value layout. */
2596 		if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* For valuetypes, field offsets include the (absent) MonoObject header. */
2598 		foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2599 		if (mono_type_is_reference (field->type)) {
2600 			g_assert ((foffset % SIZEOF_VOID_P) == 0);
2601 			*wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2603 			/*FIXME support nested value types so this works for: struct X { Y y; int z;} struct Y { object a,b; }*/
2604 			MonoClass *field_class = mono_class_from_mono_type (field->type);
2605 			if (field_class->has_references)
2606 				create_write_barrier_bitmap (field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *
 *   Emit a GC write barrier for a store of VALUE (or of register VALUE_REG if
 * VALUE is NULL) through pointer PTR.  No-op unless cfg->gen_write_barriers.
 * Three strategies, in order of preference:
 *   1. a dedicated OP_CARD_TABLE_WBARRIER instruction (arch support, non-AOT);
 *   2. inline card-table marking (shift, mask, store 1 into the card byte);
 *   3. a call to the generic managed write-barrier method.
 * Finally a dummy use of the value is emitted to keep it alive.
 */
2612 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value, int value_reg)
2614 	int card_table_shift_bits;
2615 	gpointer card_table_mask;
2617 	MonoInst *dummy_use;
2618 	int nursery_shift_bits;
2619 	size_t nursery_size;
2620 	gboolean has_card_table_wb = FALSE;
2622 	if (!cfg->gen_write_barriers)
2625 	card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
2627 	mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
2629 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
2630 	has_card_table_wb = TRUE;
/* Strategy 1: single arch-specific write-barrier instruction. */
2633 	if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0) {
2636 		MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2637 		wbarrier->sreg1 = ptr->dreg;
2639 			wbarrier->sreg2 = value->dreg;
2641 			wbarrier->sreg2 = value_reg;
2642 		MONO_ADD_INS (cfg->cbb, wbarrier);
/* Strategy 2: mark the card byte for PTR's address inline. */
2643 	} else if (card_table) {
2644 		int offset_reg = alloc_preg (cfg);
2645 		int card_reg = alloc_preg (cfg);
2648 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
2649 		if (card_table_mask)
2650 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
2652 		/*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
2653 		 * IMM's larger than 32bits.
2655 		if (cfg->compile_aot) {
2656 			MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
2658 			MONO_INST_NEW (cfg, ins, OP_PCONST);
2659 			ins->inst_p0 = card_table;
2660 			ins->dreg = card_reg;
2661 			MONO_ADD_INS (cfg->cbb, ins);
/* Store 1 into card_table [addr >> shift] to mark the card dirty. */
2664 		MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
2665 		MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* Strategy 3: fall back to the managed write-barrier helper. */
2667 		MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2668 		mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep the stored value observable/alive past the barrier. */
2672 		EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
2674 		MONO_INST_NEW (cfg, dummy_use, OP_DUMMY_USE);
2675 		dummy_use->sreg1 = value_reg;
2676 		MONO_ADD_INS (cfg->cbb, dummy_use);
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Emit a write-barrier-aware copy of a value of type KLASS of SIZE bytes from
 * iargs[1] to iargs[0].  Small copies (<= 5 words) are unrolled inline with a
 * per-word write barrier where the wb bitmap says the word is a reference;
 * larger copies call the mono_gc_wbarrier_value_copy_bitmap icall with the
 * precomputed bitmap.  Returns early (presumably FALSE — lines missing in this
 * extract) when the alignment is below pointer size or size exceeds 32 words,
 * i.e. when the caller must fall back to a plain copy path.
 */
2681 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
2683 	int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
2684 	unsigned need_wb = 0;
2689 	/*types with references can't have alignment smaller than sizeof(void*) */
2690 	if (align < SIZEOF_VOID_P)
2693 	/*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
2694 	if (size > 32 * SIZEOF_VOID_P)
2697 	create_write_barrier_bitmap (klass, &need_wb, 0);
2699 	/* We don't unroll more than 5 stores to avoid code bloat. */
2700 	if (size > 5 * SIZEOF_VOID_P) {
2701 		/*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
/* Round SIZE up to a whole number of pointer-sized words. */
2702 		size += (SIZEOF_VOID_P - 1);
2703 		size &= ~(SIZEOF_VOID_P - 1);
2705 		EMIT_NEW_ICONST (cfg, iargs [2], size);
2706 		EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
2707 		mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
/* Unrolled inline copy path. */
2711 	destreg = iargs [0]->dreg;
2712 	srcreg = iargs [1]->dreg;
2715 	dest_ptr_reg = alloc_preg (cfg);
2716 	tmp_reg = alloc_preg (cfg);
2719 	EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
/* Copy one word at a time; barrier the words flagged in need_wb. */
2721 	while (size >= SIZEOF_VOID_P) {
2722 		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, tmp_reg, srcreg, offset);
2723 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
2726 			emit_write_barrier (cfg, iargs [0], NULL, tmp_reg);
2728 		offset += SIZEOF_VOID_P;
2729 		size -= SIZEOF_VOID_P;
2732 		/*tmp += sizeof (void*)*/
2733 		if (size >= SIZEOF_VOID_P) {
2734 			NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
2735 			MONO_ADD_INS (cfg->cbb, iargs [0]);
2739 	/* Those cannot be references since size < sizeof (void*) */
/* Copy the sub-word tail with progressively narrower loads/stores. */
2741 		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
2742 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
2748 		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
2749 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
2755 		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
2756 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
2765  * Emit code to copy a valuetype of type @klass whose address is stored in
2766  * @src->dreg to memory whose address is stored at @dest->dreg.
/*
 * When write barriers are enabled and KLASS contains references, either an
 * inline barrier-aware copy or the mono_value_copy icall is used; stack
 * destinations skip barriers.  Otherwise small copies are inlined via
 * mini_emit_memcpy and larger ones call the managed memcpy helper.
 * If NATIVE is true the native (marshalled) size is used and the struct is
 * assumed to contain no references.
 */
2769 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2771 	MonoInst *iargs [4];
2774 	MonoMethod *memcpy_method;
2778 	 * This check breaks with spilled vars... need to handle it during verification anyway.
2779 	 * g_assert (klass && klass == src->klass && klass == dest->klass);
2783 		n = mono_class_native_size (klass, &align);
2785 		n = mono_class_value_size (klass, &align);
2787 	/* if native is true there should be no references in the struct */
2788 	if (cfg->gen_write_barriers && klass->has_references && !native) {
2789 		/* Avoid barriers when storing to the stack */
2790 		if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2791 			  (dest->opcode == OP_LDADDR))) {
2792 			int context_used = 0;
2797 			if (cfg->generic_sharing_context)
2798 				context_used = mono_class_check_context_used (klass);
2800 			/* It's ok to intrinsify under gsharing since shared code types are layout stable. */
2801 			if ((cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
2803 			} else if (context_used) {
/* Shared generic code: fetch the klass from the rgctx at run time. */
2804 				iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2806 				if (cfg->compile_aot) {
2807 					EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2809 					EMIT_NEW_PCONST (cfg, iargs [2], klass);
2810 					mono_class_compute_gc_descriptor (klass);
2814 			mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* Barrier-free path. */
2819 	if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2820 		/* FIXME: Optimize the case when src/dest is OP_LDADDR */
2821 		mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2825 		EMIT_NEW_ICONST (cfg, iargs [2], n);
2827 		memcpy_method = get_memcpy_method ();
2828 		mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return (and cache in a static) the managed String.memset(3 args) helper
 * from corlib.  Aborts with g_error if corlib does not provide it.
 */
2833 get_memset_method (void)
2835 	static MonoMethod *memset_method = NULL;
2836 	if (!memset_method) {
2837 		memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2839 			g_error ("Old corlib found. Install a new one");
2841 	return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code zeroing the valuetype KLASS whose address is in DEST->dreg
 * (the CIL initobj semantics).  Small types (<= 5 words) are zeroed inline
 * via mini_emit_memset; larger ones call the managed memset helper with
 * value 0 and the type's size.
 */
2845 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2847 	MonoInst *iargs [3];
2850 	MonoMethod *memset_method;
2852 	/* FIXME: Optimize this for the case when dest is an LDADDR */
2854 	mono_class_init (klass);
2855 	n = mono_class_value_size (klass, &align);
2857 	if (n <= sizeof (gpointer) * 5) {
2858 		mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2861 		memset_method = get_memset_method ();
2863 		EMIT_NEW_ICONST (cfg, iargs [1], 0);
2864 		EMIT_NEW_ICONST (cfg, iargs [2], n);
2865 		mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR loading the runtime generic context for the shared METHOD,
 * depending on how the context is reached:
 *   - method context used: load the MRGCTX from the vtable variable;
 *   - static method or valuetype klass: load the vtable variable (and deref
 *     to its class_vtable when the method also has a method inst);
 *   - otherwise: load `this` and fetch the vtable from the object header.
 * Only valid under generic sharing (asserted).
 */
2870 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2872 	MonoInst *this = NULL;
2874 	g_assert (cfg->generic_sharing_context);
/* Load `this` only when the context can come from the receiver. */
2876 	if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2877 			!(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2878 			!method->klass->valuetype)
2879 		EMIT_NEW_ARGLOAD (cfg, this, 0);
2881 	if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2882 		MonoInst *mrgctx_loc, *mrgctx_var;
2885 		g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2887 		mrgctx_loc = mono_get_vtable_var (cfg);
2888 		EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2891 	} else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2892 		MonoInst *vtable_loc, *vtable_var;
2896 		vtable_loc = mono_get_vtable_var (cfg);
2897 		EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2899 		if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2900 			MonoInst *mrgctx_var = vtable_var;
/* The variable holds an MRGCTX; load its embedded class vtable. */
2903 			vtable_reg = alloc_preg (cfg);
2904 			EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2905 			vtable_var->type = STACK_PTR;
/* Instance method on a reference type: vtable comes from `this`. */
2911 		int vtable_reg, res_reg;
2913 		vtable_reg = alloc_preg (cfg);
2914 		res_reg = alloc_preg (cfg);
2915 		EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (from MP) and fill a MonoJumpInfoRgctxEntry describing an rgctx
 * slot lookup: the shared METHOD, whether the lookup goes through an MRGCTX,
 * the patch describing the looked-up item (PATCH_TYPE/PATCH_DATA), and the
 * kind of information to fetch (INFO_TYPE).
 */
2920 static MonoJumpInfoRgctxEntry *
2921 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2923 	MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2924 	res->method = method;
2925 	res->in_mrgctx = in_mrgctx;
2926 	res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2927 	res->data->type = patch_type;
2928 	res->data->data.target = patch_data;
2929 	res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *
 *   Emit a call to the lazy rgctx fetch trampoline, passing RGCTX and the
 * slot description ENTRY (resolved through an RGCTX_FETCH abs patch).
 */
2934 static inline MonoInst*
2935 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2937 	return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR fetching the RGCTX_TYPE information (e.g. KLASS/VTABLE) for KLASS
 * from the runtime generic context of the current method.
 */
2941 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2942 					  MonoClass *klass, int rgctx_type)
2944 	MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2945 	MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2947 	return emit_rgctx_fetch (cfg, rgctx, entry);
2951  * emit_get_rgctx_method:
2953  *   Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2954  *   normal constants, else emit a load from the rgctx.
2957 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2958 					   MonoMethod *cmethod, int rgctx_type)
/* Non-shared code: the method is known at compile time, emit a constant. */
2960 	if (!context_used) {
2963 		switch (rgctx_type) {
2964 		case MONO_RGCTX_INFO_METHOD:
2965 			EMIT_NEW_METHODCONST (cfg, ins, cmethod);
2967 		case MONO_RGCTX_INFO_METHOD_RGCTX:
2968 			EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
2971 			g_assert_not_reached ();
/* Shared code: fetch the information from the rgctx at run time. */
2974 		MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2975 		MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2977 		return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR fetching the RGCTX_TYPE information for FIELD from the runtime
 * generic context of the current method.
 */
2982 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2983 					  MonoClassField *field, int rgctx_type)
2985 	MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2986 	MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2988 	return emit_rgctx_fetch (cfg, rgctx, entry);
2992  * On return the caller must check @klass for load errors.
/*
 * emit_generic_class_init:
 *
 *   Emit a call to the generic-class-init trampoline for KLASS.  Under generic
 * sharing the vtable argument is fetched from the rgctx; otherwise a vtable
 * constant is emitted.  Where the architecture defines MONO_ARCH_VTABLE_REG,
 * the vtable is passed in that fixed register.
 */
2995 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
2997 	MonoInst *vtable_arg;
2999 	int context_used = 0;
3001 	if (cfg->generic_sharing_context)
3002 		context_used = mono_class_check_context_used (klass);
3005 		vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3006 										   klass, MONO_RGCTX_INFO_VTABLE);
3008 		MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3012 		EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* LLVM uses a variant trampoline signature. */
3015 	if (COMPILE_LLVM (cfg))
3016 		call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3018 		call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3019 #ifdef MONO_ARCH_VTABLE_REG
3020 	mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3021 	cfg->uses_vtable_reg = TRUE;
/*
 * save_cast_details:
 *
 *   When --debug=casts is enabled, emit IR recording the source class (read
 * from the object in OBJ_REG) and the target KLASS into the JIT TLS data
 * (class_cast_from / class_cast_to), so a failing cast can produce a
 * detailed exception message.  No-op otherwise.
 */
3028 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
3030 	if (mini_get_debug_options ()->better_cast_details) {
3031 		int to_klass_reg = alloc_preg (cfg);
3032 		int vtable_reg = alloc_preg (cfg);
3033 		int klass_reg = alloc_preg (cfg);
3034 		MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3037 			fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3041 		MONO_ADD_INS (cfg->cbb, tls_get);
/* obj -> vtable -> klass, then store both endpoints into the TLS record. */
3042 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3043 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3045 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3046 		MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3047 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * reset_cast_details:
 *
 *   Companion of save_cast_details (): clear the recorded cast details in the
 * JIT TLS data once the cast succeeded.  No-op unless --debug=casts.
 */
3052 reset_cast_details (MonoCompile *cfg)
3054 	/* Reset the variables holding the cast details */
3055 	if (mini_get_debug_options ()->better_cast_details) {
3056 		MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3058 		MONO_ADD_INS (cfg->cbb, tls_get);
3059 		/* It is enough to reset the from field */
3060 		MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3065  * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit a runtime check that OBJ is exactly an instance of ARRAY_CLASS
 * (used for covariant array stores); throws ArrayTypeMismatchException on
 * mismatch.  The comparison is done against the class (MONO_OPT_SHARED),
 * an rgctx-fetched vtable (shared generic code), or a vtable constant.
 */
3068 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3070 	int vtable_reg = alloc_preg (cfg);
3071 	int context_used = 0;
3073 	if (cfg->generic_sharing_context)
3074 		context_used = mono_class_check_context_used (array_class);
3076 	save_cast_details (cfg, array_class, obj->dreg);
/* Faulting load: also acts as the null check on OBJ. */
3078 	MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3080 	if (cfg->opt & MONO_OPT_SHARED) {
3081 		int class_reg = alloc_preg (cfg);
3082 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3083 		if (cfg->compile_aot) {
3084 			int klass_reg = alloc_preg (cfg);
3085 			MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3086 			MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3088 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3090 	} else if (context_used) {
3091 		MonoInst *vtable_ins;
3093 		vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3094 		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3096 		if (cfg->compile_aot) {
3100 			if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3102 			vt_reg = alloc_preg (cfg);
3103 			MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3104 			MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3107 			if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3109 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3113 	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3115 	reset_cast_details (cfg);
3119  * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3120  * generic code is generated.
/*
 * Implemented by calling the type's Nullable Unbox(1 arg) method; under
 * generic sharing both the method's code address and its rgctx are fetched
 * at run time and an indirect (calli) call is emitted.
 */
3123 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3125 	MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3128 		MonoInst *rgctx, *addr;
3130 		/* FIXME: What if the class is shared? We might not
3131 		   have to get the address of the method from the
3133 		addr = emit_get_rgctx_method (cfg, context_used, method,
3134 									  MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3136 		rgctx = emit_get_rgctx (cfg, method, context_used);
3138 		return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3140 		return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *
 *   Emit the IR for CIL `unbox`: verify that the object on top of the stack
 * (sp [0]) is a boxed instance of KLASS (rank 0, element class matches —
 * InvalidCastException otherwise), then produce a managed pointer to the
 * value payload (object address + sizeof (MonoObject)).
 */
3145 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3149 	int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3150 	int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3151 	int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3152 	int rank_reg = alloc_dreg (cfg ,STACK_I4);
3154 	obj_reg = sp [0]->dreg;
/* Faulting load of the vtable doubles as the null check. */
3155 	MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3156 	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3158 	/* FIXME: generics */
3159 	g_assert (klass->rank == 0);
/* A boxed valuetype can never be an array. */
3162 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3163 	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3165 	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3166 	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
/* Shared generic code: compare against the rgctx-resolved element class. */
3169 		MonoInst *element_class;
3171 		/* This assertion is from the unboxcast insn */
3172 		g_assert (klass->rank == 0);
3174 		element_class = emit_get_rgctx_klass (cfg, context_used,
3175 				klass->element_class, MONO_RGCTX_INFO_KLASS);
3177 		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3178 		MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3180 		save_cast_details (cfg, klass->element_class, obj_reg);
3181 		mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3182 		reset_cast_details (cfg);
/* Result: managed pointer past the object header to the unboxed data. */
3185 	NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
3186 	MONO_ADD_INS (cfg->cbb, add);
3187 	add->type = STACK_MP;
3194  * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit IR allocating a new instance of KLASS (FOR_BOX selects the boxing
 * allocator variant).  Paths, in order:
 *   - shared generic code: fetch klass/vtable from the rgctx and call
 *     mono_object_new / mono_object_new_specific;
 *   - MONO_OPT_SHARED: pass the domain and class constants to mono_object_new;
 *   - AOT out-of-line corlib types: specialized mscorlib newobj helper keyed
 *     by the type token (saves relocations);
 *   - default: GC managed allocator if available, else the allocation
 *     function chosen by mono_class_get_allocation_ftn (which may want the
 *     instance size in words passed first — the pass_lw case).
 */
3197 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3199 	MonoInst *iargs [2];
3205 		MonoInst *iargs [2];
3208 		  FIXME: we cannot get managed_alloc here because we can't get
3209 		  the class's vtable (because it's not a closed class)
3211 		MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3212 		MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3215 		if (cfg->opt & MONO_OPT_SHARED)
3216 			rgctx_info = MONO_RGCTX_INFO_KLASS;
3218 			rgctx_info = MONO_RGCTX_INFO_VTABLE;
3219 		data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3221 		if (cfg->opt & MONO_OPT_SHARED) {
3222 			EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3224 			alloc_ftn = mono_object_new;
3227 			alloc_ftn = mono_object_new_specific;
3230 		return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3233 	if (cfg->opt & MONO_OPT_SHARED) {
3234 		EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3235 		EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3237 		alloc_ftn = mono_object_new;
3238 	} else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3239 		/* This happens often in argument checking code, eg. throw new FooException... */
3240 		/* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3241 		EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3242 		return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3244 		MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3245 		MonoMethod *managed_alloc = NULL;
/* Vtable creation failed: record a TypeLoadException on the cfg. */
3249 			mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3250 			cfg->exception_ptr = klass;
3254 #ifndef MONO_CROSS_COMPILE
3255 		managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3258 		if (managed_alloc) {
3259 			EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3260 			return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3262 		alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* pass_lw path: first argument is the instance size in pointer words. */
3264 			guint32 lw = vtable->klass->instance_size;
3265 			lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3266 			EMIT_NEW_ICONST (cfg, iargs [0], lw);
3267 			EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3270 			EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3274 	return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3278  * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit IR boxing VAL of type KLASS.  Nullable<T> is boxed via the type's
 * Box(1 arg) method (indirect rgctx call under generic sharing); everything
 * else allocates via handle_alloc () and stores the value past the object
 * header.
 */
3281 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
3283 	MonoInst *alloc, *ins;
3285 	if (mono_class_is_nullable (klass)) {
3286 		MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3289 			/* FIXME: What if the class is shared? We might not
3290 			   have to get the method address from the RGCTX. */
3291 			MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3292 													MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3293 			MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3295 			return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3297 			return mono_emit_method_call (cfg, method, &val, NULL);
3301 	alloc = handle_alloc (cfg, klass, TRUE, context_used);
/* Copy the value into the freshly allocated box, after the header. */
3305 	EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return whether KLASS is a generic instance (or, under sharing, an open
 * generic) with at least one variant (co/contravariant) type parameter
 * instantiated with a reference type — or, in shared code, with a VAR/MVAR.
 * Such casts need the slow cache-based cast helpers.
 */
3312 mini_class_has_reference_variant_generic_argument (MonoClass *klass, int context_used)
3315 	MonoGenericContainer *container;
3316 	MonoGenericInst *ginst;
3318 	if (klass->generic_class) {
3319 		container = klass->generic_class->container_class->generic_container;
3320 		ginst = klass->generic_class->context.class_inst;
3321 	} else if (klass->generic_container && context_used) {
3322 		container = klass->generic_container;
3323 		ginst = container->context.class_inst;
/* Scan each type argument whose parameter is marked (co)variant. */
3328 	for (i = 0; i < container->type_argc; ++i) {
3330 		if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
3332 		type = ginst->type_argv [i];
3333 		if (MONO_TYPE_IS_REFERENCE (type))
/* In shared code an open VAR/MVAR argument may be a reference type too. */
3336 		if (context_used && (type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR))
3342 // FIXME: This doesn't work yet (class libs tests fail?)
3343 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
3346  * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 *
 *   Emit IR for CIL `castclass` of SRC to KLASS: throws
 * InvalidCastException on failure, passes null through unchanged.  Paths:
 *   - variant-reference generics: call the cache-based castclass wrapper;
 *   - "complex" targets (interfaces, arrays, nullable, etc., per
 *     is_complex_isinst): call the mono_object_castclass icall;
 *   - simple cases: inline null check + vtable/klass comparison, with
 *     sealed classes compared directly against their vtable/klass.
 * Cast details are recorded/reset for --debug=casts.
 */
3349 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3351 	MonoBasicBlock *is_null_bb;
3352 	int obj_reg = src->dreg;
3353 	int vtable_reg = alloc_preg (cfg);
3354 	MonoInst *klass_inst = NULL;
3359 	if(mini_class_has_reference_variant_generic_argument (klass, context_used)) {
3360 		MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
3361 		MonoInst *cache_ins;
3363 		cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3368 		/* klass - it's the second element of the cache entry*/
3369 		EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3372 		args [2] = cache_ins;
3374 		return mono_emit_method_call (cfg, mono_castclass, args, NULL);
3377 		klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3379 		if (is_complex_isinst (klass)) {
3380 			/* Complex case, handle by an icall */
3386 			args [1] = klass_inst;
3388 			return mono_emit_jit_icall (cfg, mono_object_castclass, args);
3390 			/* Simple case, handled by the code below */
/* Inline fast path: null passes the cast unconditionally. */
3394 	NEW_BBLOCK (cfg, is_null_bb);
3396 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3397 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3399 	save_cast_details (cfg, klass, obj_reg);
3401 	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3402 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3403 		mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3405 		int klass_reg = alloc_preg (cfg);
3407 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3409 		if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3410 			/* the remoting code is broken, access the class for now */
3411 			if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3412 				MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3414 					mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3415 					cfg->exception_ptr = klass;
3418 				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3420 				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3421 				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3423 			MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3425 			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3426 			mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3430 	MONO_START_BB (cfg, is_null_bb);
3432 	reset_cast_details (cfg);
3438  * Returns NULL and set the cfg exception on error.
/*
 * handle_isinst:
 *
 *   Emit IR for CIL `isinst` of SRC against KLASS: the result register holds
 * the object itself on success and null on failure (no exception).  Like
 * handle_castclass, variant-reference generics go through the cache-based
 * isinst wrapper, is_complex_isinst targets call the mono_object_isinst
 * icall, and the rest are checked inline with a three-block
 * (is_null / false / end) diamond.  Inline paths cover interfaces, arrays
 * (rank + element-class checks, enum/object special cases, SZARRAY bounds
 * check), nullable, and sealed classes.
 */
3441 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3444 	MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3445 	int obj_reg = src->dreg;
3446 	int vtable_reg = alloc_preg (cfg);
3447 	int res_reg = alloc_preg (cfg);
3448 	MonoInst *klass_inst = NULL;
3453 	if(mini_class_has_reference_variant_generic_argument (klass, context_used)) {
3454 		MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
3455 		MonoInst *cache_ins;
3457 		cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3462 		/* klass - it's the second element of the cache entry*/
3463 		EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3466 		args [2] = cache_ins;
3468 		return mono_emit_method_call (cfg, mono_isinst, args, NULL);
3471 		klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3473 		if (is_complex_isinst (klass)) {
3474 			/* Complex case, handle by an icall */
3480 			args [1] = klass_inst;
3482 			return mono_emit_jit_icall (cfg, mono_object_isinst, args);
3484 			/* Simple case, the code below can handle it */
3488 	NEW_BBLOCK (cfg, is_null_bb);
3489 	NEW_BBLOCK (cfg, false_bb);
3490 	NEW_BBLOCK (cfg, end_bb);
3492 	/* Do the assignment at the beginning, so the other assignment can be if converted */
3493 	EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3494 	ins->type = STACK_OBJ;
/* null input -> result stays the (null) object. */
3497 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3498 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3500 	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3502 	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3503 		g_assert (!context_used);
3504 		/* the is_null_bb target simply copies the input register to the output */
3505 		mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3507 		int klass_reg = alloc_preg (cfg);
/* Array target: rank must match, then compare element classes. */
3510 			int rank_reg = alloc_preg (cfg);
3511 			int eclass_reg = alloc_preg (cfg);
3513 			g_assert (!context_used);
3514 			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3515 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3516 			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3517 			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3518 			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
3519 			if (klass->cast_class == mono_defaults.object_class) {
3520 				int parent_reg = alloc_preg (cfg);
3521 				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3522 				mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3523 				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3524 				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3525 			} else if (klass->cast_class == mono_defaults.enum_class->parent) {
3526 				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3527 				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3528 				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3529 			} else if (klass->cast_class == mono_defaults.enum_class) {
3530 				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3531 				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3532 			} else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3533 				mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3535 				if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3536 					/* Check that the object is a vector too */
3537 					int bounds_reg = alloc_preg (cfg);
3538 					MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3539 					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3540 					MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3543 				/* the is_null_bb target simply copies the input register to the output */
3544 				mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3546 		} else if (mono_class_is_nullable (klass)) {
3547 			g_assert (!context_used);
3548 			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3549 			/* the is_null_bb target simply copies the input register to the output */
3550 			mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
3552 			if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3553 				g_assert (!context_used);
3554 				/* the remoting code is broken, access the class for now */
3555 				if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3556 					MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3558 						mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3559 						cfg->exception_ptr = klass;
3562 					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3564 					MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3565 					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3567 				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3568 				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3570 				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3571 				/* the is_null_bb target simply copies the input register to the output */
3572 				mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* false path: overwrite the result with null. */
3577 	MONO_START_BB (cfg, false_bb);
3579 	MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3580 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3582 	MONO_START_BB (cfg, is_null_bb);
3584 	MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *   Emit IR for the CISINST (remoting-aware isinst) opcode. Takes an object
 *   reference (src) and a class, and leaves an I4 result in dreg:
 *     0 - the object is an instance of klass,
 *     1 - the object is not an instance of klass,
 *     2 - the object is a transparent proxy whose type cannot be determined.
 *   NOTE(review): this listing is an elided excerpt; some original lines
 *   (opening/closing braces, the final return) are not shown here.
 */
3590 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3592 /* This opcode takes as input an object reference and a class, and returns:
3593 0) if the object is an instance of the class,
3594 1) if the object is not instance of the class,
3595 2) if the object is a proxy whose type cannot be determined */
3598 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3599 int obj_reg = src->dreg;
3600 int dreg = alloc_ireg (cfg);
3602 int klass_reg = alloc_preg (cfg);
3604 NEW_BBLOCK (cfg, true_bb);
3605 NEW_BBLOCK (cfg, false_bb);
3606 NEW_BBLOCK (cfg, false2_bb);
3607 NEW_BBLOCK (cfg, end_bb);
3608 NEW_BBLOCK (cfg, no_proxy_bb);
/* A null object reference is "not an instance" (result 1). */
3610 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3611 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
/* Interface case: try the fast interface-bitmap check first. */
3613 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3614 NEW_BBLOCK (cfg, interface_fail_bb);
3616 tmp_reg = alloc_preg (cfg);
3617 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3618 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
3619 MONO_START_BB (cfg, interface_fail_bb);
3620 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* If the interface check failed and the object is not a transparent proxy,
 * the answer is definitively "no" (false_bb). */
3622 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
/* Proxy without custom type info -> type cannot be determined (result 2). */
3624 tmp_reg = alloc_preg (cfg);
3625 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3626 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3627 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
/* Non-interface case: load the object's class from its vtable. */
3629 tmp_reg = alloc_preg (cfg);
3630 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3631 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3633 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Transparent proxy: test against the remote class's proxy_class instead. */
3634 tmp_reg = alloc_preg (cfg);
3635 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3636 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3638 tmp_reg = alloc_preg (cfg);
3639 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3640 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3641 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3643 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3644 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3646 MONO_START_BB (cfg, no_proxy_bb);
3648 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Result blocks: false_bb -> 1, false2_bb -> 2, true_bb -> 0. */
3651 MONO_START_BB (cfg, false_bb);
3653 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3654 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3656 MONO_START_BB (cfg, false2_bb);
3658 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3659 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3661 MONO_START_BB (cfg, true_bb);
3663 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3665 MONO_START_BB (cfg, end_bb);
/* Materialize the result as an ICONST-typed instruction on the IL stack. */
3668 MONO_INST_NEW (cfg, ins, OP_ICONST);
3670 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *   Emit IR for the CCASTCLASS (remoting-aware castclass) opcode. Takes an
 *   object reference (src) and a class, leaves an I4 result in dreg:
 *     0 - the object is an instance of klass,
 *     1 - the object is a proxy whose type cannot be determined;
 *   an InvalidCastException is thrown otherwise.
 *   NOTE(review): this listing is an elided excerpt; some original lines
 *   (braces, the final return) are not shown here.
 */
3676 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3678 /* This opcode takes as input an object reference and a class, and returns:
3679 0) if the object is an instance of the class,
3680 1) if the object is a proxy whose type cannot be determined
3681 an InvalidCastException exception is thrown otherwise*/
3684 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3685 int obj_reg = src->dreg;
3686 int dreg = alloc_ireg (cfg);
3687 int tmp_reg = alloc_preg (cfg);
3688 int klass_reg = alloc_preg (cfg);
3690 NEW_BBLOCK (cfg, end_bb);
3691 NEW_BBLOCK (cfg, ok_result_bb);
/* castclass of a null reference always succeeds (result 0). */
3693 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3694 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record cast details so a failing cast can produce a useful message. */
3696 save_cast_details (cfg, klass, obj_reg);
/* Interface case: fast interface check, then proxy fallback. */
3698 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3699 NEW_BBLOCK (cfg, interface_fail_bb);
3701 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3702 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3703 MONO_START_BB (cfg, interface_fail_bb);
3704 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Not a transparent proxy -> this check throws InvalidCastException. */
3706 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
/* Proxy with no custom type info -> throw; otherwise result 1. */
3708 tmp_reg = alloc_preg (cfg);
3709 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3710 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3711 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
3713 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3714 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Non-interface case. */
3717 NEW_BBLOCK (cfg, no_proxy_bb);
3719 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3720 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3721 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Transparent proxy: check against the remote class's proxy_class. */
3723 tmp_reg = alloc_preg (cfg);
3724 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3725 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3727 tmp_reg = alloc_preg (cfg);
3728 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3729 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3730 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3732 NEW_BBLOCK (cfg, fail_1_bb);
3734 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
/* Proxy whose type cannot be verified here -> result 1 (checked later). */
3736 MONO_START_BB (cfg, fail_1_bb);
3738 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3739 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3741 MONO_START_BB (cfg, no_proxy_bb);
/* Ordinary object: a plain castclass (throws on mismatch). */
3743 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3746 MONO_START_BB (cfg, ok_result_bb);
3748 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3750 MONO_START_BB (cfg, end_bb);
/* Materialize the result as an ICONST-typed instruction on the IL stack. */
3753 MONO_INST_NEW (cfg, ins, OP_ICONST);
3755 ins->type = STACK_I4;
/*
 * handle_delegate_ctor:
 *   Inline the work of mono_delegate_ctor (): allocate the delegate object
 *   and fill in its target, method, method_code and invoke_impl fields,
 *   avoiding a runtime call.
 *   NOTE(review): elided excerpt; braces/else keywords and the final
 *   "return obj" are not shown here.
 */
3761 * Returns NULL and set the cfg exception on error.
3763 static G_GNUC_UNUSED MonoInst*
3764 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
3768 gpointer *trampoline;
3769 MonoInst *obj, *method_ins, *tramp_ins;
3773 obj = handle_alloc (cfg, klass, FALSE, 0);
3777 /* Inline the contents of mono_delegate_ctor */
3779 /* Set target field */
3780 /* Optimize away setting of NULL target */
3781 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
3782 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* Storing an object reference into a heap object requires a write barrier
 * when the GC uses them. */
3783 if (cfg->gen_write_barriers) {
3784 dreg = alloc_preg (cfg);
3785 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target));
3786 emit_write_barrier (cfg, ptr, target, 0);
3790 /* Set method field */
3791 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
3792 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3793 if (cfg->gen_write_barriers) {
3794 dreg = alloc_preg (cfg);
3795 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method));
3796 emit_write_barrier (cfg, ptr, method_ins, 0);
3799 * To avoid looking up the compiled code belonging to the target method
3800 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3801 * store it, and we fill it after the method has been compiled.
/* Only for non-AOT, non-dynamic methods: the slot lives in domain memory. */
3803 if (!cfg->compile_aot && !method->dynamic) {
3804 MonoInst *code_slot_ins;
3807 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Lazily create the per-domain method -> code-slot hash; guarded by the
 * domain lock. */
3809 domain = mono_domain_get ();
3810 mono_domain_lock (domain);
3811 if (!domain_jit_info (domain)->method_code_hash)
3812 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3813 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3815 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3816 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3818 mono_domain_unlock (domain);
3820 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3822 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3825 /* Set invoke_impl field */
/* AOT code cannot bake a runtime pointer, so it uses a patch-info constant
 * resolved at load time. */
3826 if (cfg->compile_aot) {
3827 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3829 trampoline = mono_create_delegate_trampoline (klass);
3830 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3832 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3834 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *   Emit the call implementing a multi-dimensional NEWARR/NEWOBJ of rank
 *   'rank': registers the rank-specific mono_array_new_va icall and calls it
 *   through its icall wrapper with the dimension arguments in 'sp'.
 *   NOTE(review): elided excerpt; braces are not shown here.
 */
3840 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3842 MonoJitICallInfo *info;
3844 /* Need to register the icall so it gets an icall wrapper */
3845 info = mono_get_array_new_va_icall (rank);
3847 cfg->flags |= MONO_CFG_HAS_VARARGS;
3849 /* mono_array_new_va () needs a vararg calling convention */
/* LLVM cannot compile this vararg call, so fall back to the old JIT. */
3850 cfg->disable_llvm = TRUE;
3852 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3853 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *   If the compilation uses a GOT variable that has not been materialized
 *   yet, prepend an OP_LOAD_GOTADDR to the entry basic block to initialize
 *   it, and add a dummy use in the exit block so liveness analysis keeps the
 *   variable alive for the whole method.
 *   NOTE(review): elided excerpt; braces/else and early "return" lines are
 *   not shown here.
 */
3857 mono_emit_load_got_addr (MonoCompile *cfg)
3859 MonoInst *getaddr, *dummy_use;
/* Nothing to do without a GOT var, or if it was already allocated. */
3861 if (!cfg->got_var || cfg->got_var_allocated)
3864 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3865 getaddr->dreg = cfg->got_var->dreg;
3867 /* Add it to the start of the first bblock */
/* Prepend (not append) so the GOT is loaded before any instruction that
 * might need it. */
3868 if (cfg->bb_entry->code) {
3869 getaddr->next = cfg->bb_entry->code;
3870 cfg->bb_entry->code = getaddr;
3873 MONO_ADD_INS (cfg->bb_entry, getaddr);
3875 cfg->got_var_allocated = TRUE;
3878 * Add a dummy use to keep the got_var alive, since real uses might
3879 * only be generated by the back ends.
3880 * Add it to end_bblock, so the variable's lifetime covers the whole
3882 * It would be better to make the usage of the got var explicit in all
3883 * cases when the backend needs it (i.e. calls, throw etc.), so this
3884 * wouldn't be needed.
3886 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3887 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Cached inline size limit: read once from MONO_INLINELIMIT (or
 * INLINE_LENGTH_LIMIT) on first use; see mono_method_check_inlining. */
3890 static int inline_limit;
3891 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *   Decide whether 'method' may be inlined into the method being compiled.
 *   Rejects generic-shared compilations, deep inline nesting, methods whose
 *   attributes forbid inlining (noinlining, synchronized, marshalbyref...),
 *   bodies above the size limit, classes whose cctor cannot be run or
 *   proven-run, methods with declarative security, and (on soft-float
 *   targets) methods taking or returning R4.
 *   NOTE(review): elided excerpt; the TRUE/FALSE return statements and
 *   several braces are not shown here.
 */
3894 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3896 MonoMethodHeaderSummary header;
3898 #ifdef MONO_ARCH_SOFT_FLOAT
3899 MonoMethodSignature *sig = mono_method_signature (method);
3903 if (cfg->generic_sharing_context)
/* Cap recursive inlining depth. */
3906 if (cfg->inline_depth > 10)
3909 #ifdef MONO_ARCH_HAVE_LMF_OPS
3910 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3911 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3912 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
/* Summary parse failure (or unsupported body) -> cannot inline. */
3917 if (!mono_method_get_header_summary (method, &header))
3920 /*runtime, icall and pinvoke are checked by summary call*/
3921 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3922 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3923 (method->klass->marshalbyref) ||
3927 /* also consider num_locals? */
3928 /* Do the size check early to avoid creating vtables */
3929 if (!inline_limit_inited) {
3930 if (getenv ("MONO_INLINELIMIT"))
3931 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3933 inline_limit = INLINE_LENGTH_LIMIT;
3934 inline_limit_inited = TRUE;
3936 if (header.code_size >= inline_limit)
3940 * if we can initialize the class of the method right away, we do,
3941 * otherwise we don't allow inlining if the class needs initialization,
3942 * since it would mean inserting a call to mono_runtime_class_init()
3943 * inside the inlined code
3945 if (!(cfg->opt & MONO_OPT_SHARED)) {
3946 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3947 if (cfg->run_cctors && method->klass->has_cctor) {
3948 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
3949 if (!method->klass->runtime_info)
3950 /* No vtable created yet */
3952 vtable = mono_class_vtable (cfg->domain, method->klass);
3955 /* This makes so that inline cannot trigger */
3956 /* .cctors: too many apps depend on them */
3957 /* running with a specific order... */
3958 if (! vtable->initialized)
3960 mono_runtime_class_init (vtable);
3962 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3963 if (!method->klass->runtime_info)
3964 /* No vtable created yet */
3966 vtable = mono_class_vtable (cfg->domain, method->klass);
3969 if (!vtable->initialized)
3974 * If we're compiling for shared code
3975 * the cctor will need to be run at aot method load time, for example,
3976 * or at the end of the compilation of the inlining method.
3978 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3983 * CAS - do not inline methods with declarative security
3984 * Note: this has to be before any possible return TRUE;
3986 if (mono_method_has_declsec (method))
3989 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float targets cannot inline methods using R4 in signature. */
3991 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3993 for (i = 0; i < sig->param_count; ++i)
3994 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *   Return whether a static field access on vtable->klass requires emitting
 *   a class-initialization check. Already-initialized classes (non-AOT),
 *   beforefieldinit classes, classes not needing a cctor, and instance
 *   methods of the same class are exempt.
 *   NOTE(review): elided excerpt; the return statements are not shown here.
 */
4002 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
4004 if (vtable->initialized && !cfg->compile_aot)
4007 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
4010 if (!mono_class_needs_cctor_run (vtable->klass, method))
4013 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
4014 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *   Emit IR computing the address of element 'index' of the 1-dimensional
 *   array 'arr' with element class 'klass'. Emits a bounds check when
 *   'bcheck' is set. Uses an x86 LEA when the element size allows a
 *   shift-based addressing mode; otherwise multiply + add.
 *   NOTE(review): elided excerpt; braces/else and the "return ins" lines
 *   are not shown here.
 */
4021 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4025 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
4027 mono_class_init (klass);
4028 size = mono_class_array_element_size (klass);
4030 mult_reg = alloc_preg (cfg);
4031 array_reg = arr->dreg;
4032 index_reg = index->dreg;
4034 #if SIZEOF_REGISTER == 8
4035 /* The array reg is 64 bits but the index reg is only 32 */
/* LLVM handles the widening itself; the JIT needs an explicit sign-extend. */
4036 if (COMPILE_LLVM (cfg)) {
4038 index2_reg = index_reg;
4040 index2_reg = alloc_preg (cfg);
4041 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32-bit target: narrow a 64-bit index to 32 bits. */
4044 if (index->type == STACK_I8) {
4045 index2_reg = alloc_preg (cfg);
4046 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4048 index2_reg = index_reg;
4053 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
4055 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Power-of-two element sizes map onto the LEA scale factor. */
4056 if (size == 1 || size == 2 || size == 4 || size == 8) {
4057 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4059 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
4060 ins->type = STACK_PTR;
/* Generic path: addr = arr + index * size + offsetof(vector). */
4066 add_reg = alloc_preg (cfg);
4068 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
4069 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4070 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4071 ins->type = STACK_PTR;
4072 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_2_ins:
 *   Emit IR computing the address of element [index1, index2] of a
 *   2-dimensional array: range-checks both indices against the bounds
 *   array, then addr = arr + ((idx1' * len2 + idx2') * size) + vector
 *   offset, where idxN' is the index adjusted by the lower bound.
 *   Compiled out on targets that emulate mul/div (depends on OP_PMUL).
 *   NOTE(review): elided excerpt; the "return ins" line is not shown here.
 */
4077 #ifndef MONO_ARCH_EMULATE_MUL_DIV
4079 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4081 int bounds_reg = alloc_preg (cfg);
4082 int add_reg = alloc_preg (cfg);
4083 int mult_reg = alloc_preg (cfg);
4084 int mult2_reg = alloc_preg (cfg);
4085 int low1_reg = alloc_preg (cfg);
4086 int low2_reg = alloc_preg (cfg);
4087 int high1_reg = alloc_preg (cfg);
4088 int high2_reg = alloc_preg (cfg);
4089 int realidx1_reg = alloc_preg (cfg);
4090 int realidx2_reg = alloc_preg (cfg);
4091 int sum_reg = alloc_preg (cfg);
4096 mono_class_init (klass);
4097 size = mono_class_array_element_size (klass);
4099 index1 = index_ins1->dreg;
4100 index2 = index_ins2->dreg;
4102 /* range checking */
4103 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4104 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 0: realidx1 = index1 - lower_bound; unsigned compare against
 * length catches both below-lower-bound and above-upper-bound. */
4106 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4107 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4108 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4109 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4110 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4111 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4112 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 1: second MonoArrayBounds entry sits sizeof(MonoArrayBounds)
 * past the first. */
4114 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4115 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4116 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4117 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4118 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
4119 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4120 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Flatten to a linear index and scale by the element size. */
4122 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4123 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4124 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4125 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4126 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4128 ins->type = STACK_MP;
4130 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *   Dispatch for computing an array element address from the intrinsic
 *   Get/Set/Address array methods: rank 1 uses the inline 1-dim helper,
 *   rank 2 (when intrinsics are enabled and OP_LMUL exists) uses the
 *   inline 2-dim helper, anything else calls the marshal-generated
 *   Address wrapper. For setters the value argument is excluded from the
 *   rank computation.
 *   NOTE(review): elided excerpt; braces and the final return are not
 *   shown here.
 */
4137 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4141 MonoMethod *addr_method;
4144 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4147 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
4149 #ifndef MONO_ARCH_EMULATE_MUL_DIV
4150 /* emit_ldelema_2 depends on OP_LMUL */
4151 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
4152 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* General case: call the runtime-generated Address method. */
4156 element_size = mono_class_array_element_size (cmethod->klass->element_class);
4157 addr_method = mono_marshal_get_array_address (rank, element_size);
4158 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint request. */
4163 static MonoBreakPolicy
4164 always_insert_breakpoint (MonoMethod *method)
4166 return MONO_BREAK_POLICY_ALWAYS;
/* Current policy callback; replaced via mono_set_break_policy (). */
4169 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4172 * mono_set_break_policy:
4173 * policy_callback: the new callback function
4175 * Allow embedders to decide whether to actually obey breakpoint instructions
4176 * (both break IL instructions and Debugger.Break () method calls), for example
4177 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4178 * untrusted or semi-trusted code.
4180 * @policy_callback will be called every time a break point instruction needs to
4181 * be inserted with the method argument being the method that calls Debugger.Break()
4182 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4183 * if it wants the breakpoint to not be effective in the given method.
4184 * #MONO_BREAK_POLICY_ALWAYS is the default.
4187 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
/* A NULL callback restores the default always-break policy. */
4189 if (policy_callback)
4190 break_policy_func = policy_callback;
4192 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint: (sic - historical typo in the name, kept because
 * callers use it)
 *   Consult the installed break policy for 'method' and return whether a
 *   breakpoint instruction should actually be emitted.
 *   NOTE(review): elided excerpt; the per-case return statements are not
 *   shown here.
 */
4196 should_insert_brekpoint (MonoMethod *method) {
4197 switch (break_policy_func (method)) {
4198 case MONO_BREAK_POLICY_ALWAYS:
4200 case MONO_BREAK_POLICY_NEVER:
4202 case MONO_BREAK_POLICY_ON_DBG:
4203 return mono_debug_using_mono_debugger ();
/* Unknown enum value from an embedder-supplied callback: warn. */
4205 g_warning ("Incorrect value returned from break policy callback");
4210 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *   Inline the array Get/SetGenericValueImpl icalls as a direct element
 *   address computation plus a typed load/store. 'is_set' selects the
 *   store (args[2] -> element) vs. load (element -> args[2]) direction.
 *   NOTE(review): elided excerpt; braces/else and the return are not
 *   shown here.
 */
4212 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4214 MonoInst *addr, *store, *load;
4215 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4217 /* the bounds check is already done by the callers */
4218 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
4220 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4221 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
4223 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4224 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/*
 * mini_emit_inst_for_ctor:
 *   Intrinsic hook for constructor calls: currently only tries the SIMD
 *   intrinsics (when MONO_OPT_SIMD is enabled on SIMD-capable builds).
 *   Returns the emitted instruction or NULL when no intrinsic applies.
 *   NOTE(review): elided excerpt; braces and the final return are not
 *   shown here.
 */
4230 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4232 MonoInst *ins = NULL;
4233 #ifdef MONO_ARCH_SIMD_INTRINSICS
4234 if (cfg->opt & MONO_OPT_SIMD) {
4235 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
4245 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4247 MonoInst *ins = NULL;
4249 static MonoClass *runtime_helpers_class = NULL;
4250 if (! runtime_helpers_class)
4251 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
4252 "System.Runtime.CompilerServices", "RuntimeHelpers");
4254 if (cmethod->klass == mono_defaults.string_class) {
4255 if (strcmp (cmethod->name, "get_Chars") == 0) {
4256 int dreg = alloc_ireg (cfg);
4257 int index_reg = alloc_preg (cfg);
4258 int mult_reg = alloc_preg (cfg);
4259 int add_reg = alloc_preg (cfg);
4261 #if SIZEOF_REGISTER == 8
4262 /* The array reg is 64 bits but the index reg is only 32 */
4263 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
4265 index_reg = args [1]->dreg;
4267 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
4269 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4270 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
4271 add_reg = ins->dreg;
4272 /* Avoid a warning */
4274 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4277 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
4278 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4279 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4280 add_reg, G_STRUCT_OFFSET (MonoString, chars));
4282 type_from_op (ins, NULL, NULL);
4284 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4285 int dreg = alloc_ireg (cfg);
4286 /* Decompose later to allow more optimizations */
4287 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
4288 ins->type = STACK_I4;
4289 ins->flags |= MONO_INST_FAULT;
4290 cfg->cbb->has_array_access = TRUE;
4291 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
4294 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
4295 int mult_reg = alloc_preg (cfg);
4296 int add_reg = alloc_preg (cfg);
4298 /* The corlib functions check for oob already. */
4299 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
4300 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4301 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
4302 return cfg->cbb->last_ins;
4305 } else if (cmethod->klass == mono_defaults.object_class) {
4307 if (strcmp (cmethod->name, "GetType") == 0) {
4308 int dreg = alloc_preg (cfg);
4309 int vt_reg = alloc_preg (cfg);
4310 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4311 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
4312 type_from_op (ins, NULL, NULL);
4315 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
4316 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
4317 int dreg = alloc_ireg (cfg);
4318 int t1 = alloc_ireg (cfg);
4320 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
4321 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
4322 ins->type = STACK_I4;
4326 } else if (strcmp (cmethod->name, ".ctor") == 0) {
4327 MONO_INST_NEW (cfg, ins, OP_NOP);
4328 MONO_ADD_INS (cfg->cbb, ins);
4332 } else if (cmethod->klass == mono_defaults.array_class) {
4333 if (strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
4334 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
4336 #ifndef MONO_BIG_ARRAYS
4338 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
4341 if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
4342 int dreg = alloc_ireg (cfg);
4343 int bounds_reg = alloc_ireg (cfg);
4344 MonoBasicBlock *end_bb, *szarray_bb;
4345 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
4347 NEW_BBLOCK (cfg, end_bb);
4348 NEW_BBLOCK (cfg, szarray_bb);
4350 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
4351 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
4352 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4353 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
4354 /* Non-szarray case */
4356 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4357 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4359 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4360 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4361 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4362 MONO_START_BB (cfg, szarray_bb);
4365 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4366 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4368 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4369 MONO_START_BB (cfg, end_bb);
4371 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
4372 ins->type = STACK_I4;
4378 if (cmethod->name [0] != 'g')
4381 if (strcmp (cmethod->name, "get_Rank") == 0) {
4382 int dreg = alloc_ireg (cfg);
4383 int vtable_reg = alloc_preg (cfg);
4384 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
4385 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4386 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
4387 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4388 type_from_op (ins, NULL, NULL);
4391 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4392 int dreg = alloc_ireg (cfg);
4394 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4395 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4396 type_from_op (ins, NULL, NULL);
4401 } else if (cmethod->klass == runtime_helpers_class) {
4403 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
4404 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
4408 } else if (cmethod->klass == mono_defaults.thread_class) {
4409 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
4410 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
4411 MONO_ADD_INS (cfg->cbb, ins);
4413 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
4414 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4415 MONO_ADD_INS (cfg->cbb, ins);
4418 } else if (cmethod->klass == mono_defaults.monitor_class) {
4419 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
4420 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
4423 if (COMPILE_LLVM (cfg)) {
4425 * Pass the argument normally, the LLVM backend will handle the
4426 * calling convention problems.
4428 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4430 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
4431 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4432 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4433 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4436 return (MonoInst*)call;
4437 } else if (strcmp (cmethod->name, "Exit") == 0) {
4440 if (COMPILE_LLVM (cfg)) {
4441 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4443 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
4444 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4445 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4446 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4449 return (MonoInst*)call;
4451 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4452 MonoMethod *fast_method = NULL;
4454 /* Avoid infinite recursion */
4455 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
4456 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
4457 strcmp (cfg->method->name, "FastMonitorExit") == 0))
4460 if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) ||
4461 strcmp (cmethod->name, "Exit") == 0)
4462 fast_method = mono_monitor_get_fast_path (cmethod);
4466 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
4468 } else if (cmethod->klass->image == mono_defaults.corlib &&
4469 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
4470 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
4473 #if SIZEOF_REGISTER == 8
4474 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
4475 /* 64 bit reads are already atomic */
4476 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
4477 ins->dreg = mono_alloc_preg (cfg);
4478 ins->inst_basereg = args [0]->dreg;
4479 ins->inst_offset = 0;
4480 MONO_ADD_INS (cfg->cbb, ins);
4484 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
4485 if (strcmp (cmethod->name, "Increment") == 0) {
4486 MonoInst *ins_iconst;
4489 if (fsig->params [0]->type == MONO_TYPE_I4)
4490 opcode = OP_ATOMIC_ADD_NEW_I4;
4491 #if SIZEOF_REGISTER == 8
4492 else if (fsig->params [0]->type == MONO_TYPE_I8)
4493 opcode = OP_ATOMIC_ADD_NEW_I8;
4496 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4497 ins_iconst->inst_c0 = 1;
4498 ins_iconst->dreg = mono_alloc_ireg (cfg);
4499 MONO_ADD_INS (cfg->cbb, ins_iconst);
4501 MONO_INST_NEW (cfg, ins, opcode);
4502 ins->dreg = mono_alloc_ireg (cfg);
4503 ins->inst_basereg = args [0]->dreg;
4504 ins->inst_offset = 0;
4505 ins->sreg2 = ins_iconst->dreg;
4506 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4507 MONO_ADD_INS (cfg->cbb, ins);
4509 } else if (strcmp (cmethod->name, "Decrement") == 0) {
4510 MonoInst *ins_iconst;
4513 if (fsig->params [0]->type == MONO_TYPE_I4)
4514 opcode = OP_ATOMIC_ADD_NEW_I4;
4515 #if SIZEOF_REGISTER == 8
4516 else if (fsig->params [0]->type == MONO_TYPE_I8)
4517 opcode = OP_ATOMIC_ADD_NEW_I8;
4520 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4521 ins_iconst->inst_c0 = -1;
4522 ins_iconst->dreg = mono_alloc_ireg (cfg);
4523 MONO_ADD_INS (cfg->cbb, ins_iconst);
4525 MONO_INST_NEW (cfg, ins, opcode);
4526 ins->dreg = mono_alloc_ireg (cfg);
4527 ins->inst_basereg = args [0]->dreg;
4528 ins->inst_offset = 0;
4529 ins->sreg2 = ins_iconst->dreg;
4530 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4531 MONO_ADD_INS (cfg->cbb, ins);
4533 } else if (strcmp (cmethod->name, "Add") == 0) {
4536 if (fsig->params [0]->type == MONO_TYPE_I4)
4537 opcode = OP_ATOMIC_ADD_NEW_I4;
4538 #if SIZEOF_REGISTER == 8
4539 else if (fsig->params [0]->type == MONO_TYPE_I8)
4540 opcode = OP_ATOMIC_ADD_NEW_I8;
4544 MONO_INST_NEW (cfg, ins, opcode);
4545 ins->dreg = mono_alloc_ireg (cfg);
4546 ins->inst_basereg = args [0]->dreg;
4547 ins->inst_offset = 0;
4548 ins->sreg2 = args [1]->dreg;
4549 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4550 MONO_ADD_INS (cfg->cbb, ins);
4553 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
4555 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
4556 if (strcmp (cmethod->name, "Exchange") == 0) {
4558 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
4560 if (fsig->params [0]->type == MONO_TYPE_I4)
4561 opcode = OP_ATOMIC_EXCHANGE_I4;
4562 #if SIZEOF_REGISTER == 8
4563 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
4564 (fsig->params [0]->type == MONO_TYPE_I))
4565 opcode = OP_ATOMIC_EXCHANGE_I8;
4567 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
4568 opcode = OP_ATOMIC_EXCHANGE_I4;
4573 MONO_INST_NEW (cfg, ins, opcode);
4574 ins->dreg = mono_alloc_ireg (cfg);
4575 ins->inst_basereg = args [0]->dreg;
4576 ins->inst_offset = 0;
4577 ins->sreg2 = args [1]->dreg;
4578 MONO_ADD_INS (cfg->cbb, ins);
4580 switch (fsig->params [0]->type) {
4582 ins->type = STACK_I4;
4586 ins->type = STACK_I8;
4588 case MONO_TYPE_OBJECT:
4589 ins->type = STACK_OBJ;
4592 g_assert_not_reached ();
4595 if (cfg->gen_write_barriers && is_ref)
4596 emit_write_barrier (cfg, args [0], args [1], -1);
4598 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4600 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
4601 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
4603 gboolean is_ref = MONO_TYPE_IS_REFERENCE (fsig->params [1]);
4604 if (fsig->params [1]->type == MONO_TYPE_I4)
4606 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
4607 size = sizeof (gpointer);
4608 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
4611 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
4612 ins->dreg = alloc_ireg (cfg);
4613 ins->sreg1 = args [0]->dreg;
4614 ins->sreg2 = args [1]->dreg;
4615 ins->sreg3 = args [2]->dreg;
4616 ins->type = STACK_I4;
4617 MONO_ADD_INS (cfg->cbb, ins);
4618 } else if (size == 8) {
4619 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
4620 ins->dreg = alloc_ireg (cfg);
4621 ins->sreg1 = args [0]->dreg;
4622 ins->sreg2 = args [1]->dreg;
4623 ins->sreg3 = args [2]->dreg;
4624 ins->type = STACK_I8;
4625 MONO_ADD_INS (cfg->cbb, ins);
4627 /* g_assert_not_reached (); */
4629 if (cfg->gen_write_barriers && is_ref)
4630 emit_write_barrier (cfg, args [0], args [1], -1);
4632 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
4636 } else if (cmethod->klass->image == mono_defaults.corlib) {
4637 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
4638 && strcmp (cmethod->klass->name, "Debugger") == 0) {
4639 if (should_insert_brekpoint (cfg->method))
4640 MONO_INST_NEW (cfg, ins, OP_BREAK);
4642 MONO_INST_NEW (cfg, ins, OP_NOP);
4643 MONO_ADD_INS (cfg->cbb, ins);
4646 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
4647 && strcmp (cmethod->klass->name, "Environment") == 0) {
4649 EMIT_NEW_ICONST (cfg, ins, 1);
4651 EMIT_NEW_ICONST (cfg, ins, 0);
4655 } else if (cmethod->klass == mono_defaults.math_class) {
4657 * There is general branches code for Min/Max, but it does not work for
4659 * http://everything2.com/?node_id=1051618
4663 #ifdef MONO_ARCH_SIMD_INTRINSICS
4664 if (cfg->opt & MONO_OPT_SIMD) {
4665 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
4671 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
4675 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *   Redirect selected runtime-internal calls to specialized implementations.
 *   Currently only rewrites String.InternalAllocateStr into a call to the
 *   GC's managed allocator, when allocation profiling is off and shared
 *   (domain-neutral) code is not being generated.
 *   NOTE(review): lines are elided in this excerpt; the fall-through
 *   "no redirection" return path is not visible here.
 */
4678 inline static MonoInst*
4679 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4680 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4682 if (method->klass == mono_defaults.string_class) {
4683 /* managed string allocation support */
4684 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
4685 MonoInst *iargs [2];
4686 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4687 MonoMethod *managed_alloc = NULL;
4689 g_assert (vtable); /* Should not fail since this is System.String */
4690 #ifndef MONO_CROSS_COMPILE
4691 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
/* Call the managed allocator with (vtable, length) instead of the icall. */
4695 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4696 iargs [1] = args [0];
4697 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *   When inlining, copy the caller's evaluation-stack entries (SP) into
 *   newly created local variables and point cfg->args at them, so the
 *   inlined body can address its arguments like a normal method.
 *   For the implicit 'this' argument (i == 0 with hasthis) the type is
 *   derived from the stack entry rather than the signature.
 */
4704 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4706 MonoInst *store, *temp;
4709 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4710 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4713 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4714 * would be different than the MonoInst's used to represent arguments, and
4715 * the ldelema implementation can't deal with that.
4716 * Solution: When ldelema is used on an inline argument, create a var for
4717 * it, emit ldelema on that var, and emit the saving code below in
4718 * inline_method () if needed.
4720 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4721 cfg->args [i] = temp;
4722 /* This uses cfg->args [i] which is set by the preceding line */
4723 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4724 store->cil_code = sp [0]->cil_code;
4729 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4730 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4732 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *   Debugging aid: restrict which CALLED methods may be inlined by
 *   comparing their full name against the prefix in the
 *   MONO_INLINE_CALLED_METHOD_NAME_LIMIT environment variable.
 *   The limit is read once and cached in a static (not thread-safe by
 *   design; this is a debug-only knob).
 */
4734 check_inline_called_method_name_limit (MonoMethod *called_method)
4737 static char *limit = NULL;
4739 if (limit == NULL) {
4740 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4742 if (limit_string != NULL)
4743 limit = limit_string;
/* No env var set: cache an empty string so getenv is not called again. */
4745 limit = (char *) "";
4748 if (limit [0] != '\0') {
4749 char *called_method_name = mono_method_full_name (called_method, TRUE);
4751 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4752 g_free (called_method_name);
4754 //return (strncmp_result <= 0);
4755 return (strncmp_result == 0);
4762 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *   Debugging aid: restrict which CALLER methods may perform inlining by
 *   comparing their full name against the prefix in the
 *   MONO_INLINE_CALLER_METHOD_NAME_LIMIT environment variable.
 *   Mirrors check_inline_called_method_name_limit above.
 */
4764 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4767 static char *limit = NULL;
4769 if (limit == NULL) {
4770 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4771 if (limit_string != NULL) {
4772 limit = limit_string;
/* No env var set: cache an empty string so getenv is not called again. */
4774 limit = (char *) "";
4778 if (limit [0] != '\0') {
4779 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
4781 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4782 g_free (caller_method_name);
4784 //return (strncmp_result <= 0);
4785 return (strncmp_result == 0);
/*
 * inline_method:
 *   Try to inline CMETHOD into the current compilation at IP.
 *   Saves the parts of CFG that mono_method_to_ir mutates (locals, args,
 *   bblock maps, generic context, ...), recursively translates CMETHOD's
 *   body between a fresh start/end bblock pair, restores CFG, and then
 *   either commits (linking and merging the new bblocks) or aborts
 *   (discarding them).  Returns the cost on success; the failure return
 *   value is on elided lines — presumably 0, TODO confirm.
 *   NOTE(review): many lines are elided in this excerpt (gaps in the
 *   embedded numbering); control flow between the visible fragments is
 *   partly reconstructed from context.
 */
4793 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4794 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_always)
4796 MonoInst *ins, *rvar = NULL;
4797 MonoMethodHeader *cheader;
4798 MonoBasicBlock *ebblock, *sbblock;
4800 MonoMethod *prev_inlined_method;
4801 MonoInst **prev_locals, **prev_args;
4802 MonoType **prev_arg_types;
4803 guint prev_real_offset;
4804 GHashTable *prev_cbb_hash;
4805 MonoBasicBlock **prev_cil_offset_to_bb;
4806 MonoBasicBlock *prev_cbb;
4807 unsigned char* prev_cil_start;
4808 guint32 prev_cil_offset_to_bb_len;
4809 MonoMethod *prev_current_method;
4810 MonoGenericContext *prev_generic_context;
4811 gboolean ret_var_set, prev_ret_var_set;
4813 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional debug-only name filters (see the helpers above). */
4815 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4816 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
4819 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4820 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
4824 if (cfg->verbose_level > 2)
4825 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4827 if (!cmethod->inline_info) {
4828 mono_jit_stats.inlineable_methods++;
4829 cmethod->inline_info = 1;
4832 /* allocate local variables */
4833 cheader = mono_method_get_header (cmethod);
4835 if (cheader == NULL || mono_loader_get_last_error ()) {
4836 MonoLoaderError *error = mono_loader_get_last_error ();
4839 mono_metadata_free_mh (cheader);
4840 if (inline_always && error)
4841 mono_cfg_set_exception (cfg, error->exception_type);
4843 mono_loader_clear_error ();
4847 /*Must verify before creating locals as it can cause the JIT to assert.*/
4848 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
4849 mono_metadata_free_mh (cheader);
4853 /* allocate space to store the return value */
4854 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4855 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* Swap in the inlinee's locals; caller's array is restored below. */
4859 prev_locals = cfg->locals;
4860 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4861 for (i = 0; i < cheader->num_locals; ++i)
4862 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4864 /* allocate start and end blocks */
4865 /* This is needed so if the inline is aborted, we can clean up */
4866 NEW_BBLOCK (cfg, sbblock);
4867 sbblock->real_offset = real_offset;
4869 NEW_BBLOCK (cfg, ebblock);
4870 ebblock->block_num = cfg->num_bblocks++;
4871 ebblock->real_offset = real_offset;
/* Save every CFG field that mono_method_to_ir overwrites. */
4873 prev_args = cfg->args;
4874 prev_arg_types = cfg->arg_types;
4875 prev_inlined_method = cfg->inlined_method;
4876 cfg->inlined_method = cmethod;
4877 cfg->ret_var_set = FALSE;
4878 cfg->inline_depth ++;
4879 prev_real_offset = cfg->real_offset;
4880 prev_cbb_hash = cfg->cbb_hash;
4881 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4882 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4883 prev_cil_start = cfg->cil_start;
4884 prev_cbb = cfg->cbb;
4885 prev_current_method = cfg->current_method;
4886 prev_generic_context = cfg->generic_context;
4887 prev_ret_var_set = cfg->ret_var_set;
/* Recursively translate the inlinee's IL between sbblock and ebblock. */
4889 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4891 ret_var_set = cfg->ret_var_set;
/* Restore the caller's compilation state. */
4893 cfg->inlined_method = prev_inlined_method;
4894 cfg->real_offset = prev_real_offset;
4895 cfg->cbb_hash = prev_cbb_hash;
4896 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4897 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4898 cfg->cil_start = prev_cil_start;
4899 cfg->locals = prev_locals;
4900 cfg->args = prev_args;
4901 cfg->arg_types = prev_arg_types;
4902 cfg->current_method = prev_current_method;
4903 cfg->generic_context = prev_generic_context;
4904 cfg->ret_var_set = prev_ret_var_set;
4905 cfg->inline_depth --;
/* Commit the inline if the body was cheap enough (cost < 60) or forced. */
4907 if ((costs >= 0 && costs < 60) || inline_always) {
4908 if (cfg->verbose_level > 2)
4909 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4911 mono_jit_stats.inlined_methods++;
4913 /* always add some code to avoid block split failures */
4914 MONO_INST_NEW (cfg, ins, OP_NOP);
4915 MONO_ADD_INS (prev_cbb, ins);
4917 prev_cbb->next_bb = sbblock;
4918 link_bblock (cfg, prev_cbb, sbblock);
4921 * Get rid of the begin and end bblocks if possible to aid local
4924 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4926 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4927 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4929 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4930 MonoBasicBlock *prev = ebblock->in_bb [0];
4931 mono_merge_basic_blocks (cfg, prev, ebblock);
4933 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4934 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4935 cfg->cbb = prev_cbb;
4943 * If the inlined method contains only a throw, then the ret var is not
4944 * set, so set it to a dummy value.
4947 static double r8_0 = 0.0;
4949 switch (rvar->type) {
4951 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4954 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4959 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4962 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4963 ins->type = STACK_R8;
4964 ins->inst_p0 = (void*)&r8_0;
4965 ins->dreg = rvar->dreg;
4966 MONO_ADD_INS (cfg->cbb, ins);
4969 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4972 g_assert_not_reached ();
/* Push the (possibly dummy) return value back onto the caller's stack. */
4976 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
4979 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Abort path: the inline was too costly or translation failed. */
4982 if (cfg->verbose_level > 2)
4983 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4984 cfg->exception_type = MONO_EXCEPTION_NONE;
4985 mono_loader_clear_error ();
4987 /* This gets rid of the newly added bblocks */
4988 cfg->cbb = prev_cbb;
4990 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
4995 * Some of these comments may well be out-of-date.
4996 * Design decisions: we do a single pass over the IL code (and we do bblock
4997 * splitting/merging in the few cases when it's required: a back jump to an IL
4998 * address that was not already seen as bblock starting point).
4999 * Code is validated as we go (full verification is still better left to metadata/verify.c).
5000 * Complex operations are decomposed in simpler ones right away. We need to let the
5001 * arch-specific code peek and poke inside this process somehow (except when the
5002 * optimizations can take advantage of the full semantic info of coarse opcodes).
5003 * All the opcodes of the form opcode.s are 'normalized' to opcode.
5004 * MonoInst->opcode initially is the IL opcode or some simplification of that
5005 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
5006 * opcode with value bigger than OP_LAST.
5007 * At this point the IR can be handed over to an interpreter, a dumb code generator
5008 * or to the optimizing code generator that will translate it to SSA form.
5010 * Profiling directed optimizations.
5011 * We may compile by default with few or no optimizations and instrument the code
5012 * or the user may indicate what methods to optimize the most either in a config file
5013 * or through repeated runs where the compiler applies offline the optimizations to
5014 * each method and then decides if it was worth it.
5017 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
5018 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
5019 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
5020 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
5021 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
5022 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
5023 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
5024 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; LOAD_ERROR;}
5026 /* offset from br.s -> br like opcodes */
5027 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *   Return TRUE if the instruction at IP still belongs to bblock BB,
 *   i.e. no other bblock has been registered as starting at that offset.
 */
5030 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
5032 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
5034 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *   First pass over the IL in [start, end): decode each opcode, register
 *   bblock start points (via GET_BBLOCK) at every branch target and at the
 *   instruction following each branch/switch, and mark bblocks that end in
 *   CEE_THROW as out-of-line so later passes can move them off the hot path.
 *   NOTE(review): lines are elided in this excerpt; the ip-advance for each
 *   operand size and the error handling around mono_opcode_value are not
 *   fully visible.
 */
5038 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
5040 unsigned char *ip = start;
5041 unsigned char *target;
5044 MonoBasicBlock *bblock;
5045 const MonoOpcode *opcode;
5048 cli_addr = ip - start;
5049 i = mono_opcode_value ((const guint8 **)&ip, end);
5052 opcode = &mono_opcodes [i];
/* Advance over the operand; branches also register their targets. */
5053 switch (opcode->argument) {
5054 case MonoInlineNone:
5057 case MonoInlineString:
5058 case MonoInlineType:
5059 case MonoInlineField:
5060 case MonoInlineMethod:
5063 case MonoShortInlineR:
5070 case MonoShortInlineVar:
5071 case MonoShortInlineI:
5074 case MonoShortInlineBrTarget:
/* 1-byte signed relative target; the next instruction also starts a bblock. */
5075 target = start + cli_addr + 2 + (signed char)ip [1];
5076 GET_BBLOCK (cfg, bblock, target);
5079 GET_BBLOCK (cfg, bblock, ip);
5081 case MonoInlineBrTarget:
/* 4-byte signed relative target. */
5082 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
5083 GET_BBLOCK (cfg, bblock, target);
5086 GET_BBLOCK (cfg, bblock, ip);
5088 case MonoInlineSwitch: {
5089 guint32 n = read32 (ip + 1);
/* Targets are relative to the end of the whole switch instruction. */
5092 cli_addr += 5 + 4 * n;
5093 target = start + cli_addr;
5094 GET_BBLOCK (cfg, bblock, target);
5096 for (j = 0; j < n; ++j) {
5097 target = start + cli_addr + (gint32)read32 (ip);
5098 GET_BBLOCK (cfg, bblock, target);
5108 g_assert_not_reached ();
5111 if (i == CEE_THROW) {
5112 unsigned char *bb_start = ip - 1;
5114 /* Find the start of the bblock containing the throw */
5116 while ((bb_start >= start) && !bblock) {
5117 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
5121 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *   Resolve a method TOKEN in the context of method M.  For wrapper
 *   methods the token indexes the wrapper's data table; otherwise the
 *   token is resolved through M's image with the given class/context.
 *   "allow_open" = open constructed types are not rejected here
 *   (mini_get_method below adds that check).
 */
5130 static inline MonoMethod *
5131 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5135 if (m->wrapper_type != MONO_WRAPPER_NONE)
5136 return mono_method_get_wrapper_data (m, token);
5138 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *   Like mini_get_method_allow_open, but when not compiling with generic
 *   sharing, a method on an open constructed type is rejected (the elided
 *   line presumably NULLs/flags it — TODO confirm against full source).
 */
5143 static inline MonoMethod *
5144 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5146 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
5148 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *   Resolve a type TOKEN in the context of METHOD: wrapper methods look
 *   the class up in their wrapper data, normal methods resolve through
 *   their image with CONTEXT.  The class is initialized before returning.
 */
5154 static inline MonoClass*
5155 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
5159 if (method->wrapper_type != MONO_WRAPPER_NONE)
5160 klass = mono_method_get_wrapper_data (method, token);
5162 klass = mono_class_get_full (method->klass->image, token, context);
5164 mono_class_init (klass);
5169 * Returns TRUE if the JIT should abort inlining because "callee"
5170 * is influenced by security attributes.
/*
 * check_linkdemand:
 *   CAS security: evaluate declarative LinkDemands on CALLEE when called
 *   from CALLER.  For an ECMA "transparent" link demand, code throwing a
 *   SecurityException is emitted before the call; other failures record a
 *   MONO_EXCEPTION_SECURITY_LINKDEMAND on the CFG (without clobbering an
 *   earlier exception).  Per the comment above, returns TRUE when inlining
 *   must be aborted because security attributes are involved.
 */
5173 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
/* Only re-check when inlining (cfg->method != caller) into a secured callee. */
5177 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
5181 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
5182 if (result == MONO_JIT_SECURITY_OK)
5185 if (result == MONO_JIT_LINKDEMAND_ECMA) {
5186 /* Generate code to throw a SecurityException before the actual call/link */
5187 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5190 NEW_ICONST (cfg, args [0], 4);
5191 NEW_METHODCONST (cfg, args [1], caller);
5192 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
5193 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
5194 /* don't hide previous results */
5195 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
5196 cfg->exception_data = result;
/*
 * throw_exception:
 *   Return (lazily resolving and caching) the managed
 *   SecurityManager.ThrowException(Exception) helper method used by
 *   emit_throw_exception below.
 */
5204 throw_exception (void)
5206 static MonoMethod *method = NULL;
5209 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5210 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *   Emit IR that throws the pre-built exception object EX at runtime by
 *   calling SecurityManager.ThrowException with EX as a pointer constant.
 */
5217 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
5219 MonoMethod *thrower = throw_exception ();
5222 EMIT_NEW_PCONST (cfg, args [0], ex);
5223 mono_emit_method_call (cfg, thrower, args, NULL);
5227 * Return the original method is a wrapper is specified. We can only access
5228 * the custom attributes from the original method.
/*
 * get_original_method:
 *   Return the method whose custom attributes should be consulted:
 *   the method itself for non-wrappers, NULL-equivalent behavior for
 *   native-to-managed wrappers (see comment below), and otherwise the
 *   wrapped method recovered via mono_marshal_method_from_wrapper.
 */
5231 get_original_method (MonoMethod *method)
5233 if (method->wrapper_type == MONO_WRAPPER_NONE)
5236 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
5237 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
5240 /* in other cases we need to find the original method */
5241 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *   CoreCLR security: if CALLER (unwrapped via get_original_method) is not
 *   allowed to access FIELD, emit code that throws the returned exception
 *   at runtime instead of performing the access.
 */
5245 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
5246 MonoBasicBlock *bblock, unsigned char *ip)
5248 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5249 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
5251 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *   CoreCLR security: if CALLER (unwrapped via get_original_method) is not
 *   allowed to call CALLEE, emit code that throws the returned exception
 *   at runtime instead of performing the call.
 */
5255 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
5256 MonoBasicBlock *bblock, unsigned char *ip)
5258 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5259 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
5261 emit_throw_exception (cfg, ex);
5265 * Check that the IL instructions at ip are the array initialization
5266 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *   Recognize the dup/ldtoken/call RuntimeHelpers.InitializeArray IL
 *   pattern following a newarr and, when the element type allows a raw
 *   byte copy (little-endian only for multi-byte types), return a pointer
 *   to the static field data (or its RVA when AOT compiling) so the array
 *   can be initialized with a memcpy instead of the helper call.
 *   Returns NULL when the pattern or element type does not match.
 *   NOTE(review): lines are elided; several early-out checks and the size
 *   computation per element type are not fully visible here.
 */
5269 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
/* Expected IL shape:
 *   newarr[System.Int32]
 */
5272 * newarr[System.Int32]
5274 * ldtoken field valuetype ...
5275 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
5277 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
5278 guint32 token = read32 (ip + 7);
5279 guint32 field_token = read32 (ip + 2);
5280 guint32 field_index = field_token & 0xffffff;
5282 const char *data_ptr;
5284 MonoMethod *cmethod;
5285 MonoClass *dummy_class;
5286 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
5292 *out_field_token = field_token;
5294 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Only RuntimeHelpers.InitializeArray from corlib qualifies. */
5297 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
5299 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
5300 case MONO_TYPE_BOOLEAN:
5304 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
5305 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
5306 case MONO_TYPE_CHAR:
5316 return NULL; /* stupid ARM FP swapped format */
/* The blob must be at least as large as the field's declared size. */
5326 if (size > mono_type_size (field->type, &dummy_align))
5329 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
5330 if (!method->klass->image->dynamic) {
5331 field_index = read32 (ip + 2) & 0xffffff;
5332 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
5333 data_ptr = mono_image_rva_map (method->klass->image, rva);
5334 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
5335 /* for aot code we do the lookup on load */
5336 if (aot && data_ptr)
5337 return GUINT_TO_POINTER (rva);
5339 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
5341 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *   Record a MONO_EXCEPTION_INVALID_PROGRAM on CFG with a message naming
 *   METHOD and disassembling the offending instruction at IP (or noting
 *   an empty method body).  The header is queued for later freeing.
 */
5349 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
5351 char *method_fname = mono_method_full_name (method, TRUE);
5353 MonoMethodHeader *header = mono_method_get_header (method);
5355 if (header->code_size == 0)
5356 method_code = g_strdup ("method body is empty.");
5358 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
5359 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
5360 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
5361 g_free (method_fname);
5362 g_free (method_code);
5363 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *   Record a pre-built managed exception object on CFG; exception_ptr is
 *   registered as a GC root since it now references a managed object.
 */
5367 set_exception_object (MonoCompile *cfg, MonoException *exception)
5369 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
5370 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
5371 cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 *   Return TRUE if KLASS is a reference type; under generic sharing the
 *   shared (basic) representation of the type is consulted instead of the
 *   class's own byval type.
 */
5375 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5379 if (cfg->generic_sharing_context)
5380 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
5382 type = &klass->byval_arg;
5383 return MONO_TYPE_IS_REFERENCE (type);
/*
 * emit_stloc_ir:
 *   Emit a store of the stack top (*sp) into local N.  When the store
 *   would be a plain reg-reg move of a just-emitted constant, the move is
 *   avoided by retargeting the constant's dreg to the local's register.
 */
5387 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5390 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5391 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5392 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5393 /* Optimize reg-reg moves away */
5395 * Can't optimize other opcodes, since sp[0] might point to
5396 * the last ins of a decomposed opcode.
5398 sp [0]->dreg = (cfg)->locals [n]->dreg;
5400 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5405 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *   Peephole for ldloca: when the address of a local is taken only to be
 *   immediately passed to initobj in the same bblock, skip the address
 *   take and zero the local directly (NULL for reference types, VZERO for
 *   value types).  Returns the new IP past the consumed instructions, or
 *   presumably NULL when the pattern does not match (return on elided
 *   lines — TODO confirm).  SIZE selects the short/long ldloca encoding.
 */
5408 static inline unsigned char *
5409 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
5418 local = read16 (ip + 2);
/* Match a following "initobj <token>" within the same bblock. */
5422 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5423 gboolean skip = FALSE;
5425 /* From the INITOBJ case */
5426 token = read32 (ip + 2);
5427 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5428 CHECK_TYPELOAD (klass);
5429 if (generic_class_is_reference_type (cfg, klass)) {
5430 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5431 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5432 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5433 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5434 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/*
 * is_exception_class:
 *   Walk the parent chain of CLASS and return TRUE if it derives from
 *   (or is) System.Exception.
 */
5447 is_exception_class (MonoClass *class)
5450 if (class == mono_defaults.exception_class)
5452 class = class->parent;
5458 * is_jit_optimizer_disabled:
5460 * Determine whenever M's assembly has a DebuggableAttribute with the
5461 * IsJITOptimizerDisabled flag set.
/*
 * is_jit_optimizer_disabled:
 *   (See the comment above.)  Decode the assembly's DebuggableAttribute
 *   to determine whether JIT optimizations should be disabled for M's
 *   assembly.  The result is cached on the assembly; the store is
 *   published with a memory barrier before the inited flag is set so
 *   concurrent readers see a consistent pair.
 *   NOTE(review): lines are elided; the decoding of the two boolean
 *   arguments into VAL is not fully visible here.
 */
5464 is_jit_optimizer_disabled (MonoMethod *m)
5466 MonoAssembly *ass = m->klass->image->assembly;
5467 MonoCustomAttrInfo* attrs;
5468 static MonoClass *klass;
5470 gboolean val = FALSE;
/* Fast path: already computed for this assembly. */
5473 if (ass->jit_optimizer_disabled_inited)
5474 return ass->jit_optimizer_disabled;
5476 klass = mono_class_from_name_cached (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
5478 attrs = mono_custom_attrs_from_assembly (ass);
5480 for (i = 0; i < attrs->num_attrs; ++i) {
5481 MonoCustomAttrEntry *attr = &attrs->attrs [i];
5484 MonoMethodSignature *sig;
5486 if (!attr->ctor || attr->ctor->klass != klass)
5488 /* Decode the attribute. See reflection.c */
5489 len = attr->data_size;
5490 p = (const char*)attr->data;
/* Custom attribute blobs start with the 0x0001 prolog. */
5491 g_assert (read16 (p) == 0x0001);
5494 // FIXME: Support named parameters
5495 sig = mono_method_signature (attr->ctor);
/* Only the DebuggableAttribute(bool, bool) ctor is understood. */
5496 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
5498 /* Two boolean arguments */
5502 mono_custom_attrs_free (attrs);
5505 ass->jit_optimizer_disabled = val;
5506 mono_memory_barrier ();
5507 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *   Decide whether a tail. call from METHOD to CMETHOD can actually be
 *   compiled as a tail call.  Starts from an arch-specific or signature
 *   compatibility check, then vetoes anything that could leave pointers
 *   into the current frame alive (byref/pointer/fnptr args, valuetype
 *   'this'), pinvokes, methods needing an LMF frame, and most wrappers.
 */
5513 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig)
5515 gboolean supported_tail_call;
5518 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
5519 supported_tail_call = MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
5521 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
5524 for (i = 0; i < fsig->param_count; ++i) {
5525 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
5526 /* These can point to the current method's stack */
5527 supported_tail_call = FALSE;
5529 if (fsig->hasthis && cmethod->klass->valuetype)
5530 /* this might point to the current method's stack */
5531 supported_tail_call = FALSE;
5532 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
5533 supported_tail_call = FALSE;
5534 if (cfg->method->save_lmf)
5535 supported_tail_call = FALSE;
5536 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
5537 supported_tail_call = FALSE;
5539 /* Debugging support */
/* COUNT env var: binary-search aid to find a tail call that breaks. */
5541 if (supported_tail_call) {
5542 static int count = 0;
5544 if (getenv ("COUNT")) {
5545 if (count == atoi (getenv ("COUNT")))
5546 printf ("LAST: %s\n", mono_method_full_name (cmethod, TRUE));
5547 if (count > atoi (getenv ("COUNT")))
5548 supported_tail_call = FALSE;
5553 return supported_tail_call;
5557 * mono_method_to_ir:
5559 * Translate the .net IL into linear IR.
5562 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5563 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5564 guint inline_offset, gboolean is_virtual_call)
5567 MonoInst *ins, **sp, **stack_start;
5568 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5569 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
5570 MonoMethod *cmethod, *method_definition;
5571 MonoInst **arg_array;
5572 MonoMethodHeader *header;
5574 guint32 token, ins_flag;
5576 MonoClass *constrained_call = NULL;
5577 unsigned char *ip, *end, *target, *err_pos;
5578 static double r8_0 = 0.0;
5579 MonoMethodSignature *sig;
5580 MonoGenericContext *generic_context = NULL;
5581 MonoGenericContainer *generic_container = NULL;
5582 MonoType **param_types;
5583 int i, n, start_new_bblock, dreg;
5584 int num_calls = 0, inline_costs = 0;
5585 int breakpoint_id = 0;
5587 MonoBoolean security, pinvoke;
5588 MonoSecurityManager* secman = NULL;
5589 MonoDeclSecurityActions actions;
5590 GSList *class_inits = NULL;
5591 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5593 gboolean init_locals, seq_points, skip_dead_blocks;
5594 gboolean disable_inline;
5596 disable_inline = is_jit_optimizer_disabled (method);
5598 /* serialization and xdomain stuff may need access to private fields and methods */
5599 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5600 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5601 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5602 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5603 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5604 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5606 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5608 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5609 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5610 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5611 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5613 image = method->klass->image;
5614 header = mono_method_get_header (method);
5616 MonoLoaderError *error;
5618 if ((error = mono_loader_get_last_error ())) {
5619 mono_cfg_set_exception (cfg, error->exception_type);
5621 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
5622 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
5624 goto exception_exit;
5626 generic_container = mono_method_get_generic_container (method);
5627 sig = mono_method_signature (method);
5628 num_args = sig->hasthis + sig->param_count;
5629 ip = (unsigned char*)header->code;
5630 cfg->cil_start = ip;
5631 end = ip + header->code_size;
5632 mono_jit_stats.cil_code_size += header->code_size;
5633 init_locals = header->init_locals;
5635 seq_points = cfg->gen_seq_points && cfg->method == method;
5638 * Methods without init_locals set could cause asserts in various passes
5643 method_definition = method;
5644 while (method_definition->is_inflated) {
5645 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5646 method_definition = imethod->declaring;
5649 /* SkipVerification is not allowed if core-clr is enabled */
5650 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5652 dont_verify_stloc = TRUE;
5655 if (mono_debug_using_mono_debugger ())
5656 cfg->keep_cil_nops = TRUE;
5658 if (sig->is_inflated)
5659 generic_context = mono_method_get_context (method);
5660 else if (generic_container)
5661 generic_context = &generic_container->context;
5662 cfg->generic_context = generic_context;
5664 if (!cfg->generic_sharing_context)
5665 g_assert (!sig->has_type_parameters);
5667 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5668 g_assert (method->is_inflated);
5669 g_assert (mono_method_get_context (method)->method_inst);
5671 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5672 g_assert (sig->generic_param_count);
5674 if (cfg->method == method) {
5675 cfg->real_offset = 0;
5677 cfg->real_offset = inline_offset;
5680 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5681 cfg->cil_offset_to_bb_len = header->code_size;
5683 cfg->current_method = method;
5685 if (cfg->verbose_level > 2)
5686 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5688 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5690 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5691 for (n = 0; n < sig->param_count; ++n)
5692 param_types [n + sig->hasthis] = sig->params [n];
5693 cfg->arg_types = param_types;
5695 dont_inline = g_list_prepend (dont_inline, method);
5696 if (cfg->method == method) {
5698 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5699 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5702 NEW_BBLOCK (cfg, start_bblock);
5703 cfg->bb_entry = start_bblock;
5704 start_bblock->cil_code = NULL;
5705 start_bblock->cil_length = 0;
5708 NEW_BBLOCK (cfg, end_bblock);
5709 cfg->bb_exit = end_bblock;
5710 end_bblock->cil_code = NULL;
5711 end_bblock->cil_length = 0;
5712 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
5713 g_assert (cfg->num_bblocks == 2);
5715 arg_array = cfg->args;
5717 if (header->num_clauses) {
5718 cfg->spvars = g_hash_table_new (NULL, NULL);
5719 cfg->exvars = g_hash_table_new (NULL, NULL);
5721 /* handle exception clauses */
5722 for (i = 0; i < header->num_clauses; ++i) {
5723 MonoBasicBlock *try_bb;
5724 MonoExceptionClause *clause = &header->clauses [i];
5725 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5726 try_bb->real_offset = clause->try_offset;
5727 try_bb->try_start = TRUE;
5728 try_bb->region = ((i + 1) << 8) | clause->flags;
5729 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5730 tblock->real_offset = clause->handler_offset;
5731 tblock->flags |= BB_EXCEPTION_HANDLER;
5733 link_bblock (cfg, try_bb, tblock);
5735 if (*(ip + clause->handler_offset) == CEE_POP)
5736 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5738 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5739 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5740 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5742 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
5743 MONO_ADD_INS (tblock, ins);
5745 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5746 MONO_ADD_INS (tblock, ins);
5748 /* todo: is a fault block unsafe to optimize? */
5749 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5750 tblock->flags |= BB_EXCEPTION_UNSAFE;
5754 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5756 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5758 /* catch and filter blocks get the exception object on the stack */
5759 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5760 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5761 MonoInst *dummy_use;
5763 /* mostly like handle_stack_args (), but just sets the input args */
5764 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5765 tblock->in_scount = 1;
5766 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5767 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5770 * Add a dummy use for the exvar so its liveness info will be
5774 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5776 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5777 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5778 tblock->flags |= BB_EXCEPTION_HANDLER;
5779 tblock->real_offset = clause->data.filter_offset;
5780 tblock->in_scount = 1;
5781 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5782 /* The filter block shares the exvar with the handler block */
5783 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5784 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5785 MONO_ADD_INS (tblock, ins);
5789 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5790 clause->data.catch_class &&
5791 cfg->generic_sharing_context &&
5792 mono_class_check_context_used (clause->data.catch_class)) {
5794 * In shared generic code with catch
5795 * clauses containing type variables
5796 * the exception handling code has to
5797 * be able to get to the rgctx.
5798 * Therefore we have to make sure that
5799 * the vtable/mrgctx argument (for
5800 * static or generic methods) or the
5801 * "this" argument (for non-static
5802 * methods) are live.
5804 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5805 mini_method_get_context (method)->method_inst ||
5806 method->klass->valuetype) {
5807 mono_get_vtable_var (cfg);
5809 MonoInst *dummy_use;
5811 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5816 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5817 cfg->cbb = start_bblock;
5818 cfg->args = arg_array;
5819 mono_save_args (cfg, sig, inline_args);
5822 /* FIRST CODE BLOCK */
5823 NEW_BBLOCK (cfg, bblock);
5824 bblock->cil_code = ip;
5828 ADD_BBLOCK (cfg, bblock);
5830 if (cfg->method == method) {
5831 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5832 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5833 MONO_INST_NEW (cfg, ins, OP_BREAK);
5834 MONO_ADD_INS (bblock, ins);
5838 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5839 secman = mono_security_manager_get_methods ();
5841 security = (secman && mono_method_has_declsec (method));
5842 /* at this point having security doesn't mean we have any code to generate */
5843 if (security && (cfg->method == method)) {
5844 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5845 * And we do not want to enter the next section (with allocation) if we
5846 * have nothing to generate */
5847 security = mono_declsec_get_demands (method, &actions);
5850 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5851 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5853 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5854 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5855 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5857 /* unless the method or its class has the [SuppressUnmanagedCodeSecurity] attribute */
5858 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5862 mono_custom_attrs_free (custom);
5865 custom = mono_custom_attrs_from_class (wrapped->klass);
5866 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5870 mono_custom_attrs_free (custom);
5873 /* not a P/Invoke after all */
5878 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5879 /* we use a separate basic block for the initialization code */
5880 NEW_BBLOCK (cfg, init_localsbb);
5881 cfg->bb_init = init_localsbb;
5882 init_localsbb->real_offset = cfg->real_offset;
5883 start_bblock->next_bb = init_localsbb;
5884 init_localsbb->next_bb = bblock;
5885 link_bblock (cfg, start_bblock, init_localsbb);
5886 link_bblock (cfg, init_localsbb, bblock);
5888 cfg->cbb = init_localsbb;
5890 start_bblock->next_bb = bblock;
5891 link_bblock (cfg, start_bblock, bblock);
5894 /* at this point we know, if security is TRUE, that some code needs to be generated */
5895 if (security && (cfg->method == method)) {
5898 mono_jit_stats.cas_demand_generation++;
5900 if (actions.demand.blob) {
5901 /* Add code for SecurityAction.Demand */
5902 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5903 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5904 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5905 mono_emit_method_call (cfg, secman->demand, args, NULL);
5907 if (actions.noncasdemand.blob) {
5908 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5909 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5910 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5911 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5912 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5913 mono_emit_method_call (cfg, secman->demand, args, NULL);
5915 if (actions.demandchoice.blob) {
5916 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5917 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5918 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5919 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5920 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5924 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5926 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5929 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5930 /* check if this is native code, e.g. an icall or a p/invoke */
5931 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5932 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5934 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
5935 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
5937 /* if this is a native call then it can only be JITted from platform code */
5938 if ((icall || pinvk) && method->klass && method->klass->image) {
5939 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
5940 MonoException *ex = icall ? mono_get_exception_security () :
5941 mono_get_exception_method_access ();
5942 emit_throw_exception (cfg, ex);
5949 if (header->code_size == 0)
5952 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5957 if (cfg->method == method)
5958 mono_debug_init_method (cfg, bblock, breakpoint_id);
5960 for (n = 0; n < header->num_locals; ++n) {
5961 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5966 /* We force the vtable variable here for all shared methods
5967 for the possibility that they might show up in a stack
5968 trace where their exact instantiation is needed. */
5969 if (cfg->generic_sharing_context && method == cfg->method) {
5970 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5971 mini_method_get_context (method)->method_inst ||
5972 method->klass->valuetype) {
5973 mono_get_vtable_var (cfg);
5975 /* FIXME: Is there a better way to do this?
5976 We need the variable live for the duration
5977 of the whole method. */
5978 cfg->args [0]->flags |= MONO_INST_INDIRECT;
5982 /* add a check for this != NULL to inlined methods */
5983 if (is_virtual_call) {
5986 NEW_ARGLOAD (cfg, arg_ins, 0);
5987 MONO_ADD_INS (cfg->cbb, arg_ins);
5988 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
5991 skip_dead_blocks = !dont_verify;
5992 if (skip_dead_blocks) {
5993 original_bb = bb = mono_basic_block_split (method, &error);
5994 if (!mono_error_ok (&error)) {
5995 mono_error_cleanup (&error);
6001 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
6002 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
6005 start_new_bblock = 0;
6008 if (cfg->method == method)
6009 cfg->real_offset = ip - header->code;
6011 cfg->real_offset = inline_offset;
6016 if (start_new_bblock) {
6017 bblock->cil_length = ip - bblock->cil_code;
6018 if (start_new_bblock == 2) {
6019 g_assert (ip == tblock->cil_code);
6021 GET_BBLOCK (cfg, tblock, ip);
6023 bblock->next_bb = tblock;
6026 start_new_bblock = 0;
6027 for (i = 0; i < bblock->in_scount; ++i) {
6028 if (cfg->verbose_level > 3)
6029 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
6030 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
6034 g_slist_free (class_inits);
6037 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
6038 link_bblock (cfg, bblock, tblock);
6039 if (sp != stack_start) {
6040 handle_stack_args (cfg, stack_start, sp - stack_start);
6042 CHECK_UNVERIFIABLE (cfg);
6044 bblock->next_bb = tblock;
6047 for (i = 0; i < bblock->in_scount; ++i) {
6048 if (cfg->verbose_level > 3)
6049 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
6050 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
6053 g_slist_free (class_inits);
6058 if (skip_dead_blocks) {
6059 int ip_offset = ip - header->code;
6061 if (ip_offset == bb->end)
6065 int op_size = mono_opcode_size (ip, end);
6066 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
6068 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
6070 if (ip_offset + op_size == bb->end) {
6071 MONO_INST_NEW (cfg, ins, OP_NOP);
6072 MONO_ADD_INS (bblock, ins);
6073 start_new_bblock = 1;
6081 * Sequence points are points where the debugger can place a breakpoint.
6082 * Currently, we generate these automatically at points where the IL
6085 if (seq_points && sp == stack_start) {
6086 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
6087 MONO_ADD_INS (cfg->cbb, ins);
6090 bblock->real_offset = cfg->real_offset;
6092 if ((cfg->method == method) && cfg->coverage_info) {
6093 guint32 cil_offset = ip - header->code;
6094 cfg->coverage_info->data [cil_offset].cil_code = ip;
6096 /* TODO: Use an increment here */
6097 #if defined(TARGET_X86)
6098 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
6099 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
6101 MONO_ADD_INS (cfg->cbb, ins);
6103 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
6104 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
6108 if (cfg->verbose_level > 3)
6109 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
6113 if (cfg->keep_cil_nops)
6114 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
6116 MONO_INST_NEW (cfg, ins, OP_NOP);
6118 MONO_ADD_INS (bblock, ins);
6121 if (should_insert_brekpoint (cfg->method))
6122 MONO_INST_NEW (cfg, ins, OP_BREAK);
6124 MONO_INST_NEW (cfg, ins, OP_NOP);
6126 MONO_ADD_INS (bblock, ins);
6132 CHECK_STACK_OVF (1);
6133 n = (*ip)-CEE_LDARG_0;
6135 EMIT_NEW_ARGLOAD (cfg, ins, n);
6143 CHECK_STACK_OVF (1);
6144 n = (*ip)-CEE_LDLOC_0;
6146 EMIT_NEW_LOCLOAD (cfg, ins, n);
6155 n = (*ip)-CEE_STLOC_0;
6158 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
6160 emit_stloc_ir (cfg, sp, header, n);
6167 CHECK_STACK_OVF (1);
6170 EMIT_NEW_ARGLOAD (cfg, ins, n);
6176 CHECK_STACK_OVF (1);
6179 NEW_ARGLOADA (cfg, ins, n);
6180 MONO_ADD_INS (cfg->cbb, ins);
6190 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
6192 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
6197 CHECK_STACK_OVF (1);
6200 EMIT_NEW_LOCLOAD (cfg, ins, n);
6204 case CEE_LDLOCA_S: {
6205 unsigned char *tmp_ip;
6207 CHECK_STACK_OVF (1);
6208 CHECK_LOCAL (ip [1]);
6210 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
6216 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
6225 CHECK_LOCAL (ip [1]);
6226 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
6228 emit_stloc_ir (cfg, sp, header, ip [1]);
6233 CHECK_STACK_OVF (1);
6234 EMIT_NEW_PCONST (cfg, ins, NULL);
6235 ins->type = STACK_OBJ;
6240 CHECK_STACK_OVF (1);
6241 EMIT_NEW_ICONST (cfg, ins, -1);
6254 CHECK_STACK_OVF (1);
6255 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
6261 CHECK_STACK_OVF (1);
6263 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
6269 CHECK_STACK_OVF (1);
6270 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
6276 CHECK_STACK_OVF (1);
6277 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6278 ins->type = STACK_I8;
6279 ins->dreg = alloc_dreg (cfg, STACK_I8);
6281 ins->inst_l = (gint64)read64 (ip);
6282 MONO_ADD_INS (bblock, ins);
6288 gboolean use_aotconst = FALSE;
6290 #ifdef TARGET_POWERPC
6291 /* FIXME: Clean this up */
6292 if (cfg->compile_aot)
6293 use_aotconst = TRUE;
6296 /* FIXME: we should really allocate this only late in the compilation process */
6297 f = mono_domain_alloc (cfg->domain, sizeof (float));
6299 CHECK_STACK_OVF (1);
6305 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
6307 dreg = alloc_freg (cfg);
6308 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
6309 ins->type = STACK_R8;
6311 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6312 ins->type = STACK_R8;
6313 ins->dreg = alloc_dreg (cfg, STACK_R8);
6315 MONO_ADD_INS (bblock, ins);
6325 gboolean use_aotconst = FALSE;
6327 #ifdef TARGET_POWERPC
6328 /* FIXME: Clean this up */
6329 if (cfg->compile_aot)
6330 use_aotconst = TRUE;
6333 /* FIXME: we should really allocate this only late in the compilation process */
6334 d = mono_domain_alloc (cfg->domain, sizeof (double));
6336 CHECK_STACK_OVF (1);
6342 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
6344 dreg = alloc_freg (cfg);
6345 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
6346 ins->type = STACK_R8;
6348 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6349 ins->type = STACK_R8;
6350 ins->dreg = alloc_dreg (cfg, STACK_R8);
6352 MONO_ADD_INS (bblock, ins);
6361 MonoInst *temp, *store;
6363 CHECK_STACK_OVF (1);
6367 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
6368 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
6370 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6373 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6386 if (sp [0]->type == STACK_R8)
6387 /* we need to pop the value from the x86 FP stack */
6388 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
6397 if (stack_start != sp)
6399 token = read32 (ip + 1);
6400 /* FIXME: check the signature matches */
6401 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6403 if (!cmethod || mono_loader_get_last_error ())
6406 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
6407 GENERIC_SHARING_FAILURE (CEE_JMP);
6409 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
6410 CHECK_CFG_EXCEPTION;
6412 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6414 MonoMethodSignature *fsig = mono_method_signature (cmethod);
6417 /* Handle tail calls similarly to calls */
6418 n = fsig->param_count + fsig->hasthis;
6420 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
6421 call->method = cmethod;
6422 call->tail_call = TRUE;
6423 call->signature = mono_method_signature (cmethod);
6424 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6425 call->inst.inst_p0 = cmethod;
6426 for (i = 0; i < n; ++i)
6427 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
6429 mono_arch_emit_call (cfg, call);
6430 MONO_ADD_INS (bblock, (MonoInst*)call);
6433 for (i = 0; i < num_args; ++i)
6434 /* Prevent arguments from being optimized away */
6435 arg_array [i]->flags |= MONO_INST_VOLATILE;
6437 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6438 ins = (MonoInst*)call;
6439 ins->inst_p0 = cmethod;
6440 MONO_ADD_INS (bblock, ins);
6444 start_new_bblock = 1;
6449 case CEE_CALLVIRT: {
6450 MonoInst *addr = NULL;
6451 MonoMethodSignature *fsig = NULL;
6453 int virtual = *ip == CEE_CALLVIRT;
6454 int calli = *ip == CEE_CALLI;
6455 gboolean pass_imt_from_rgctx = FALSE;
6456 MonoInst *imt_arg = NULL;
6457 gboolean pass_vtable = FALSE;
6458 gboolean pass_mrgctx = FALSE;
6459 MonoInst *vtable_arg = NULL;
6460 gboolean check_this = FALSE;
6461 gboolean supported_tail_call = FALSE;
6464 token = read32 (ip + 1);
6471 if (method->wrapper_type != MONO_WRAPPER_NONE)
6472 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6474 fsig = mono_metadata_parse_signature (image, token);
6476 n = fsig->param_count + fsig->hasthis;
6478 if (method->dynamic && fsig->pinvoke) {
6482 * This is a call through a function pointer using a pinvoke
6483 * signature. Have to create a wrapper and call that instead.
6484 * FIXME: This is very slow, need to create a wrapper at JIT time
6485 * instead based on the signature.
6487 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
6488 EMIT_NEW_PCONST (cfg, args [1], fsig);
6490 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6493 MonoMethod *cil_method;
6495 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6496 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6497 cil_method = cmethod;
6498 } else if (constrained_call) {
6499 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6501 * This is needed since get_method_constrained can't find
6502 * the method in klass representing a type var.
6503 * The type var is guaranteed to be a reference type in this
6506 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6507 cil_method = cmethod;
6508 g_assert (!cmethod->klass->valuetype);
6510 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6513 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6514 cil_method = cmethod;
6517 if (!cmethod || mono_loader_get_last_error ())
6519 if (!dont_verify && !cfg->skip_visibility) {
6520 MonoMethod *target_method = cil_method;
6521 if (method->is_inflated) {
6522 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6524 if (!mono_method_can_access_method (method_definition, target_method) &&
6525 !mono_method_can_access_method (method, cil_method))
6526 METHOD_ACCESS_FAILURE;
6529 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6530 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6532 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6533 /* MS.NET seems to silently convert this to a callvirt */
6538 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
6539 * converts to a callvirt.
6541 * tests/bug-515884.il is an example of this behavior
6543 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
6544 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
6545 if (!virtual && cmethod->klass->marshalbyref && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
6549 if (!cmethod->klass->inited)
6550 if (!mono_class_init (cmethod->klass))
6553 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6554 mini_class_is_system_array (cmethod->klass)) {
6555 array_rank = cmethod->klass->rank;
6556 fsig = mono_method_signature (cmethod);
6558 fsig = mono_method_signature (cmethod);
6563 if (fsig->pinvoke) {
6564 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6565 check_for_pending_exc, FALSE);
6566 fsig = mono_method_signature (wrapper);
6567 } else if (constrained_call) {
6568 fsig = mono_method_signature (cmethod);
6570 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6574 mono_save_token_info (cfg, image, token, cil_method);
6576 n = fsig->param_count + fsig->hasthis;
6578 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6579 if (check_linkdemand (cfg, method, cmethod))
6581 CHECK_CFG_EXCEPTION;
6584 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6585 g_assert_not_reached ();
6588 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6591 if (!cfg->generic_sharing_context && cmethod)
6592 g_assert (!mono_method_check_context_used (cmethod));
6596 //g_assert (!virtual || fsig->hasthis);
6600 if (constrained_call) {
6602 * We have the `constrained.' prefix opcode.
6604 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6606 * The type parameter is instantiated as a valuetype,
6607 * but that type doesn't override the method we're
6608 * calling, so we need to box `this'.
6610 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6611 ins->klass = constrained_call;
6612 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
6613 CHECK_CFG_EXCEPTION;
6614 } else if (!constrained_call->valuetype) {
6615 int dreg = alloc_preg (cfg);
6618 * The type parameter is instantiated as a reference
6619 * type. We have a managed pointer on the stack, so
6620 * we need to dereference it here.
6622 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6623 ins->type = STACK_OBJ;
6625 } else if (cmethod->klass->valuetype)
6627 constrained_call = NULL;
6630 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6634 * If the callee is a shared method, then its static cctor
6635 * might not get called after the call was patched.
6637 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6638 emit_generic_class_init (cfg, cmethod->klass);
6639 CHECK_TYPELOAD (cmethod->klass);
6642 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6643 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6644 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6645 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6646 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6649 * Pass vtable iff target method might
6650 * be shared, which means that sharing
6651 * is enabled for its class and its
6652 * context is sharable (and it's not a
6655 if (sharing_enabled && context_sharable &&
6656 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6660 if (cmethod && mini_method_get_context (cmethod) &&
6661 mini_method_get_context (cmethod)->method_inst) {
6662 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6663 MonoGenericContext *context = mini_method_get_context (cmethod);
6664 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6666 g_assert (!pass_vtable);
6668 if (sharing_enabled && context_sharable)
6672 if (cfg->generic_sharing_context && cmethod) {
6673 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6675 context_used = mono_method_check_context_used (cmethod);
6677 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6678 /* Generic method interface
6679 calls are resolved via a
6680 helper function and don't
6682 if (!cmethod_context || !cmethod_context->method_inst)
6683 pass_imt_from_rgctx = TRUE;
6687 * If a shared method calls another
6688 * shared method then the caller must
6689 * have a generic sharing context
6690 * because the magic trampoline
6691 * requires it. FIXME: We shouldn't
6692 * have to force the vtable/mrgctx
6693 * variable here. Instead there
6694 * should be a flag in the cfg to
6695 * request a generic sharing context.
6698 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6699 mono_get_vtable_var (cfg);
6704 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6706 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6708 CHECK_TYPELOAD (cmethod->klass);
6709 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6714 g_assert (!vtable_arg);
6716 if (!cfg->compile_aot) {
6718 * emit_get_rgctx_method () calls mono_class_vtable () so check
6719 * for type load errors before.
6721 mono_class_setup_vtable (cmethod->klass);
6722 CHECK_TYPELOAD (cmethod->klass);
6725 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6727 /* !marshalbyref is needed to properly handle generic methods + remoting */
6728 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6729 MONO_METHOD_IS_FINAL (cmethod)) &&
6730 !cmethod->klass->marshalbyref) {
6737 if (pass_imt_from_rgctx) {
6738 g_assert (!pass_vtable);
6741 imt_arg = emit_get_rgctx_method (cfg, context_used,
6742 cmethod, MONO_RGCTX_INFO_METHOD);
6746 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
6748 /* Calling virtual generic methods */
6749 if (cmethod && virtual &&
6750 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6751 !(MONO_METHOD_IS_FINAL (cmethod) &&
6752 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6753 mono_method_signature (cmethod)->generic_param_count) {
6754 MonoInst *this_temp, *this_arg_temp, *store;
6755 MonoInst *iargs [4];
6757 g_assert (mono_method_signature (cmethod)->is_inflated);
6759 /* Prevent inlining of methods that contain indirect calls */
6762 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
6763 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt) {
6764 g_assert (!imt_arg);
6766 g_assert (cmethod->is_inflated);
6767 imt_arg = emit_get_rgctx_method (cfg, context_used,
6768 cmethod, MONO_RGCTX_INFO_METHOD);
6769 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6773 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6774 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6775 MONO_ADD_INS (bblock, store);
6777 /* FIXME: This should be a managed pointer */
6778 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6780 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6781 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6782 cmethod, MONO_RGCTX_INFO_METHOD);
6783 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6784 addr = mono_emit_jit_icall (cfg,
6785 mono_helper_compile_generic_method, iargs);
6787 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6789 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6792 if (!MONO_TYPE_IS_VOID (fsig->ret))
6793 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6795 CHECK_CFG_EXCEPTION;
6803 * Implement a workaround for the inherent races involved in locking:
6809 * If a thread abort happens between the call to Monitor.Enter () and the start of the
6810 * try block, the Exit () won't be executed, see:
6811 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
6812 * To work around this, we extend such try blocks to include the last x bytes
6813 * of the Monitor.Enter () call.
6815 if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
6816 MonoBasicBlock *tbb;
6818 GET_BBLOCK (cfg, tbb, ip + 5);
6820 * Only extend try blocks with a finally, to avoid catching exceptions thrown
6821 * from Monitor.Enter like ArgumentNullException.
6823 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
6824 /* Mark this bblock as needing to be extended */
6825 tbb->extend_try_block = TRUE;
6829 /* Conversion to a JIT intrinsic */
6830 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6832 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6833 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6838 CHECK_CFG_EXCEPTION;
6846 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6847 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6848 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
6849 !g_list_find (dont_inline, cmethod)) {
6851 gboolean always = FALSE;
6853 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6854 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6855 /* Prevent inlining of methods that call wrappers */
6857 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6861 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, always))) {
6863 cfg->real_offset += 5;
6866 if (!MONO_TYPE_IS_VOID (fsig->ret))
6867 /* *sp is already set by inline_method */
6870 inline_costs += costs;
6876 inline_costs += 10 * num_calls++;
6878 /* Tail recursion elimination */
6879 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6880 gboolean has_vtargs = FALSE;
6883 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6886 /* keep it simple */
6887 for (i = fsig->param_count - 1; i >= 0; i--) {
6888 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6893 for (i = 0; i < n; ++i)
6894 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6895 MONO_INST_NEW (cfg, ins, OP_BR);
6896 MONO_ADD_INS (bblock, ins);
6897 tblock = start_bblock->out_bb [0];
6898 link_bblock (cfg, bblock, tblock);
6899 ins->inst_target_bb = tblock;
6900 start_new_bblock = 1;
6902 /* skip the CEE_RET, too */
6903 if (ip_in_bb (cfg, bblock, ip + 5))
6913 /* Generic sharing */
6914 /* FIXME: only do this for generic methods if
6915 they are not shared! */
6916 if (context_used && !imt_arg && !array_rank &&
6917 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6918 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6919 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6920 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6923 g_assert (cfg->generic_sharing_context && cmethod);
6927 * We are compiling a call to a
6928 * generic method from shared code,
6929 * which means that we have to look up
6930 * the method in the rgctx and do an
6933 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6936 /* Indirect calls */
6938 g_assert (!imt_arg);
6940 if (*ip == CEE_CALL)
6941 g_assert (context_used);
6942 else if (*ip == CEE_CALLI)
6943 g_assert (!vtable_arg);
6945 /* FIXME: what the hell is this??? */
6946 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6947 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6949 /* Prevent inlining of methods with indirect calls */
6954 int rgctx_reg = mono_alloc_preg (cfg);
6956 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6957 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6958 call = (MonoCallInst*)ins;
6959 set_rgctx_arg (cfg, call, rgctx_reg, vtable_arg);
6961 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6963 * Instead of emitting an indirect call, emit a direct call
6964 * with the contents of the aotconst as the patch info.
6966 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6968 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6969 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
6972 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6975 if (!MONO_TYPE_IS_VOID (fsig->ret))
6976 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6978 CHECK_CFG_EXCEPTION;
6989 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6990 MonoInst *val = sp [fsig->param_count];
6992 if (val->type == STACK_OBJ) {
6993 MonoInst *iargs [2];
6998 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
7001 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
7002 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
7003 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
7004 emit_write_barrier (cfg, addr, val, 0);
7005 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
7006 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
7008 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
7011 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
7012 if (!cmethod->klass->element_class->valuetype && !readonly)
7013 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
7014 CHECK_TYPELOAD (cmethod->klass);
7017 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
7020 g_assert_not_reached ();
7023 CHECK_CFG_EXCEPTION;
7030 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
7032 if (!MONO_TYPE_IS_VOID (fsig->ret))
7033 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7035 CHECK_CFG_EXCEPTION;
7042 /* Tail prefix / tail call optimization */
7044 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
7045 /* FIXME: runtime generic context pointer for jumps? */
7046 /* FIXME: handle this for generic sharing eventually */
7047 supported_tail_call = cmethod &&
7048 ((((ins_flag & MONO_INST_TAILCALL) && (*ip == CEE_CALL))
7049 ))//|| ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && ip [5] == CEE_RET))
7050 && !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig);
7052 if (supported_tail_call) {
7055 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
7058 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
7060 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
7061 /* Handle tail calls similarly to calls */
7062 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE);
7064 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
7065 call->tail_call = TRUE;
7066 call->method = cmethod;
7067 call->signature = mono_method_signature (cmethod);
7070 * We implement tail calls by storing the actual arguments into the
7071 * argument variables, then emitting a CEE_JMP.
7073 for (i = 0; i < n; ++i) {
7074 /* Prevent argument from being register allocated */
7075 arg_array [i]->flags |= MONO_INST_VOLATILE;
7076 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
7080 ins = (MonoInst*)call;
7081 ins->inst_p0 = cmethod;
7082 ins->inst_p1 = arg_array [0];
7083 MONO_ADD_INS (bblock, ins);
7084 link_bblock (cfg, bblock, end_bblock);
7085 start_new_bblock = 1;
7087 CHECK_CFG_EXCEPTION;
7092 // FIXME: Eliminate unreachable epilogs
7095 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
7096 * only reachable from this call.
7098 GET_BBLOCK (cfg, tblock, ip);
7099 if (tblock == bblock || tblock->in_count == 0)
7107 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
7109 } else if (imt_arg) {
7110 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
7112 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
7115 if (!MONO_TYPE_IS_VOID (fsig->ret))
7116 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7118 CHECK_CFG_EXCEPTION;
7125 if (cfg->method != method) {
7126 /* return from inlined method */
7128 * If in_count == 0, that means the ret is unreachable due to
7129 * being preceded by a throw. In that case, inline_method () will
7130 * handle setting the return value
7131 * (test case: test_0_inline_throw ()).
7133 if (return_var && cfg->cbb->in_count) {
7137 //g_assert (returnvar != -1);
7138 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
7139 cfg->ret_var_set = TRUE;
7143 MonoType *ret_type = mono_method_signature (method)->ret;
7147 * Place a seq point here too even though the IL stack is not
7148 * empty, so a step over on
7151 * will work correctly.
7153 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
7154 MONO_ADD_INS (cfg->cbb, ins);
7157 g_assert (!return_var);
7161 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
7164 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
7167 if (!cfg->vret_addr) {
7170 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
7172 EMIT_NEW_RETLOADA (cfg, ret_addr);
7174 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
7175 ins->klass = mono_class_from_mono_type (ret_type);
7178 #ifdef MONO_ARCH_SOFT_FLOAT
7179 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
7180 MonoInst *iargs [1];
7184 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
7185 mono_arch_emit_setret (cfg, method, conv);
7187 mono_arch_emit_setret (cfg, method, *sp);
7190 mono_arch_emit_setret (cfg, method, *sp);
7195 if (sp != stack_start)
7197 MONO_INST_NEW (cfg, ins, OP_BR);
7199 ins->inst_target_bb = end_bblock;
7200 MONO_ADD_INS (bblock, ins);
7201 link_bblock (cfg, bblock, end_bblock);
7202 start_new_bblock = 1;
7206 MONO_INST_NEW (cfg, ins, OP_BR);
7208 target = ip + 1 + (signed char)(*ip);
7210 GET_BBLOCK (cfg, tblock, target);
7211 link_bblock (cfg, bblock, tblock);
7212 ins->inst_target_bb = tblock;
7213 if (sp != stack_start) {
7214 handle_stack_args (cfg, stack_start, sp - stack_start);
7216 CHECK_UNVERIFIABLE (cfg);
7218 MONO_ADD_INS (bblock, ins);
7219 start_new_bblock = 1;
7220 inline_costs += BRANCH_COST;
7234 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
7236 target = ip + 1 + *(signed char*)ip;
7242 inline_costs += BRANCH_COST;
7246 MONO_INST_NEW (cfg, ins, OP_BR);
7249 target = ip + 4 + (gint32)read32(ip);
7251 GET_BBLOCK (cfg, tblock, target);
7252 link_bblock (cfg, bblock, tblock);
7253 ins->inst_target_bb = tblock;
7254 if (sp != stack_start) {
7255 handle_stack_args (cfg, stack_start, sp - stack_start);
7257 CHECK_UNVERIFIABLE (cfg);
7260 MONO_ADD_INS (bblock, ins);
7262 start_new_bblock = 1;
7263 inline_costs += BRANCH_COST;
7270 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
7271 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
7272 guint32 opsize = is_short ? 1 : 4;
7274 CHECK_OPSIZE (opsize);
7276 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
7279 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
7284 GET_BBLOCK (cfg, tblock, target);
7285 link_bblock (cfg, bblock, tblock);
7286 GET_BBLOCK (cfg, tblock, ip);
7287 link_bblock (cfg, bblock, tblock);
7289 if (sp != stack_start) {
7290 handle_stack_args (cfg, stack_start, sp - stack_start);
7291 CHECK_UNVERIFIABLE (cfg);
7294 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
7295 cmp->sreg1 = sp [0]->dreg;
7296 type_from_op (cmp, sp [0], NULL);
7299 #if SIZEOF_REGISTER == 4
7300 if (cmp->opcode == OP_LCOMPARE_IMM) {
7301 /* Convert it to OP_LCOMPARE */
7302 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7303 ins->type = STACK_I8;
7304 ins->dreg = alloc_dreg (cfg, STACK_I8);
7306 MONO_ADD_INS (bblock, ins);
7307 cmp->opcode = OP_LCOMPARE;
7308 cmp->sreg2 = ins->dreg;
7311 MONO_ADD_INS (bblock, cmp);
7313 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
7314 type_from_op (ins, sp [0], NULL);
7315 MONO_ADD_INS (bblock, ins);
7316 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
7317 GET_BBLOCK (cfg, tblock, target);
7318 ins->inst_true_bb = tblock;
7319 GET_BBLOCK (cfg, tblock, ip);
7320 ins->inst_false_bb = tblock;
7321 start_new_bblock = 2;
7324 inline_costs += BRANCH_COST;
7339 MONO_INST_NEW (cfg, ins, *ip);
7341 target = ip + 4 + (gint32)read32(ip);
7347 inline_costs += BRANCH_COST;
7351 MonoBasicBlock **targets;
7352 MonoBasicBlock *default_bblock;
7353 MonoJumpInfoBBTable *table;
7354 int offset_reg = alloc_preg (cfg);
7355 int target_reg = alloc_preg (cfg);
7356 int table_reg = alloc_preg (cfg);
7357 int sum_reg = alloc_preg (cfg);
7358 gboolean use_op_switch;
7362 n = read32 (ip + 1);
7365 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
7369 CHECK_OPSIZE (n * sizeof (guint32));
7370 target = ip + n * sizeof (guint32);
7372 GET_BBLOCK (cfg, default_bblock, target);
7373 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7375 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
7376 for (i = 0; i < n; ++i) {
7377 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
7378 targets [i] = tblock;
7379 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
7383 if (sp != stack_start) {
7385 * Link the current bb with the targets as well, so handle_stack_args
7386 * will set their in_stack correctly.
7388 link_bblock (cfg, bblock, default_bblock);
7389 for (i = 0; i < n; ++i)
7390 link_bblock (cfg, bblock, targets [i]);
7392 handle_stack_args (cfg, stack_start, sp - stack_start);
7394 CHECK_UNVERIFIABLE (cfg);
7397 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
7398 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
7401 for (i = 0; i < n; ++i)
7402 link_bblock (cfg, bblock, targets [i]);
7404 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
7405 table->table = targets;
7406 table->table_size = n;
7408 use_op_switch = FALSE;
7410 /* ARM implements SWITCH statements differently */
7411 /* FIXME: Make it use the generic implementation */
7412 if (!cfg->compile_aot)
7413 use_op_switch = TRUE;
7416 if (COMPILE_LLVM (cfg))
7417 use_op_switch = TRUE;
7419 cfg->cbb->has_jump_table = 1;
7421 if (use_op_switch) {
7422 MONO_INST_NEW (cfg, ins, OP_SWITCH);
7423 ins->sreg1 = src1->dreg;
7424 ins->inst_p0 = table;
7425 ins->inst_many_bb = targets;
7426 ins->klass = GUINT_TO_POINTER (n);
7427 MONO_ADD_INS (cfg->cbb, ins);
7429 if (sizeof (gpointer) == 8)
7430 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
7432 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
7434 #if SIZEOF_REGISTER == 8
7435 /* The upper word might not be zero, and we add it to a 64 bit address later */
7436 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
7439 if (cfg->compile_aot) {
7440 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
7442 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
7443 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
7444 ins->inst_p0 = table;
7445 ins->dreg = table_reg;
7446 MONO_ADD_INS (cfg->cbb, ins);
7449 /* FIXME: Use load_memindex */
7450 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
7451 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
7452 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
7454 start_new_bblock = 1;
7455 inline_costs += (BRANCH_COST * 2);
7475 dreg = alloc_freg (cfg);
7478 dreg = alloc_lreg (cfg);
7481 dreg = alloc_preg (cfg);
7484 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
7485 ins->type = ldind_type [*ip - CEE_LDIND_I1];
7486 ins->flags |= ins_flag;
7488 MONO_ADD_INS (bblock, ins);
7503 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
7504 ins->flags |= ins_flag;
7506 MONO_ADD_INS (bblock, ins);
7508 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
7509 emit_write_barrier (cfg, sp [0], sp [1], -1);
7518 MONO_INST_NEW (cfg, ins, (*ip));
7520 ins->sreg1 = sp [0]->dreg;
7521 ins->sreg2 = sp [1]->dreg;
7522 type_from_op (ins, sp [0], sp [1]);
7524 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7526 /* Use the immediate opcodes if possible */
7527 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
7528 int imm_opcode = mono_op_to_op_imm (ins->opcode);
7529 if (imm_opcode != -1) {
7530 ins->opcode = imm_opcode;
7531 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7534 sp [1]->opcode = OP_NOP;
7538 MONO_ADD_INS ((cfg)->cbb, (ins));
7540 *sp++ = mono_decompose_opcode (cfg, ins);
7557 MONO_INST_NEW (cfg, ins, (*ip));
7559 ins->sreg1 = sp [0]->dreg;
7560 ins->sreg2 = sp [1]->dreg;
7561 type_from_op (ins, sp [0], sp [1]);
7563 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7564 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7566 /* FIXME: Pass opcode to is_inst_imm */
7568 /* Use the immediate opcodes if possible */
7569 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7572 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7573 if (imm_opcode != -1) {
7574 ins->opcode = imm_opcode;
7575 if (sp [1]->opcode == OP_I8CONST) {
7576 #if SIZEOF_REGISTER == 8
7577 ins->inst_imm = sp [1]->inst_l;
7579 ins->inst_ls_word = sp [1]->inst_ls_word;
7580 ins->inst_ms_word = sp [1]->inst_ms_word;
7584 ins->inst_imm = (gssize)(sp [1]->inst_c0);
7587 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7588 if (sp [1]->next == NULL)
7589 sp [1]->opcode = OP_NOP;
7592 MONO_ADD_INS ((cfg)->cbb, (ins));
7594 *sp++ = mono_decompose_opcode (cfg, ins);
7607 case CEE_CONV_OVF_I8:
7608 case CEE_CONV_OVF_U8:
7612 /* Special case this earlier so we have long constants in the IR */
7613 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7614 int data = sp [-1]->inst_c0;
7615 sp [-1]->opcode = OP_I8CONST;
7616 sp [-1]->type = STACK_I8;
7617 #if SIZEOF_REGISTER == 8
7618 if ((*ip) == CEE_CONV_U8)
7619 sp [-1]->inst_c0 = (guint32)data;
7621 sp [-1]->inst_c0 = data;
7623 sp [-1]->inst_ls_word = data;
7624 if ((*ip) == CEE_CONV_U8)
7625 sp [-1]->inst_ms_word = 0;
7627 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7629 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7636 case CEE_CONV_OVF_I4:
7637 case CEE_CONV_OVF_I1:
7638 case CEE_CONV_OVF_I2:
7639 case CEE_CONV_OVF_I:
7640 case CEE_CONV_OVF_U:
7643 if (sp [-1]->type == STACK_R8) {
7644 ADD_UNOP (CEE_CONV_OVF_I8);
7651 case CEE_CONV_OVF_U1:
7652 case CEE_CONV_OVF_U2:
7653 case CEE_CONV_OVF_U4:
7656 if (sp [-1]->type == STACK_R8) {
7657 ADD_UNOP (CEE_CONV_OVF_U8);
7664 case CEE_CONV_OVF_I1_UN:
7665 case CEE_CONV_OVF_I2_UN:
7666 case CEE_CONV_OVF_I4_UN:
7667 case CEE_CONV_OVF_I8_UN:
7668 case CEE_CONV_OVF_U1_UN:
7669 case CEE_CONV_OVF_U2_UN:
7670 case CEE_CONV_OVF_U4_UN:
7671 case CEE_CONV_OVF_U8_UN:
7672 case CEE_CONV_OVF_I_UN:
7673 case CEE_CONV_OVF_U_UN:
7680 CHECK_CFG_EXCEPTION;
7684 case CEE_ADD_OVF_UN:
7686 case CEE_MUL_OVF_UN:
7688 case CEE_SUB_OVF_UN:
7696 token = read32 (ip + 1);
7697 klass = mini_get_class (method, token, generic_context);
7698 CHECK_TYPELOAD (klass);
7700 if (generic_class_is_reference_type (cfg, klass)) {
7701 MonoInst *store, *load;
7702 int dreg = alloc_preg (cfg);
7704 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7705 load->flags |= ins_flag;
7706 MONO_ADD_INS (cfg->cbb, load);
7708 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7709 store->flags |= ins_flag;
7710 MONO_ADD_INS (cfg->cbb, store);
7712 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
7713 emit_write_barrier (cfg, sp [0], sp [1], -1);
7715 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7727 token = read32 (ip + 1);
7728 klass = mini_get_class (method, token, generic_context);
7729 CHECK_TYPELOAD (klass);
7731 /* Optimize the common ldobj+stloc combination */
7741 loc_index = ip [5] - CEE_STLOC_0;
7748 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7749 CHECK_LOCAL (loc_index);
7751 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7752 ins->dreg = cfg->locals [loc_index]->dreg;
7758 /* Optimize the ldobj+stobj combination */
7759 /* The reference case ends up being a load+store anyway */
7760 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7765 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7772 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7781 CHECK_STACK_OVF (1);
7783 n = read32 (ip + 1);
7785 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7786 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7787 ins->type = STACK_OBJ;
7790 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7791 MonoInst *iargs [1];
7793 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7794 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7796 if (cfg->opt & MONO_OPT_SHARED) {
7797 MonoInst *iargs [3];
7799 if (cfg->compile_aot) {
7800 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7802 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7803 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7804 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7805 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7806 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7808 if (bblock->out_of_line) {
7809 MonoInst *iargs [2];
7811 if (image == mono_defaults.corlib) {
7813 * Avoid relocations in AOT and save some space by using a
7814 * version of helper_ldstr specialized to mscorlib.
7816 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7817 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7819 /* Avoid creating the string object */
7820 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7821 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7822 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7826 if (cfg->compile_aot) {
7827 NEW_LDSTRCONST (cfg, ins, image, n);
7829 MONO_ADD_INS (bblock, ins);
7832 NEW_PCONST (cfg, ins, NULL);
7833 ins->type = STACK_OBJ;
7834 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7836 OUT_OF_MEMORY_FAILURE;
7839 MONO_ADD_INS (bblock, ins);
7848 MonoInst *iargs [2];
7849 MonoMethodSignature *fsig;
7852 MonoInst *vtable_arg = NULL;
7855 token = read32 (ip + 1);
7856 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7857 if (!cmethod || mono_loader_get_last_error ())
7859 fsig = mono_method_get_signature (cmethod, image, token);
7863 mono_save_token_info (cfg, image, token, cmethod);
7865 if (!mono_class_init (cmethod->klass))
7868 if (cfg->generic_sharing_context)
7869 context_used = mono_method_check_context_used (cmethod);
7871 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7872 if (check_linkdemand (cfg, method, cmethod))
7874 CHECK_CFG_EXCEPTION;
7875 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7876 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7879 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
7880 emit_generic_class_init (cfg, cmethod->klass);
7881 CHECK_TYPELOAD (cmethod->klass);
7884 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7885 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7886 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7887 mono_class_vtable (cfg->domain, cmethod->klass);
7888 CHECK_TYPELOAD (cmethod->klass);
7890 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7891 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7894 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7895 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7897 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7899 CHECK_TYPELOAD (cmethod->klass);
7900 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7905 n = fsig->param_count;
7909 * Generate smaller code for the common newobj <exception> instruction in
7910 * argument checking code.
7912 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7913 is_exception_class (cmethod->klass) && n <= 2 &&
7914 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7915 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7916 MonoInst *iargs [3];
7918 g_assert (!vtable_arg);
7922 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7925 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7929 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7934 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7937 g_assert_not_reached ();
7945 /* move the args to allow room for 'this' in the first position */
7951 /* check_call_signature () requires sp[0] to be set */
7952 this_ins.type = STACK_OBJ;
7954 if (check_call_signature (cfg, fsig, sp))
7959 if (mini_class_is_system_array (cmethod->klass)) {
7960 g_assert (!vtable_arg);
7962 *sp = emit_get_rgctx_method (cfg, context_used,
7963 cmethod, MONO_RGCTX_INFO_METHOD);
7965 /* Avoid varargs in the common case */
7966 if (fsig->param_count == 1)
7967 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7968 else if (fsig->param_count == 2)
7969 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7970 else if (fsig->param_count == 3)
7971 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
7973 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7974 } else if (cmethod->string_ctor) {
7975 g_assert (!context_used);
7976 g_assert (!vtable_arg);
7977 /* we simply pass a null pointer */
7978 EMIT_NEW_PCONST (cfg, *sp, NULL);
7979 /* now call the string ctor */
7980 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7982 MonoInst* callvirt_this_arg = NULL;
7984 if (cmethod->klass->valuetype) {
7985 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7986 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7987 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7992 * The code generated by mini_emit_virtual_call () expects
7993 * iargs [0] to be a boxed instance, but luckily the vcall
7994 * will be transformed into a normal call there.
7996 } else if (context_used) {
7997 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
8000 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8002 CHECK_TYPELOAD (cmethod->klass);
8005 * TypeInitializationExceptions thrown from the mono_runtime_class_init
8006 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
8007 * As a workaround, we call class cctors before allocating objects.
8009 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8010 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8011 if (cfg->verbose_level > 2)
8012 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
8013 class_inits = g_slist_prepend (class_inits, vtable);
8016 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
8019 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
8022 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
8024 /* Now call the actual ctor */
8025 /* Avoid virtual calls to ctors if possible */
8026 if (cmethod->klass->marshalbyref)
8027 callvirt_this_arg = sp [0];
8030 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
8031 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8032 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8037 CHECK_CFG_EXCEPTION;
8038 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
8039 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
8040 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
8041 !g_list_find (dont_inline, cmethod)) {
8044 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
8045 cfg->real_offset += 5;
8048 inline_costs += costs - 5;
8051 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
8053 } else if (context_used &&
8054 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
8055 !mono_class_generic_sharing_enabled (cmethod->klass))) {
8056 MonoInst *cmethod_addr;
8058 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
8059 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8061 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
8064 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
8065 callvirt_this_arg, NULL, vtable_arg);
8069 if (alloc == NULL) {
8071 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
8072 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
8086 token = read32 (ip + 1);
8087 klass = mini_get_class (method, token, generic_context);
8088 CHECK_TYPELOAD (klass);
8089 if (sp [0]->type != STACK_OBJ)
8092 if (cfg->generic_sharing_context)
8093 context_used = mono_class_check_context_used (klass);
8095 if (!context_used && mini_class_has_reference_variant_generic_argument (klass, context_used)) {
8096 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
8103 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8106 /*FIXME AOT support*/
8107 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8109 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
8110 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
8113 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8114 MonoMethod *mono_castclass;
8115 MonoInst *iargs [1];
8118 mono_castclass = mono_marshal_get_castclass (klass);
8121 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
8122 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8123 CHECK_CFG_EXCEPTION;
8124 g_assert (costs > 0);
8127 cfg->real_offset += 5;
8132 inline_costs += costs;
8135 ins = handle_castclass (cfg, klass, *sp, context_used);
8136 CHECK_CFG_EXCEPTION;
8146 token = read32 (ip + 1);
8147 klass = mini_get_class (method, token, generic_context);
8148 CHECK_TYPELOAD (klass);
8149 if (sp [0]->type != STACK_OBJ)
8152 if (cfg->generic_sharing_context)
8153 context_used = mono_class_check_context_used (klass);
8155 if (!context_used && mini_class_has_reference_variant_generic_argument (klass, context_used)) {
8156 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
8163 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8166 /*FIXME AOT support*/
8167 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8169 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
8172 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8173 MonoMethod *mono_isinst;
8174 MonoInst *iargs [1];
8177 mono_isinst = mono_marshal_get_isinst (klass);
8180 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
8181 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8182 CHECK_CFG_EXCEPTION;
8183 g_assert (costs > 0);
8186 cfg->real_offset += 5;
8191 inline_costs += costs;
8194 ins = handle_isinst (cfg, klass, *sp, context_used);
8195 CHECK_CFG_EXCEPTION;
8202 case CEE_UNBOX_ANY: {
8206 token = read32 (ip + 1);
8207 klass = mini_get_class (method, token, generic_context);
8208 CHECK_TYPELOAD (klass);
8210 mono_save_token_info (cfg, image, token, klass);
8212 if (cfg->generic_sharing_context)
8213 context_used = mono_class_check_context_used (klass);
8215 if (generic_class_is_reference_type (cfg, klass)) {
8216 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
8217 if (!context_used && mini_class_has_reference_variant_generic_argument (klass, context_used)) {
8218 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
8225 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8228 /*FIXME AOT support*/
8229 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8231 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
8232 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
8235 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8236 MonoMethod *mono_castclass;
8237 MonoInst *iargs [1];
8240 mono_castclass = mono_marshal_get_castclass (klass);
8243 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
8244 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8245 CHECK_CFG_EXCEPTION;
8246 g_assert (costs > 0);
8249 cfg->real_offset += 5;
8253 inline_costs += costs;
8255 ins = handle_castclass (cfg, klass, *sp, context_used);
8256 CHECK_CFG_EXCEPTION;
8264 if (mono_class_is_nullable (klass)) {
8265 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
8272 ins = handle_unbox (cfg, klass, sp, context_used);
8278 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
8291 token = read32 (ip + 1);
8292 klass = mini_get_class (method, token, generic_context);
8293 CHECK_TYPELOAD (klass);
8295 mono_save_token_info (cfg, image, token, klass);
8297 if (cfg->generic_sharing_context)
8298 context_used = mono_class_check_context_used (klass);
8300 if (generic_class_is_reference_type (cfg, klass)) {
8306 if (klass == mono_defaults.void_class)
8308 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
8310 /* frequent check in generic code: box (struct), brtrue */
8312 // FIXME: LLVM can't handle the inconsistent bb linking
8313 if (!mono_class_is_nullable (klass) &&
8314 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
8315 (ip [5] == CEE_BRTRUE ||
8316 ip [5] == CEE_BRTRUE_S ||
8317 ip [5] == CEE_BRFALSE ||
8318 ip [5] == CEE_BRFALSE_S)) {
8319 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
8321 MonoBasicBlock *true_bb, *false_bb;
8325 if (cfg->verbose_level > 3) {
8326 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8327 printf ("<box+brtrue opt>\n");
8335 target = ip + 1 + (signed char)(*ip);
8342 target = ip + 4 + (gint)(read32 (ip));
8346 g_assert_not_reached ();
8350 * We need to link both bblocks, since it is needed for handling stack
8351 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
8352 * Branching to only one of them would lead to inconsistencies, so
8353 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
8355 GET_BBLOCK (cfg, true_bb, target);
8356 GET_BBLOCK (cfg, false_bb, ip);
8358 mono_link_bblock (cfg, cfg->cbb, true_bb);
8359 mono_link_bblock (cfg, cfg->cbb, false_bb);
8361 if (sp != stack_start) {
8362 handle_stack_args (cfg, stack_start, sp - stack_start);
8364 CHECK_UNVERIFIABLE (cfg);
8367 if (COMPILE_LLVM (cfg)) {
8368 dreg = alloc_ireg (cfg);
8369 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
8370 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
8372 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
8374 /* The JIT can't eliminate the iconst+compare */
8375 MONO_INST_NEW (cfg, ins, OP_BR);
8376 ins->inst_target_bb = is_true ? true_bb : false_bb;
8377 MONO_ADD_INS (cfg->cbb, ins);
8380 start_new_bblock = 1;
8384 *sp++ = handle_box (cfg, val, klass, context_used);
8386 CHECK_CFG_EXCEPTION;
8395 token = read32 (ip + 1);
8396 klass = mini_get_class (method, token, generic_context);
8397 CHECK_TYPELOAD (klass);
8399 mono_save_token_info (cfg, image, token, klass);
8401 if (cfg->generic_sharing_context)
8402 context_used = mono_class_check_context_used (klass);
8404 if (mono_class_is_nullable (klass)) {
8407 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
8408 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
8412 ins = handle_unbox (cfg, klass, sp, context_used);
8422 MonoClassField *field;
8426 if (*ip == CEE_STFLD) {
8433 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
8435 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
8438 token = read32 (ip + 1);
8439 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8440 field = mono_method_get_wrapper_data (method, token);
8441 klass = field->parent;
8444 field = mono_field_from_token (image, token, &klass, generic_context);
8448 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8449 FIELD_ACCESS_FAILURE;
8450 mono_class_init (klass);
8452 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
8453 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
8454 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8455 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8458 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
8459 if (*ip == CEE_STFLD) {
8460 if (target_type_is_incompatible (cfg, field->type, sp [1]))
8462 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8463 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
8464 MonoInst *iargs [5];
8467 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8468 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8469 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
8473 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8474 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
8475 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8476 CHECK_CFG_EXCEPTION;
8477 g_assert (costs > 0);
8479 cfg->real_offset += 5;
8482 inline_costs += costs;
8484 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
8489 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8491 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
8492 if (sp [0]->opcode != OP_LDADDR)
8493 store->flags |= MONO_INST_FAULT;
8495 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
8496 /* insert call to write barrier */
8500 dreg = alloc_preg (cfg);
8501 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8502 emit_write_barrier (cfg, ptr, sp [1], -1);
8505 store->flags |= ins_flag;
8512 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8513 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
8514 MonoInst *iargs [4];
8517 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8518 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8519 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
8520 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8521 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
8522 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8523 CHECK_CFG_EXCEPTION;
8525 g_assert (costs > 0);
8527 cfg->real_offset += 5;
8531 inline_costs += costs;
8533 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
8537 if (sp [0]->type == STACK_VTYPE) {
8540 /* Have to compute the address of the variable */
8542 var = get_vreg_to_inst (cfg, sp [0]->dreg);
8544 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
8546 g_assert (var->klass == klass);
8548 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
8552 if (*ip == CEE_LDFLDA) {
8553 if (sp [0]->type == STACK_OBJ) {
8554 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
8555 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
8558 dreg = alloc_preg (cfg);
8560 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8561 ins->klass = mono_class_from_mono_type (field->type);
8562 ins->type = STACK_MP;
8567 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8569 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
8570 load->flags |= ins_flag;
8571 if (sp [0]->opcode != OP_LDADDR)
8572 load->flags |= MONO_INST_FAULT;
8583 MonoClassField *field;
8584 gpointer addr = NULL;
8585 gboolean is_special_static;
8589 token = read32 (ip + 1);
8591 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8592 field = mono_method_get_wrapper_data (method, token);
8593 klass = field->parent;
8596 field = mono_field_from_token (image, token, &klass, generic_context);
8599 mono_class_init (klass);
8600 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8601 FIELD_ACCESS_FAILURE;
8603 /* if the class is Critical then transparent code cannot access its fields */
8604 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8605 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8608 * We can only support shared generic static
8609 * field access on architectures where the
8610 * trampoline code has been extended to handle
8611 * the generic class init.
8613 #ifndef MONO_ARCH_VTABLE_REG
8614 GENERIC_SHARING_FAILURE (*ip);
8617 if (cfg->generic_sharing_context)
8618 context_used = mono_class_check_context_used (klass);
8620 ftype = mono_field_get_type (field);
8622 g_assert (!(ftype->attrs & FIELD_ATTRIBUTE_LITERAL));
8624 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8625 * to be called here.
8627 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8628 mono_class_vtable (cfg->domain, klass);
8629 CHECK_TYPELOAD (klass);
8631 mono_domain_lock (cfg->domain);
8632 if (cfg->domain->special_static_fields)
8633 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8634 mono_domain_unlock (cfg->domain);
8636 is_special_static = mono_class_field_is_special_static (field);
8638 /* Generate IR to compute the field address */
8639 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
8641 * Fast access to TLS data
8642 * Inline version of get_thread_static_data () in
8646 int idx, static_data_reg, array_reg, dreg;
8647 MonoInst *thread_ins;
8649 // offset &= 0x7fffffff;
8650 // idx = (offset >> 24) - 1;
8651 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
8653 thread_ins = mono_get_thread_intrinsic (cfg);
8654 MONO_ADD_INS (cfg->cbb, thread_ins);
8655 static_data_reg = alloc_ireg (cfg);
8656 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
8658 if (cfg->compile_aot) {
8659 int offset_reg, offset2_reg, idx_reg;
8661 /* For TLS variables, this will return the TLS offset */
8662 EMIT_NEW_SFLDACONST (cfg, ins, field);
8663 offset_reg = ins->dreg;
8664 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
8665 idx_reg = alloc_ireg (cfg);
8666 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
8667 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
8668 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
8669 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
8670 array_reg = alloc_ireg (cfg);
8671 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
8672 offset2_reg = alloc_ireg (cfg);
8673 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
8674 dreg = alloc_ireg (cfg);
8675 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
8677 offset = (gsize)addr & 0x7fffffff;
8678 idx = (offset >> 24) - 1;
8680 array_reg = alloc_ireg (cfg);
8681 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
8682 dreg = alloc_ireg (cfg);
8683 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
8685 } else if ((cfg->opt & MONO_OPT_SHARED) ||
8686 (cfg->compile_aot && is_special_static) ||
8687 (context_used && is_special_static)) {
8688 MonoInst *iargs [2];
8690 g_assert (field->parent);
8691 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8693 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8694 field, MONO_RGCTX_INFO_CLASS_FIELD);
8696 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8698 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8699 } else if (context_used) {
8700 MonoInst *static_data;
8703 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8704 method->klass->name_space, method->klass->name, method->name,
8705 depth, field->offset);
8708 if (mono_class_needs_cctor_run (klass, method))
8709 emit_generic_class_init (cfg, klass);
8712 * The pointer we're computing here is
8714 * super_info.static_data + field->offset
8716 static_data = emit_get_rgctx_klass (cfg, context_used,
8717 klass, MONO_RGCTX_INFO_STATIC_DATA);
8719 if (field->offset == 0) {
8722 int addr_reg = mono_alloc_preg (cfg);
8723 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8725 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8726 MonoInst *iargs [2];
8728 g_assert (field->parent);
8729 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8730 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8731 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8733 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8735 CHECK_TYPELOAD (klass);
8737 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8738 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8739 if (cfg->verbose_level > 2)
8740 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8741 class_inits = g_slist_prepend (class_inits, vtable);
8743 if (cfg->run_cctors) {
8745 /* This makes so that inline cannot trigger */
8746 /* .cctors: too many apps depend on them */
8747 /* running with a specific order... */
8748 if (! vtable->initialized)
8750 ex = mono_runtime_class_init_full (vtable, FALSE);
8752 set_exception_object (cfg, ex);
8753 goto exception_exit;
8757 addr = (char*)vtable->data + field->offset;
8759 if (cfg->compile_aot)
8760 EMIT_NEW_SFLDACONST (cfg, ins, field);
8762 EMIT_NEW_PCONST (cfg, ins, addr);
8764 MonoInst *iargs [1];
8765 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8766 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8770 /* Generate IR to do the actual load/store operation */
8772 if (*ip == CEE_LDSFLDA) {
8773 ins->klass = mono_class_from_mono_type (ftype);
8774 ins->type = STACK_PTR;
8776 } else if (*ip == CEE_STSFLD) {
8781 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, sp [0]->dreg);
8782 store->flags |= ins_flag;
8784 gboolean is_const = FALSE;
8785 MonoVTable *vtable = NULL;
8787 if (!context_used) {
8788 vtable = mono_class_vtable (cfg->domain, klass);
8789 CHECK_TYPELOAD (klass);
8791 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8792 vtable->initialized && (ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8793 gpointer addr = (char*)vtable->data + field->offset;
8794 int ro_type = ftype->type;
8795 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
8796 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
8798 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8801 case MONO_TYPE_BOOLEAN:
8803 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8807 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8810 case MONO_TYPE_CHAR:
8812 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8816 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8821 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8825 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8831 case MONO_TYPE_FNPTR:
8832 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8833 type_to_eval_stack_type ((cfg), field->type, *sp);
8836 case MONO_TYPE_STRING:
8837 case MONO_TYPE_OBJECT:
8838 case MONO_TYPE_CLASS:
8839 case MONO_TYPE_SZARRAY:
8840 case MONO_TYPE_ARRAY:
8841 if (!mono_gc_is_moving ()) {
8842 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8843 type_to_eval_stack_type ((cfg), field->type, *sp);
8851 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8856 case MONO_TYPE_VALUETYPE:
8866 CHECK_STACK_OVF (1);
8868 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8869 load->flags |= ins_flag;
8882 token = read32 (ip + 1);
8883 klass = mini_get_class (method, token, generic_context);
8884 CHECK_TYPELOAD (klass);
8885 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8886 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8887 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
8888 generic_class_is_reference_type (cfg, klass)) {
8889 /* insert call to write barrier */
8890 emit_write_barrier (cfg, sp [0], sp [1], -1);
8902 const char *data_ptr;
8904 guint32 field_token;
8910 token = read32 (ip + 1);
8912 klass = mini_get_class (method, token, generic_context);
8913 CHECK_TYPELOAD (klass);
8915 if (cfg->generic_sharing_context)
8916 context_used = mono_class_check_context_used (klass);
8918 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
8919 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
8920 ins->sreg1 = sp [0]->dreg;
8921 ins->type = STACK_I4;
8922 ins->dreg = alloc_ireg (cfg);
8923 MONO_ADD_INS (cfg->cbb, ins);
8924 *sp = mono_decompose_opcode (cfg, ins);
8929 MonoClass *array_class = mono_array_class_get (klass, 1);
8930 /* FIXME: we cannot get a managed
8931 allocator because we can't get the
8932 open generic class's vtable. We
8933 have the same problem in
8934 handle_alloc(). This
8935 needs to be solved so that we can
8936 have managed allocs of shared
8939 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
8940 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
8942 MonoMethod *managed_alloc = NULL;
8944 /* FIXME: Decompose later to help abcrem */
8947 args [0] = emit_get_rgctx_klass (cfg, context_used,
8948 array_class, MONO_RGCTX_INFO_VTABLE);
8953 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
8955 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8957 if (cfg->opt & MONO_OPT_SHARED) {
8958 /* Decompose now to avoid problems with references to the domainvar */
8959 MonoInst *iargs [3];
8961 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8962 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8965 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8967 /* Decompose later since it is needed by abcrem */
8968 MonoClass *array_type = mono_array_class_get (klass, 1);
8969 mono_class_vtable (cfg->domain, array_type);
8970 CHECK_TYPELOAD (array_type);
8972 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8973 ins->dreg = alloc_preg (cfg);
8974 ins->sreg1 = sp [0]->dreg;
8975 ins->inst_newa_class = klass;
8976 ins->type = STACK_OBJ;
8978 MONO_ADD_INS (cfg->cbb, ins);
8979 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8980 cfg->cbb->has_array_access = TRUE;
8982 /* Needed so mono_emit_load_get_addr () gets called */
8983 mono_get_got_var (cfg);
8993 * we inline/optimize the initialization sequence if possible.
8994 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8995 * for small sizes open code the memcpy
8996 * ensure the rva field is big enough
8998 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8999 MonoMethod *memcpy_method = get_memcpy_method ();
9000 MonoInst *iargs [3];
9001 int add_reg = alloc_preg (cfg);
9003 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
9004 if (cfg->compile_aot) {
9005 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
9007 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
9009 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
9010 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9019 if (sp [0]->type != STACK_OBJ)
9022 dreg = alloc_preg (cfg);
9023 MONO_INST_NEW (cfg, ins, OP_LDLEN);
9024 ins->dreg = alloc_preg (cfg);
9025 ins->sreg1 = sp [0]->dreg;
9026 ins->type = STACK_I4;
9027 /* This flag will be inherited by the decomposition */
9028 ins->flags |= MONO_INST_FAULT;
9029 MONO_ADD_INS (cfg->cbb, ins);
9030 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
9031 cfg->cbb->has_array_access = TRUE;
9039 if (sp [0]->type != STACK_OBJ)
9042 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9044 klass = mini_get_class (method, read32 (ip + 1), generic_context);
9045 CHECK_TYPELOAD (klass);
9046 /* we need to make sure that this array is exactly the type it needs
9047 * to be for correctness. the wrappers are lax with their usage
9048 * so we need to ignore them here
9050 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
9051 MonoClass *array_class = mono_array_class_get (klass, 1);
9052 mini_emit_check_array_type (cfg, sp [0], array_class);
9053 CHECK_TYPELOAD (array_class);
9057 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9072 case CEE_LDELEM_REF: {
9078 if (*ip == CEE_LDELEM) {
9080 token = read32 (ip + 1);
9081 klass = mini_get_class (method, token, generic_context);
9082 CHECK_TYPELOAD (klass);
9083 mono_class_init (klass);
9086 klass = array_access_to_klass (*ip);
9088 if (sp [0]->type != STACK_OBJ)
9091 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9093 if (sp [1]->opcode == OP_ICONST) {
9094 int array_reg = sp [0]->dreg;
9095 int index_reg = sp [1]->dreg;
9096 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
9098 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
9099 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
9101 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9102 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
9105 if (*ip == CEE_LDELEM)
9118 case CEE_STELEM_REF:
9125 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9127 if (*ip == CEE_STELEM) {
9129 token = read32 (ip + 1);
9130 klass = mini_get_class (method, token, generic_context);
9131 CHECK_TYPELOAD (klass);
9132 mono_class_init (klass);
9135 klass = array_access_to_klass (*ip);
9137 if (sp [0]->type != STACK_OBJ)
9140 /* storing a NULL doesn't need any of the complex checks in stelemref */
9141 if (generic_class_is_reference_type (cfg, klass) &&
9142 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
9143 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
9144 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
9145 MonoInst *iargs [3];
9148 mono_class_setup_vtable (obj_array);
9149 g_assert (helper->slot);
9151 if (sp [0]->type != STACK_OBJ)
9153 if (sp [2]->type != STACK_OBJ)
9160 mono_emit_method_call (cfg, helper, iargs, sp [0]);
9162 if (sp [1]->opcode == OP_ICONST) {
9163 int array_reg = sp [0]->dreg;
9164 int index_reg = sp [1]->dreg;
9165 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
9167 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
9168 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
9170 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9171 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
9175 if (*ip == CEE_STELEM)
9182 case CEE_CKFINITE: {
9186 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
9187 ins->sreg1 = sp [0]->dreg;
9188 ins->dreg = alloc_freg (cfg);
9189 ins->type = STACK_R8;
9190 MONO_ADD_INS (bblock, ins);
9192 *sp++ = mono_decompose_opcode (cfg, ins);
9197 case CEE_REFANYVAL: {
9198 MonoInst *src_var, *src;
9200 int klass_reg = alloc_preg (cfg);
9201 int dreg = alloc_preg (cfg);
9204 MONO_INST_NEW (cfg, ins, *ip);
9207 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
9208 CHECK_TYPELOAD (klass);
9209 mono_class_init (klass);
9211 if (cfg->generic_sharing_context)
9212 context_used = mono_class_check_context_used (klass);
9215 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9217 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9218 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9219 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
9222 MonoInst *klass_ins;
9224 klass_ins = emit_get_rgctx_klass (cfg, context_used,
9225 klass, MONO_RGCTX_INFO_KLASS);
9228 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
9229 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
9231 mini_emit_class_check (cfg, klass_reg, klass);
9233 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
9234 ins->type = STACK_MP;
9239 case CEE_MKREFANY: {
9240 MonoInst *loc, *addr;
9243 MONO_INST_NEW (cfg, ins, *ip);
9246 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
9247 CHECK_TYPELOAD (klass);
9248 mono_class_init (klass);
9250 if (cfg->generic_sharing_context)
9251 context_used = mono_class_check_context_used (klass);
9253 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
9254 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
9257 MonoInst *const_ins;
9258 int type_reg = alloc_preg (cfg);
9260 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
9261 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
9262 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
9263 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
9264 } else if (cfg->compile_aot) {
9265 int const_reg = alloc_preg (cfg);
9266 int type_reg = alloc_preg (cfg);
9268 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
9269 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
9270 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
9271 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
9273 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
9274 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
9276 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
9278 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
9279 ins->type = STACK_VTYPE;
9280 ins->klass = mono_defaults.typed_reference_class;
9287 MonoClass *handle_class;
9289 CHECK_STACK_OVF (1);
9292 n = read32 (ip + 1);
9294 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
9295 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9296 handle = mono_method_get_wrapper_data (method, n);
9297 handle_class = mono_method_get_wrapper_data (method, n + 1);
9298 if (handle_class == mono_defaults.typehandle_class)
9299 handle = &((MonoClass*)handle)->byval_arg;
9302 handle = mono_ldtoken (image, n, &handle_class, generic_context);
9306 mono_class_init (handle_class);
9307 if (cfg->generic_sharing_context) {
9308 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
9309 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
9310 /* This case handles ldtoken
9311 of an open type, like for
9314 } else if (handle_class == mono_defaults.typehandle_class) {
9315 /* If we get a MONO_TYPE_CLASS
9316 then we need to provide the
9318 instantiation of it. */
9319 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
9322 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
9323 } else if (handle_class == mono_defaults.fieldhandle_class)
9324 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
9325 else if (handle_class == mono_defaults.methodhandle_class)
9326 context_used = mono_method_check_context_used (handle);
9328 g_assert_not_reached ();
9331 if ((cfg->opt & MONO_OPT_SHARED) &&
9332 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
9333 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
9334 MonoInst *addr, *vtvar, *iargs [3];
9335 int method_context_used;
9337 if (cfg->generic_sharing_context)
9338 method_context_used = mono_method_check_context_used (method);
9340 method_context_used = 0;
9342 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9344 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9345 EMIT_NEW_ICONST (cfg, iargs [1], n);
9346 if (method_context_used) {
9347 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
9348 method, MONO_RGCTX_INFO_METHOD);
9349 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
9351 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
9352 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
9354 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9356 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9358 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9360 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
9361 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
9362 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
9363 (cmethod->klass == mono_defaults.monotype_class->parent) &&
9364 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
9365 MonoClass *tclass = mono_class_from_mono_type (handle);
9367 mono_class_init (tclass);
9369 ins = emit_get_rgctx_klass (cfg, context_used,
9370 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
9371 } else if (cfg->compile_aot) {
9372 if (method->wrapper_type) {
9373 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
9374 /* Special case for static synchronized wrappers */
9375 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
9377 /* FIXME: n is not a normal token */
9378 cfg->disable_aot = TRUE;
9379 EMIT_NEW_PCONST (cfg, ins, NULL);
9382 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
9385 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
9387 ins->type = STACK_OBJ;
9388 ins->klass = cmethod->klass;
9391 MonoInst *addr, *vtvar;
9393 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9396 if (handle_class == mono_defaults.typehandle_class) {
9397 ins = emit_get_rgctx_klass (cfg, context_used,
9398 mono_class_from_mono_type (handle),
9399 MONO_RGCTX_INFO_TYPE);
9400 } else if (handle_class == mono_defaults.methodhandle_class) {
9401 ins = emit_get_rgctx_method (cfg, context_used,
9402 handle, MONO_RGCTX_INFO_METHOD);
9403 } else if (handle_class == mono_defaults.fieldhandle_class) {
9404 ins = emit_get_rgctx_field (cfg, context_used,
9405 handle, MONO_RGCTX_INFO_CLASS_FIELD);
9407 g_assert_not_reached ();
9409 } else if (cfg->compile_aot) {
9410 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
9412 EMIT_NEW_PCONST (cfg, ins, handle);
9414 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9415 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9416 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9426 MONO_INST_NEW (cfg, ins, OP_THROW);
9428 ins->sreg1 = sp [0]->dreg;
9430 bblock->out_of_line = TRUE;
9431 MONO_ADD_INS (bblock, ins);
9432 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
9433 MONO_ADD_INS (bblock, ins);
9436 link_bblock (cfg, bblock, end_bblock);
9437 start_new_bblock = 1;
9439 case CEE_ENDFINALLY:
9440 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
9441 MONO_ADD_INS (bblock, ins);
9443 start_new_bblock = 1;
9446 * Control will leave the method so empty the stack, otherwise
9447 * the next basic block will start with a nonempty stack.
9449 while (sp != stack_start) {
9457 if (*ip == CEE_LEAVE) {
9459 target = ip + 5 + (gint32)read32(ip + 1);
9462 target = ip + 2 + (signed char)(ip [1]);
9465 /* empty the stack */
9466 while (sp != stack_start) {
9471 * If this leave statement is in a catch block, check for a
9472 * pending exception, and rethrow it if necessary.
9473 * We avoid doing this in runtime invoke wrappers, since those are called
9474 * by native code which excepts the wrapper to catch all exceptions.
9476 for (i = 0; i < header->num_clauses; ++i) {
9477 MonoExceptionClause *clause = &header->clauses [i];
9480 * Use <= in the final comparison to handle clauses with multiple
9481 * leave statements, like in bug #78024.
9482 * The ordering of the exception clauses guarantees that we find the
9485 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
9487 MonoBasicBlock *dont_throw;
9492 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
9495 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
9497 NEW_BBLOCK (cfg, dont_throw);
9500 * Currently, we always rethrow the abort exception, despite the
9501 * fact that this is not correct. See thread6.cs for an example.
9502 * But propagating the abort exception is more important than
9503 * getting the sematics right.
9505 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
9506 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
9507 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
9509 MONO_START_BB (cfg, dont_throw);
9514 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
9516 MonoExceptionClause *clause;
9518 for (tmp = handlers; tmp; tmp = tmp->next) {
9520 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
9522 link_bblock (cfg, bblock, tblock);
9523 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
9524 ins->inst_target_bb = tblock;
9525 ins->inst_eh_block = clause;
9526 MONO_ADD_INS (bblock, ins);
9527 bblock->has_call_handler = 1;
9528 if (COMPILE_LLVM (cfg)) {
9529 MonoBasicBlock *target_bb;
9532 * Link the finally bblock with the target, since it will
9533 * conceptually branch there.
9534 * FIXME: Have to link the bblock containing the endfinally.
9536 GET_BBLOCK (cfg, target_bb, target);
9537 link_bblock (cfg, tblock, target_bb);
9540 g_list_free (handlers);
9543 MONO_INST_NEW (cfg, ins, OP_BR);
9544 MONO_ADD_INS (bblock, ins);
9545 GET_BBLOCK (cfg, tblock, target);
9546 link_bblock (cfg, bblock, tblock);
9547 ins->inst_target_bb = tblock;
9548 start_new_bblock = 1;
9550 if (*ip == CEE_LEAVE)
9559 * Mono specific opcodes
9561 case MONO_CUSTOM_PREFIX: {
9563 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
9567 case CEE_MONO_ICALL: {
9569 MonoJitICallInfo *info;
9571 token = read32 (ip + 2);
9572 func = mono_method_get_wrapper_data (method, token);
9573 info = mono_find_jit_icall_by_addr (func);
9576 CHECK_STACK (info->sig->param_count);
9577 sp -= info->sig->param_count;
9579 ins = mono_emit_jit_icall (cfg, info->func, sp);
9580 if (!MONO_TYPE_IS_VOID (info->sig->ret))
9584 inline_costs += 10 * num_calls++;
9588 case CEE_MONO_LDPTR: {
9591 CHECK_STACK_OVF (1);
9593 token = read32 (ip + 2);
9595 ptr = mono_method_get_wrapper_data (method, token);
9596 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
9597 MonoJitICallInfo *callinfo;
9598 const char *icall_name;
9600 icall_name = method->name + strlen ("__icall_wrapper_");
9601 g_assert (icall_name);
9602 callinfo = mono_find_jit_icall_by_name (icall_name);
9603 g_assert (callinfo);
9605 if (ptr == callinfo->func) {
9606 /* Will be transformed into an AOTCONST later */
9607 EMIT_NEW_PCONST (cfg, ins, ptr);
9613 /* FIXME: Generalize this */
9614 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
9615 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
9620 EMIT_NEW_PCONST (cfg, ins, ptr);
9623 inline_costs += 10 * num_calls++;
9624 /* Can't embed random pointers into AOT code */
9625 cfg->disable_aot = 1;
9628 case CEE_MONO_ICALL_ADDR: {
9629 MonoMethod *cmethod;
9632 CHECK_STACK_OVF (1);
9634 token = read32 (ip + 2);
9636 cmethod = mono_method_get_wrapper_data (method, token);
9638 if (cfg->compile_aot) {
9639 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
9641 ptr = mono_lookup_internal_call (cmethod);
9643 EMIT_NEW_PCONST (cfg, ins, ptr);
9649 case CEE_MONO_VTADDR: {
9650 MonoInst *src_var, *src;
9656 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9657 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
9662 case CEE_MONO_NEWOBJ: {
9663 MonoInst *iargs [2];
9665 CHECK_STACK_OVF (1);
9667 token = read32 (ip + 2);
9668 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9669 mono_class_init (klass);
9670 NEW_DOMAINCONST (cfg, iargs [0]);
9671 MONO_ADD_INS (cfg->cbb, iargs [0]);
9672 NEW_CLASSCONST (cfg, iargs [1], klass);
9673 MONO_ADD_INS (cfg->cbb, iargs [1]);
9674 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9676 inline_costs += 10 * num_calls++;
9679 case CEE_MONO_OBJADDR:
9682 MONO_INST_NEW (cfg, ins, OP_MOVE);
9683 ins->dreg = alloc_preg (cfg);
9684 ins->sreg1 = sp [0]->dreg;
9685 ins->type = STACK_MP;
9686 MONO_ADD_INS (cfg->cbb, ins);
9690 case CEE_MONO_LDNATIVEOBJ:
9692 * Similar to LDOBJ, but instead load the unmanaged
9693 * representation of the vtype to the stack.
9698 token = read32 (ip + 2);
9699 klass = mono_method_get_wrapper_data (method, token);
9700 g_assert (klass->valuetype);
9701 mono_class_init (klass);
9704 MonoInst *src, *dest, *temp;
9707 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9708 temp->backend.is_pinvoke = 1;
9709 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9710 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9712 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9713 dest->type = STACK_VTYPE;
9714 dest->klass = klass;
9720 case CEE_MONO_RETOBJ: {
9722 * Same as RET, but return the native representation of a vtype
9725 g_assert (cfg->ret);
9726 g_assert (mono_method_signature (method)->pinvoke);
9731 token = read32 (ip + 2);
9732 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9734 if (!cfg->vret_addr) {
9735 g_assert (cfg->ret_var_is_local);
9737 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9739 EMIT_NEW_RETLOADA (cfg, ins);
9741 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9743 if (sp != stack_start)
9746 MONO_INST_NEW (cfg, ins, OP_BR);
9747 ins->inst_target_bb = end_bblock;
9748 MONO_ADD_INS (bblock, ins);
9749 link_bblock (cfg, bblock, end_bblock);
9750 start_new_bblock = 1;
9754 case CEE_MONO_CISINST:
9755 case CEE_MONO_CCASTCLASS: {
9760 token = read32 (ip + 2);
9761 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9762 if (ip [1] == CEE_MONO_CISINST)
9763 ins = handle_cisinst (cfg, klass, sp [0]);
9765 ins = handle_ccastclass (cfg, klass, sp [0]);
9771 case CEE_MONO_SAVE_LMF:
9772 case CEE_MONO_RESTORE_LMF:
9773 #ifdef MONO_ARCH_HAVE_LMF_OPS
9774 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9775 MONO_ADD_INS (bblock, ins);
9776 cfg->need_lmf_area = TRUE;
9780 case CEE_MONO_CLASSCONST:
9781 CHECK_STACK_OVF (1);
9783 token = read32 (ip + 2);
9784 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9787 inline_costs += 10 * num_calls++;
9789 case CEE_MONO_NOT_TAKEN:
9790 bblock->out_of_line = TRUE;
9794 CHECK_STACK_OVF (1);
9796 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9797 ins->dreg = alloc_preg (cfg);
9798 ins->inst_offset = (gint32)read32 (ip + 2);
9799 ins->type = STACK_PTR;
9800 MONO_ADD_INS (bblock, ins);
9804 case CEE_MONO_DYN_CALL: {
9807 /* It would be easier to call a trampoline, but that would put an
9808 * extra frame on the stack, confusing exception handling. So
9809 * implement it inline using an opcode for now.
9812 if (!cfg->dyn_call_var) {
9813 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9814 /* prevent it from being register allocated */
9815 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
9818 /* Has to use a call inst since it local regalloc expects it */
9819 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
9820 ins = (MonoInst*)call;
9822 ins->sreg1 = sp [0]->dreg;
9823 ins->sreg2 = sp [1]->dreg;
9824 MONO_ADD_INS (bblock, ins);
9826 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
9827 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
9831 inline_costs += 10 * num_calls++;
9836 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9846 /* somewhat similar to LDTOKEN */
9847 MonoInst *addr, *vtvar;
9848 CHECK_STACK_OVF (1);
9849 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9851 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9852 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9854 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9855 ins->type = STACK_VTYPE;
9856 ins->klass = mono_defaults.argumenthandle_class;
9869 * The following transforms:
9870 * CEE_CEQ into OP_CEQ
9871 * CEE_CGT into OP_CGT
9872 * CEE_CGT_UN into OP_CGT_UN
9873 * CEE_CLT into OP_CLT
9874 * CEE_CLT_UN into OP_CLT_UN
9876 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9878 MONO_INST_NEW (cfg, ins, cmp->opcode);
9880 cmp->sreg1 = sp [0]->dreg;
9881 cmp->sreg2 = sp [1]->dreg;
9882 type_from_op (cmp, sp [0], sp [1]);
9884 if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9885 cmp->opcode = OP_LCOMPARE;
9886 else if (sp [0]->type == STACK_R8)
9887 cmp->opcode = OP_FCOMPARE;
9889 cmp->opcode = OP_ICOMPARE;
9890 MONO_ADD_INS (bblock, cmp);
9891 ins->type = STACK_I4;
9892 ins->dreg = alloc_dreg (cfg, ins->type);
9893 type_from_op (ins, sp [0], sp [1]);
9895 if (cmp->opcode == OP_FCOMPARE) {
9897 * The backends expect the fceq opcodes to do the
9900 cmp->opcode = OP_NOP;
9901 ins->sreg1 = cmp->sreg1;
9902 ins->sreg2 = cmp->sreg2;
9904 MONO_ADD_INS (bblock, ins);
9911 MonoMethod *cil_method;
9912 gboolean needs_static_rgctx_invoke;
9914 CHECK_STACK_OVF (1);
9916 n = read32 (ip + 2);
9917 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9918 if (!cmethod || mono_loader_get_last_error ())
9920 mono_class_init (cmethod->klass);
9922 mono_save_token_info (cfg, image, n, cmethod);
9924 if (cfg->generic_sharing_context)
9925 context_used = mono_method_check_context_used (cmethod);
9927 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9929 cil_method = cmethod;
9930 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9931 METHOD_ACCESS_FAILURE;
9933 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9934 if (check_linkdemand (cfg, method, cmethod))
9936 CHECK_CFG_EXCEPTION;
9937 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9938 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9942 * Optimize the common case of ldftn+delegate creation
9944 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9945 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9946 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9947 MonoInst *target_ins;
9949 int invoke_context_used = 0;
9951 invoke = mono_get_delegate_invoke (ctor_method->klass);
9952 if (!invoke || !mono_method_signature (invoke))
9955 if (cfg->generic_sharing_context)
9956 invoke_context_used = mono_method_check_context_used (invoke);
9958 target_ins = sp [-1];
9960 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
9961 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
9962 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
9963 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
9964 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
9968 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
9969 /* FIXME: SGEN support */
9970 if (invoke_context_used == 0) {
9972 if (cfg->verbose_level > 3)
9973 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9975 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
9976 CHECK_CFG_EXCEPTION;
9985 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9986 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9990 inline_costs += 10 * num_calls++;
9993 case CEE_LDVIRTFTN: {
9998 n = read32 (ip + 2);
9999 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
10000 if (!cmethod || mono_loader_get_last_error ())
10002 mono_class_init (cmethod->klass);
10004 if (cfg->generic_sharing_context)
10005 context_used = mono_method_check_context_used (cmethod);
10007 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
10008 if (check_linkdemand (cfg, method, cmethod))
10010 CHECK_CFG_EXCEPTION;
10011 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
10012 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
10018 args [1] = emit_get_rgctx_method (cfg, context_used,
10019 cmethod, MONO_RGCTX_INFO_METHOD);
10022 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
10024 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
10027 inline_costs += 10 * num_calls++;
10031 CHECK_STACK_OVF (1);
10033 n = read16 (ip + 2);
10035 EMIT_NEW_ARGLOAD (cfg, ins, n);
10040 CHECK_STACK_OVF (1);
10042 n = read16 (ip + 2);
10044 NEW_ARGLOADA (cfg, ins, n);
10045 MONO_ADD_INS (cfg->cbb, ins);
10053 n = read16 (ip + 2);
10055 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
10057 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
10061 CHECK_STACK_OVF (1);
10063 n = read16 (ip + 2);
10065 EMIT_NEW_LOCLOAD (cfg, ins, n);
10070 unsigned char *tmp_ip;
10071 CHECK_STACK_OVF (1);
10073 n = read16 (ip + 2);
10076 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
10082 EMIT_NEW_LOCLOADA (cfg, ins, n);
10091 n = read16 (ip + 2);
10093 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
10095 emit_stloc_ir (cfg, sp, header, n);
10102 if (sp != stack_start)
10104 if (cfg->method != method)
10106 * Inlining this into a loop in a parent could lead to
10107 * stack overflows which is different behavior than the
10108 * non-inlined case, thus disable inlining in this case.
10110 goto inline_failure;
10112 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
10113 ins->dreg = alloc_preg (cfg);
10114 ins->sreg1 = sp [0]->dreg;
10115 ins->type = STACK_PTR;
10116 MONO_ADD_INS (cfg->cbb, ins);
10118 cfg->flags |= MONO_CFG_HAS_ALLOCA;
10120 ins->flags |= MONO_INST_INIT;
10125 case CEE_ENDFILTER: {
10126 MonoExceptionClause *clause, *nearest;
10127 int cc, nearest_num;
10131 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
10133 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
10134 ins->sreg1 = (*sp)->dreg;
10135 MONO_ADD_INS (bblock, ins);
10136 start_new_bblock = 1;
10141 for (cc = 0; cc < header->num_clauses; ++cc) {
10142 clause = &header->clauses [cc];
10143 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
10144 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
10145 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
10150 g_assert (nearest);
10151 if ((ip - header->code) != nearest->handler_offset)
10156 case CEE_UNALIGNED_:
10157 ins_flag |= MONO_INST_UNALIGNED;
10158 /* FIXME: record alignment? we can assume 1 for now */
10162 case CEE_VOLATILE_:
10163 ins_flag |= MONO_INST_VOLATILE;
10167 ins_flag |= MONO_INST_TAILCALL;
10168 cfg->flags |= MONO_CFG_HAS_TAIL;
10169 /* Can't inline tail calls at this time */
10170 inline_costs += 100000;
10177 token = read32 (ip + 2);
10178 klass = mini_get_class (method, token, generic_context);
10179 CHECK_TYPELOAD (klass);
10180 if (generic_class_is_reference_type (cfg, klass))
10181 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
10183 mini_emit_initobj (cfg, *sp, NULL, klass);
10187 case CEE_CONSTRAINED_:
10189 token = read32 (ip + 2);
10190 if (method->wrapper_type != MONO_WRAPPER_NONE)
10191 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
10193 constrained_call = mono_class_get_full (image, token, generic_context);
10194 CHECK_TYPELOAD (constrained_call);
10198 case CEE_INITBLK: {
10199 MonoInst *iargs [3];
10203 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
10204 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
10205 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
10206 /* emit_memset only works when val == 0 */
10207 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
10209 iargs [0] = sp [0];
10210 iargs [1] = sp [1];
10211 iargs [2] = sp [2];
10212 if (ip [1] == CEE_CPBLK) {
10213 MonoMethod *memcpy_method = get_memcpy_method ();
10214 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
10216 MonoMethod *memset_method = get_memset_method ();
10217 mono_emit_method_call (cfg, memset_method, iargs, NULL);
10227 ins_flag |= MONO_INST_NOTYPECHECK;
10229 ins_flag |= MONO_INST_NORANGECHECK;
10230 /* we ignore the no-nullcheck for now since we
10231 * really do it explicitly only when doing callvirt->call
10235 case CEE_RETHROW: {
10237 int handler_offset = -1;
10239 for (i = 0; i < header->num_clauses; ++i) {
10240 MonoExceptionClause *clause = &header->clauses [i];
10241 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
10242 handler_offset = clause->handler_offset;
10247 bblock->flags |= BB_EXCEPTION_UNSAFE;
10249 g_assert (handler_offset != -1);
10251 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
10252 MONO_INST_NEW (cfg, ins, OP_RETHROW);
10253 ins->sreg1 = load->dreg;
10254 MONO_ADD_INS (bblock, ins);
10256 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
10257 MONO_ADD_INS (bblock, ins);
10260 link_bblock (cfg, bblock, end_bblock);
10261 start_new_bblock = 1;
10269 CHECK_STACK_OVF (1);
10271 token = read32 (ip + 2);
10272 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic) {
10273 MonoType *type = mono_type_create_from_typespec (image, token);
10274 token = mono_type_size (type, &ialign);
10276 MonoClass *klass = mono_class_get_full (image, token, generic_context);
10277 CHECK_TYPELOAD (klass);
10278 mono_class_init (klass);
10279 token = mono_class_value_size (klass, &align);
10281 EMIT_NEW_ICONST (cfg, ins, token);
10286 case CEE_REFANYTYPE: {
10287 MonoInst *src_var, *src;
10293 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10295 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
10296 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
10297 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
10302 case CEE_READONLY_:
10315 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
10325 g_warning ("opcode 0x%02x not handled", *ip);
10329 if (start_new_bblock != 1)
10332 bblock->cil_length = ip - bblock->cil_code;
10333 bblock->next_bb = end_bblock;
10335 if (cfg->method == method && cfg->domainvar) {
10337 MonoInst *get_domain;
10339 cfg->cbb = init_localsbb;
10341 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
10342 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
10345 get_domain->dreg = alloc_preg (cfg);
10346 MONO_ADD_INS (cfg->cbb, get_domain);
10348 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
10349 MONO_ADD_INS (cfg->cbb, store);
10352 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
10353 if (cfg->compile_aot)
10354 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
10355 mono_get_got_var (cfg);
10358 if (cfg->method == method && cfg->got_var)
10359 mono_emit_load_got_addr (cfg);
10364 cfg->cbb = init_localsbb;
10366 for (i = 0; i < header->num_locals; ++i) {
10367 MonoType *ptype = header->locals [i];
10368 int t = ptype->type;
10369 dreg = cfg->locals [i]->dreg;
10371 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
10372 t = mono_class_enum_basetype (ptype->data.klass)->type;
10373 if (ptype->byref) {
10374 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10375 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
10376 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
10377 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
10378 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
10379 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
10380 MONO_INST_NEW (cfg, ins, OP_R8CONST);
10381 ins->type = STACK_R8;
10382 ins->inst_p0 = (void*)&r8_0;
10383 ins->dreg = alloc_dreg (cfg, STACK_R8);
10384 MONO_ADD_INS (init_localsbb, ins);
10385 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
10386 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
10387 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
10388 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
10390 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10395 if (cfg->init_ref_vars && cfg->method == method) {
10396 /* Emit initialization for ref vars */
10397 // FIXME: Avoid duplication initialization for IL locals.
10398 for (i = 0; i < cfg->num_varinfo; ++i) {
10399 MonoInst *ins = cfg->varinfo [i];
10401 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
10402 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
10406 /* Add a sequence point for method entry/exit events */
10408 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
10409 MONO_ADD_INS (init_localsbb, ins);
10410 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
10411 MONO_ADD_INS (cfg->bb_exit, ins);
10416 if (cfg->method == method) {
10417 MonoBasicBlock *bb;
10418 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10419 bb->region = mono_find_block_region (cfg, bb->real_offset);
10421 mono_create_spvar_for_region (cfg, bb->region);
10422 if (cfg->verbose_level > 2)
10423 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
10427 g_slist_free (class_inits);
10428 dont_inline = g_list_remove (dont_inline, method);
10430 if (inline_costs < 0) {
10433 /* Method is too large */
10434 mname = mono_method_full_name (method, TRUE);
10435 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
10436 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
10438 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10439 mono_basic_block_free (original_bb);
10443 if ((cfg->verbose_level > 2) && (cfg->method == method))
10444 mono_print_code (cfg, "AFTER METHOD-TO-IR");
10446 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10447 mono_basic_block_free (original_bb);
10448 return inline_costs;
10451 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
10458 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
10462 set_exception_type_from_invalid_il (cfg, method, ip);
10466 g_slist_free (class_inits);
10467 mono_basic_block_free (original_bb);
10468 dont_inline = g_list_remove (dont_inline, method);
10469 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10474 store_membase_reg_to_store_membase_imm (int opcode)
10477 case OP_STORE_MEMBASE_REG:
10478 return OP_STORE_MEMBASE_IMM;
10479 case OP_STOREI1_MEMBASE_REG:
10480 return OP_STOREI1_MEMBASE_IMM;
10481 case OP_STOREI2_MEMBASE_REG:
10482 return OP_STOREI2_MEMBASE_IMM;
10483 case OP_STOREI4_MEMBASE_REG:
10484 return OP_STOREI4_MEMBASE_IMM;
10485 case OP_STOREI8_MEMBASE_REG:
10486 return OP_STOREI8_MEMBASE_IMM;
10488 g_assert_not_reached ();
10494 #endif /* DISABLE_JIT */
10497 mono_op_to_op_imm (int opcode)
10501 return OP_IADD_IMM;
10503 return OP_ISUB_IMM;
10505 return OP_IDIV_IMM;
10507 return OP_IDIV_UN_IMM;
10509 return OP_IREM_IMM;
10511 return OP_IREM_UN_IMM;
10513 return OP_IMUL_IMM;
10515 return OP_IAND_IMM;
10519 return OP_IXOR_IMM;
10521 return OP_ISHL_IMM;
10523 return OP_ISHR_IMM;
10525 return OP_ISHR_UN_IMM;
10528 return OP_LADD_IMM;
10530 return OP_LSUB_IMM;
10532 return OP_LAND_IMM;
10536 return OP_LXOR_IMM;
10538 return OP_LSHL_IMM;
10540 return OP_LSHR_IMM;
10542 return OP_LSHR_UN_IMM;
10545 return OP_COMPARE_IMM;
10547 return OP_ICOMPARE_IMM;
10549 return OP_LCOMPARE_IMM;
10551 case OP_STORE_MEMBASE_REG:
10552 return OP_STORE_MEMBASE_IMM;
10553 case OP_STOREI1_MEMBASE_REG:
10554 return OP_STOREI1_MEMBASE_IMM;
10555 case OP_STOREI2_MEMBASE_REG:
10556 return OP_STOREI2_MEMBASE_IMM;
10557 case OP_STOREI4_MEMBASE_REG:
10558 return OP_STOREI4_MEMBASE_IMM;
10560 #if defined(TARGET_X86) || defined (TARGET_AMD64)
10562 return OP_X86_PUSH_IMM;
10563 case OP_X86_COMPARE_MEMBASE_REG:
10564 return OP_X86_COMPARE_MEMBASE_IMM;
10566 #if defined(TARGET_AMD64)
10567 case OP_AMD64_ICOMPARE_MEMBASE_REG:
10568 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10570 case OP_VOIDCALL_REG:
10571 return OP_VOIDCALL;
10579 return OP_LOCALLOC_IMM;
10586 ldind_to_load_membase (int opcode)
10590 return OP_LOADI1_MEMBASE;
10592 return OP_LOADU1_MEMBASE;
10594 return OP_LOADI2_MEMBASE;
10596 return OP_LOADU2_MEMBASE;
10598 return OP_LOADI4_MEMBASE;
10600 return OP_LOADU4_MEMBASE;
10602 return OP_LOAD_MEMBASE;
10603 case CEE_LDIND_REF:
10604 return OP_LOAD_MEMBASE;
10606 return OP_LOADI8_MEMBASE;
10608 return OP_LOADR4_MEMBASE;
10610 return OP_LOADR8_MEMBASE;
10612 g_assert_not_reached ();
10619 stind_to_store_membase (int opcode)
10623 return OP_STOREI1_MEMBASE_REG;
10625 return OP_STOREI2_MEMBASE_REG;
10627 return OP_STOREI4_MEMBASE_REG;
10629 case CEE_STIND_REF:
10630 return OP_STORE_MEMBASE_REG;
10632 return OP_STOREI8_MEMBASE_REG;
10634 return OP_STORER4_MEMBASE_REG;
10636 return OP_STORER8_MEMBASE_REG;
10638 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map an OP_LOAD*_MEMBASE opcode to the corresponding absolute-address
 * OP_LOAD*_MEM opcode, used when the base register holds a known constant.
 * Only x86/amd64 support these opcodes; returns -1 elsewhere or when no
 * MEM variant exists for OPCODE.
 */
static int
mono_load_membase_to_load_mem (int opcode)
{
	// FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
#if defined(TARGET_X86) || defined(TARGET_AMD64)
	switch (opcode) {
	case OP_LOAD_MEMBASE:
		return OP_LOAD_MEM;
	case OP_LOADU1_MEMBASE:
		return OP_LOADU1_MEM;
	case OP_LOADU2_MEMBASE:
		return OP_LOADU2_MEM;
	case OP_LOADI4_MEMBASE:
		return OP_LOADI4_MEM;
	case OP_LOADU4_MEMBASE:
		return OP_LOADU4_MEM;
#if SIZEOF_REGISTER == 8
	case OP_LOADI8_MEMBASE:
		return OP_LOADI8_MEM;
#endif
	}
#endif

	return -1;
}
/*
 * op_to_op_dest_membase:
 *
 *   Map OPCODE to an x86/amd64 read-modify-write instruction operating
 * directly on [basereg+offset], so that an ALU op whose result is stored
 * by STORE_OPCODE can be fused into a single memory-destination op.
 * Presumably returns -1 when no fusion is possible — the fallback paths
 * are not visible in this view.
 * NOTE(review): this view of the function is missing lines (the case
 * labels, switch headers and closing braces appear elided); the visible
 * code is kept byte-identical below.
 */
10671 op_to_op_dest_membase (int store_opcode, int opcode)
10673 #if defined(TARGET_X86)
/* x86: only pointer-sized / 32 bit stores can be fused */
10674 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
/* register-source forms */
10679 return OP_X86_ADD_MEMBASE_REG;
10681 return OP_X86_SUB_MEMBASE_REG;
10683 return OP_X86_AND_MEMBASE_REG;
10685 return OP_X86_OR_MEMBASE_REG;
10687 return OP_X86_XOR_MEMBASE_REG;
/* immediate-source forms */
10690 return OP_X86_ADD_MEMBASE_IMM;
10693 return OP_X86_SUB_MEMBASE_IMM;
10696 return OP_X86_AND_MEMBASE_IMM;
10699 return OP_X86_OR_MEMBASE_IMM;
10702 return OP_X86_XOR_MEMBASE_IMM;
10708 #if defined(TARGET_AMD64)
/* amd64: pointer-sized, 32 bit and 64 bit stores can all be fused */
10709 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32 bit ops reuse the X86_* memory-destination opcodes */
10714 return OP_X86_ADD_MEMBASE_REG;
10716 return OP_X86_SUB_MEMBASE_REG;
10718 return OP_X86_AND_MEMBASE_REG;
10720 return OP_X86_OR_MEMBASE_REG;
10722 return OP_X86_XOR_MEMBASE_REG;
10724 return OP_X86_ADD_MEMBASE_IMM;
10726 return OP_X86_SUB_MEMBASE_IMM;
10728 return OP_X86_AND_MEMBASE_IMM;
10730 return OP_X86_OR_MEMBASE_IMM;
10732 return OP_X86_XOR_MEMBASE_IMM;
/* 64 bit ops use the AMD64_* memory-destination opcodes */
10734 return OP_AMD64_ADD_MEMBASE_REG;
10736 return OP_AMD64_SUB_MEMBASE_REG;
10738 return OP_AMD64_AND_MEMBASE_REG;
10740 return OP_AMD64_OR_MEMBASE_REG;
10742 return OP_AMD64_XOR_MEMBASE_REG;
10745 return OP_AMD64_ADD_MEMBASE_IMM;
10748 return OP_AMD64_SUB_MEMBASE_IMM;
10751 return OP_AMD64_AND_MEMBASE_IMM;
10754 return OP_AMD64_OR_MEMBASE_IMM;
10757 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a compare-result opcode with the byte store that consumes it into
 * a single x86/amd64 SETcc-to-memory instruction. Only applies when the
 * store is the one-byte OP_STOREI1_MEMBASE_REG.
 * NOTE(review): the case labels and the fallback return are elided from
 * this view; visible code kept byte-identical.
 */
10767 op_to_op_store_membase (int store_opcode, int opcode)
10769 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10772 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10773 return OP_X86_SETEQ_MEMBASE;
10775 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10776 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fold a load produced by LOAD_OPCODE into OPCODE when the loaded value
 * is OPCODE's first source operand, yielding an x86/amd64 instruction that
 * reads its operand directly from [basereg+offset]. Presumably returns -1
 * when no folding is possible — the fallback paths are elided from this view.
 * NOTE(review): switch headers, some case labels and closing braces appear
 * elided; visible code kept byte-identical.
 */
10784 op_to_op_src1_membase (int load_opcode, int opcode)
10787 /* FIXME: This has sign extension issues */
10789 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10790 return OP_X86_COMPARE_MEMBASE8_IMM;
/* x86: only 32 bit / pointer-sized loads can be folded */
10793 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10798 return OP_X86_PUSH_MEMBASE;
10799 case OP_COMPARE_IMM:
10800 case OP_ICOMPARE_IMM:
10801 return OP_X86_COMPARE_MEMBASE_IMM;
10804 return OP_X86_COMPARE_MEMBASE_REG;
10808 #ifdef TARGET_AMD64
10809 /* FIXME: This has sign extension issues */
10811 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10812 return OP_X86_COMPARE_MEMBASE8_IMM;
10817 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10818 return OP_X86_PUSH_MEMBASE;
/* NOTE(review): the block below sits inside a /* FIXME ... */ comment in
 * the original source (opened on the next line), i.e. it is disabled code. */
10820 /* FIXME: This only works for 32 bit immediates
10821 case OP_COMPARE_IMM:
10822 case OP_LCOMPARE_IMM:
10823 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10824 return OP_AMD64_COMPARE_MEMBASE_IMM;
10826 case OP_ICOMPARE_IMM:
10827 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10828 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10832 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10833 return OP_AMD64_COMPARE_MEMBASE_REG;
10836 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10837 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Like op_to_op_src1_membase, but fuses a load feeding the SECOND source
 * register into a reg,[membase] form of the consuming ALU/compare opcode.
 * Returns the fused opcode, or -1 (on elided lines) when no fusion applies.
 * NOTE(review): line-sampled excerpt — switch/case labels are elided.
 */
10846 op_to_op_src2_membase (int load_opcode, int opcode)
/* x86 (32-bit): only full-word loads can be the memory operand. */
10849 	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10855 		return OP_X86_COMPARE_REG_MEMBASE;
10857 		return OP_X86_ADD_REG_MEMBASE;
10859 		return OP_X86_SUB_REG_MEMBASE;
10861 		return OP_X86_AND_REG_MEMBASE;
10863 		return OP_X86_OR_REG_MEMBASE;
10865 		return OP_X86_XOR_REG_MEMBASE;
10869 #ifdef TARGET_AMD64
/* amd64: 32-bit loads fuse into the OP_X86_* (32-bit) forms... */
10870 	if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
10873 			return OP_AMD64_ICOMPARE_REG_MEMBASE;
10875 			return OP_X86_ADD_REG_MEMBASE;
10877 			return OP_X86_SUB_REG_MEMBASE;
10879 			return OP_X86_AND_REG_MEMBASE;
10881 			return OP_X86_OR_REG_MEMBASE;
10883 			return OP_X86_XOR_REG_MEMBASE;
/* ...while 64-bit loads fuse into the OP_AMD64_* (64-bit) forms. */
10885 	} else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
10889 			return OP_AMD64_COMPARE_REG_MEMBASE;
10891 			return OP_AMD64_ADD_REG_MEMBASE;
10893 			return OP_AMD64_SUB_REG_MEMBASE;
10895 			return OP_AMD64_AND_REG_MEMBASE;
10897 			return OP_AMD64_OR_REG_MEMBASE;
10899 			return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Convert OPCODE to its immediate-operand variant via mono_op_to_op_imm (),
 * but refuse (on elided lines) for opcodes that are software-emulated on this
 * architecture — presumably because emulated ops are lowered to calls and
 * cannot take an inline immediate; confirm against the full file.
 */
10908 mono_op_to_op_imm_noemul (int opcode)
/* 32-bit targets without native long shifts: long shift opcodes are
 * emulated, so they are excluded (case labels elided). */
10911 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
/* Likewise exclude mul/div/rem when those are emulated on this arch. */
10917 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10925 		return mono_op_to_op_imm (opcode);
10929 #ifndef DISABLE_JIT
10932  * mono_handle_global_vregs:
10934  * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/*
 * for them (so the global register allocator / spill code sees them), and
 * conversely demote single-bblock variables back to plain local vregs.
 * NOTE(review): this excerpt is line-sampled; loop bodies, case labels and
 * closing braces on elided lines are not shown.
 */
10938 mono_handle_global_vregs (MonoCompile *cfg)
10940 	gint32 *vreg_to_bb;
10941 	MonoBasicBlock *bb;
/*
 * vreg_to_bb maps vreg -> (block_num + 1) of the unique bblock using it,
 * or -1 once a second bblock is seen; 0 means "not seen yet".
 * NOTE(review): the element size is sizeof (gint32*) for what is used as a
 * gint32 array, and "+ 1" adds one byte rather than one element — a benign
 * over-allocation on 64-bit, but worth confirming against upstream.
 */
10944 	vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
10946 #ifdef MONO_ARCH_SIMD_INTRINSICS
10947 	if (cfg->uses_simd_intrinsics)
10948 		mono_simd_simplify_indirection (cfg);
10951 	/* Find local vregs used in more than one bb */
10952 	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10953 		MonoInst *ins = bb->code;
10954 		int block_num = bb->block_num;
10956 		if (cfg->verbose_level > 2)
10957 			printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
10960 		for (; ins; ins = ins->next) {
10961 			const char *spec = INS_INFO (ins->opcode);
10962 			int regtype = 0, regindex;
10965 			if (G_UNLIKELY (cfg->verbose_level > 2))
10966 				mono_print_ins (ins);
/* This pass runs on the low-level IR only — CIL opcodes must be gone. */
10968 			g_assert (ins->opcode >= MONO_CEE_LAST);
/* Walk dreg, sreg1, sreg2, sreg3 of the instruction in turn; a ' ' in the
 * spec string means the slot is unused (the vreg pick-up is elided). */
10970 			for (regindex = 0; regindex < 4; regindex ++) {
10973 				if (regindex == 0) {
10974 					regtype = spec [MONO_INST_DEST];
10975 					if (regtype == ' ')
10978 				} else if (regindex == 1) {
10979 					regtype = spec [MONO_INST_SRC1];
10980 					if (regtype == ' ')
10983 				} else if (regindex == 2) {
10984 					regtype = spec [MONO_INST_SRC2];
10985 					if (regtype == ' ')
10988 				} else if (regindex == 3) {
10989 					regtype = spec [MONO_INST_SRC3];
10990 					if (regtype == ' ')
10995 #if SIZEOF_REGISTER == 4
10996 				/* In the LLVM case, the long opcodes are not decomposed */
10997 				if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
10999 					 * Since some instructions reference the original long vreg,
11000 					 * and some reference the two component vregs, it is quite hard
11001 					 * to determine when it needs to be global. So be conservative.
11003 					if (!get_vreg_to_inst (cfg, vreg)) {
11004 						mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
11006 						if (cfg->verbose_level > 2)
11007 							printf ("LONG VREG R%d made global.\n", vreg);
11011 					 * Make the component vregs volatile since the optimizations can
11012 					 * get confused otherwise.
/* vreg+1 / vreg+2 are the two 32-bit halves of a decomposed long. */
11014 					get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
11015 					get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
11019 				g_assert (vreg != -1);
11021 				prev_bb = vreg_to_bb [vreg];
11022 				if (prev_bb == 0) {
11023 					/* 0 is a valid block num */
11024 					vreg_to_bb [vreg] = block_num + 1;
11025 				} else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hardware registers are never turned into variables. */
11026 					if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
11029 					if (!get_vreg_to_inst (cfg, vreg)) {
11030 						if (G_UNLIKELY (cfg->verbose_level > 2))
11031 							printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Create the variable with a type matching the spec regtype
 * ('i'/'l'/'f'/vtype — the case labels are elided). */
11035 							mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
11038 							mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
11041 							mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
11044 							mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
11047 							g_assert_not_reached ();
11051 					/* Flag as having been used in more than one bb */
11052 					vreg_to_bb [vreg] = -1;
11058 	/* If a variable is used in only one bblock, convert it into a local vreg */
11059 	for (i = 0; i < cfg->num_varinfo; i++) {
11060 		MonoInst *var = cfg->varinfo [i];
11061 		MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
11063 		switch (var->type) {
11069 #if SIZEOF_REGISTER == 8
11072 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
11073 			/* Enabling this screws up the fp stack on x86 */
11076 			/* Arguments are implicitly global */
11077 			/* Putting R4 vars into registers doesn't work currently */
11078 			if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
11080 				 * Make that the variable's liveness interval doesn't contain a call, since
11081 				 * that would cause the lvreg to be spilled, making the whole optimization
11084 				/* This is too slow for JIT compilation */
11086 				if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
11088 					int def_index, call_index, ins_index;
11089 					gboolean spilled = FALSE;
11094 					for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
11095 						const char *spec = INS_INFO (ins->opcode);
11097 						if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
11098 							def_index = ins_index;
/*
 * NOTE(review): the two disjuncts below are identical — the second was
 * presumably meant to test MONO_INST_SRC2 / ins->sreg2, so uses through
 * the second source register are currently missed by this "spilled"
 * detection (copy-paste bug; confirm against upstream before fixing —
 * the surrounding elided lines prevent a safe in-place fix here).
 */
11100 						if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
11101 							((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
11102 							if (call_index > def_index) {
11108 						if (MONO_IS_CALL (ins))
11109 							call_index = ins_index;
11119 				if (G_UNLIKELY (cfg->verbose_level > 2))
11120 					printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* Demote: mark the variable dead and drop the vreg->var mapping so the
 * vreg is treated as a plain local vreg again. */
11121 				var->flags |= MONO_INST_IS_DEAD;
11122 				cfg->vreg_to_inst [var->dreg] = NULL;
11129 	 * Compress the varinfo and vars tables so the liveness computation is faster and
11130 	 * takes up less space.
11133 	for (i = 0; i < cfg->num_varinfo; ++i) {
11134 		MonoInst *var = cfg->varinfo [i];
11135 		if (pos < i && cfg->locals_start == i)
11136 			cfg->locals_start = pos;
11137 		if (!(var->flags & MONO_INST_IS_DEAD)) {
/* Compact live entries down to index `pos`, keeping inst_c0/idx (the
 * variable's own index) consistent with its new slot. */
11139 				cfg->varinfo [pos] = cfg->varinfo [i];
11140 				cfg->varinfo [pos]->inst_c0 = pos;
11141 				memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
11142 				cfg->vars [pos].idx = pos;
11143 #if SIZEOF_REGISTER == 4
11144 				if (cfg->varinfo [pos]->type == STACK_I8) {
11145 					/* Modify the two component vars too */
11148 					var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
11149 					var1->inst_c0 = pos;
11150 					var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
11151 					var1->inst_c0 = pos;
11158 	cfg->num_varinfo = pos;
11159 	if (cfg->locals_start > cfg->num_varinfo)
11160 		cfg->locals_start = cfg->num_varinfo;
11164  * mono_spill_global_vars:
11166  * Generate spill code for variables which are not allocated to registers,
11167  * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
11168  * code is generated which could be optimized by the local optimization passes.
/*
 * NOTE(review): this excerpt is line-sampled; many statements, case labels
 * and closing braces on elided lines are not shown.  The pass walks every
 * instruction, rewriting references to stack-allocated variables into
 * load/store-membase sequences (or fused *_membase opcodes), caches loaded
 * values in "lvregs", and records instruction-precise live ranges.
 */
11171 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
11173 	MonoBasicBlock *bb;
11175 	int orig_next_vreg;
11176 	guint32 *vreg_to_lvreg;
11178 	guint32 i, lvregs_len;
11179 	gboolean dest_has_lvreg = FALSE;
/* Indexed by the spec regtype character ('i'/'l'/'f'/'x'). */
11180 	guint32 stacktypes [128];
11181 	MonoInst **live_range_start, **live_range_end;
11182 	MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
11184 	*need_local_opts = FALSE;
11186 	memset (spec2, 0, sizeof (spec2));
11188 	/* FIXME: Move this function to mini.c */
11189 	stacktypes ['i'] = STACK_PTR;
11190 	stacktypes ['l'] = STACK_I8;
11191 	stacktypes ['f'] = STACK_R8;
11192 #ifdef MONO_ARCH_SIMD_INTRINSICS
11193 	stacktypes ['x'] = STACK_VTYPE;
11196 #if SIZEOF_REGISTER == 4
11197 	/* Create MonoInsts for longs */
11198 	for (i = 0; i < cfg->num_varinfo; i++) {
11199 		MonoInst *ins = cfg->varinfo [i];
11201 		if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
11202 			switch (ins->type) {
11207 				if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
11210 				g_assert (ins->opcode == OP_REGOFFSET);
/* Give each 32-bit half of the long (dreg+1 = LS, dreg+2 = MS) its own
 * REGOFFSET slot inside the parent's stack location. */
11212 				tree = get_vreg_to_inst (cfg, ins->dreg + 1);
11214 				tree->opcode = OP_REGOFFSET;
11215 				tree->inst_basereg = ins->inst_basereg;
11216 				tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
11218 				tree = get_vreg_to_inst (cfg, ins->dreg + 2);
11220 				tree->opcode = OP_REGOFFSET;
11221 				tree->inst_basereg = ins->inst_basereg;
11222 				tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
11232 	/* FIXME: widening and truncation */
11235 	 * As an optimization, when a variable allocated to the stack is first loaded into
11236 	 * an lvreg, we will remember the lvreg and use it the next time instead of loading
11237 	 * the variable again.
11239 	orig_next_vreg = cfg->next_vreg;
11240 	vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
/* Fixed capacity of 1024 cached lvregs per bblock, guarded by the
 * g_assert (lvregs_len < 1024) checks below. */
11241 	lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
11245 	 * These arrays contain the first and last instructions accessing a given
11247 	 * Since we emit bblocks in the same order we process them here, and we
11248 	 * don't split live ranges, these will precisely describe the live range of
11249 	 * the variable, i.e. the instruction range where a valid value can be found
11250 	 * in the variables location.
11251 	 * The live range is computed using the liveness info computed by the liveness pass.
11252 	 * We can't use vmv->range, since that is an abstract live range, and we need
11253 	 * one which is instruction precise.
11254 	 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
11256 	/* FIXME: Only do this if debugging info is requested */
11257 	live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
11258 	live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
11259 	live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
11260 	live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
11262 	/* Add spill loads/stores */
11263 	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
11266 		if (cfg->verbose_level > 2)
11267 			printf ("\nSPILL BLOCK %d:\n", bb->block_num);
/* The lvreg cache is only valid within one bblock. */
11269 		/* Clear vreg_to_lvreg array */
11270 		for (i = 0; i < lvregs_len; i++)
11271 			vreg_to_lvreg [lvregs [i]] = 0;
11275 		MONO_BB_FOR_EACH_INS (bb, ins) {
11276 			const char *spec = INS_INFO (ins->opcode);
11277 			int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
11278 			gboolean store, no_lvreg;
11279 			int sregs [MONO_MAX_SRC_REGS];
11281 			if (G_UNLIKELY (cfg->verbose_level > 2))
11282 				mono_print_ins (ins);
11284 			if (ins->opcode == OP_NOP)
11288 			 * We handle LDADDR here as well, since it can only be decomposed
11289 			 * when variable addresses are known.
11291 			if (ins->opcode == OP_LDADDR) {
11292 				MonoInst *var = ins->inst_p0;
11294 				if (var->opcode == OP_VTARG_ADDR) {
11295 					/* Happens on SPARC/S390 where vtypes are passed by reference */
11296 					MonoInst *vtaddr = var->inst_left;
11297 					if (vtaddr->opcode == OP_REGVAR) {
11298 						ins->opcode = OP_MOVE;
11299 						ins->sreg1 = vtaddr->dreg;
11301 					else if (var->inst_left->opcode == OP_REGOFFSET) {
11302 						ins->opcode = OP_LOAD_MEMBASE;
11303 						ins->inst_basereg = vtaddr->inst_basereg;
11304 						ins->inst_offset = vtaddr->inst_offset;
11308 					g_assert (var->opcode == OP_REGOFFSET);
/* LDADDR of a stack slot decomposes to basereg + offset. */
11310 					ins->opcode = OP_ADD_IMM;
11311 					ins->sreg1 = var->inst_basereg;
11312 					ins->inst_imm = var->inst_offset;
11315 				*need_local_opts = TRUE;
11316 				spec = INS_INFO (ins->opcode);
/* By this point only low-level opcodes may remain. */
11319 			if (ins->opcode < MONO_CEE_LAST) {
11320 				mono_print_ins (ins);
11321 				g_assert_not_reached ();
11325 			 * Store opcodes have destbasereg in the dreg, but in reality, it is an
/* Temporarily swap dreg<->sreg2 (and build spec2 accordingly) so stores
 * are processed with the base register treated as a source; the swap is
 * undone further below. */
11329 			if (MONO_IS_STORE_MEMBASE (ins)) {
11330 				tmp_reg = ins->dreg;
11331 				ins->dreg = ins->sreg2;
11332 				ins->sreg2 = tmp_reg;
11335 				spec2 [MONO_INST_DEST] = ' ';
11336 				spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
11337 				spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
11338 				spec2 [MONO_INST_SRC3] = ' ';
11340 			} else if (MONO_IS_STORE_MEMINDEX (ins))
11341 				g_assert_not_reached ();
11346 			if (G_UNLIKELY (cfg->verbose_level > 2)) {
11347 				printf ("\t %.3s %d", spec, ins->dreg);
11348 				num_sregs = mono_inst_get_src_registers (ins, sregs);
11349 				for (srcindex = 0; srcindex < 3; ++srcindex)
11350 					printf (" %d", sregs [srcindex]);
/***************/
/* DREG        */
/***************/
11357 			regtype = spec [MONO_INST_DEST];
11358 			g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
11361 			if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
11362 				MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
11363 				MonoInst *store_ins;
11365 				MonoInst *def_ins = ins;
11366 				int dreg = ins->dreg; /* The original vreg */
11368 				store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
11370 				if (var->opcode == OP_REGVAR) {
/* Variable lives in a hardware register: just redirect the dreg. */
11371 					ins->dreg = var->dreg;
11372 				} else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
11374 					 * Instead of emitting a load+store, use a _membase opcode.
11376 					g_assert (var->opcode == OP_REGOFFSET);
11377 					if (ins->opcode == OP_MOVE) {
11381 						ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
11382 						ins->inst_basereg = var->inst_basereg;
11383 						ins->inst_offset = var->inst_offset;
11386 					spec = INS_INFO (ins->opcode);
11390 					g_assert (var->opcode == OP_REGOFFSET);
11392 					prev_dreg = ins->dreg;
11394 					/* Invalidate any previous lvreg for this vreg */
11395 					vreg_to_lvreg [ins->dreg] = 0;
11399 					if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
/* Soft-float: R8 values are handled as 64-bit integers. */
11401 						store_opcode = OP_STOREI8_MEMBASE_REG;
/* Redirect the definition into a fresh lvreg, then store it back to the
 * variable's stack slot right after the defining instruction. */
11404 					ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
11406 					if (regtype == 'l') {
11407 						NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
11408 						mono_bblock_insert_after_ins (bb, ins, store_ins);
11409 						NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
11410 						mono_bblock_insert_after_ins (bb, ins, store_ins);
11411 						def_ins = store_ins;
11414 						g_assert (store_opcode != OP_STOREV_MEMBASE);
11416 						/* Try to fuse the store into the instruction itself */
11417 						/* FIXME: Add more instructions */
11418 						if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
11419 							ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
11420 							ins->inst_imm = ins->inst_c0;
11421 							ins->inst_destbasereg = var->inst_basereg;
11422 							ins->inst_offset = var->inst_offset;
11423 							spec = INS_INFO (ins->opcode);
11424 						} else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
/* A move into the variable becomes a direct store of the source. */
11425 							ins->opcode = store_opcode;
11426 							ins->inst_destbasereg = var->inst_basereg;
11427 							ins->inst_offset = var->inst_offset;
11431 							tmp_reg = ins->dreg;
11432 							ins->dreg = ins->sreg2;
11433 							ins->sreg2 = tmp_reg;
11436 							spec2 [MONO_INST_DEST] = ' ';
11437 							spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
11438 							spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
11439 							spec2 [MONO_INST_SRC3] = ' ';
11441 						} else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
11442 							// FIXME: The backends expect the base reg to be in inst_basereg
11443 							ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
11445 							ins->inst_basereg = var->inst_basereg;
11446 							ins->inst_offset = var->inst_offset;
11447 							spec = INS_INFO (ins->opcode);
11449 							/* printf ("INS: "); mono_print_ins (ins); */
11450 							/* Create a store instruction */
11451 							NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
11453 							/* Insert it after the instruction */
11454 							mono_bblock_insert_after_ins (bb, ins, store_ins);
11456 							def_ins = store_ins;
11459 							 * We can't assign ins->dreg to var->dreg here, since the
11460 							 * sregs could use it. So set a flag, and do it after
/* fp-stack targets can't cache float lvregs; volatile/indirect vars
 * must always be re-read from memory. */
11463 							if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
11464 								dest_has_lvreg = TRUE;
11469 				if (def_ins && !live_range_start [dreg]) {
11470 					live_range_start [dreg] = def_ins;
11471 					live_range_start_bb [dreg] = bb;
/************/
/* SREGS    */
/************/
11478 			num_sregs = mono_inst_get_src_registers (ins, sregs);
11479 			for (srcindex = 0; srcindex < 3; ++srcindex) {
11480 				regtype = spec [MONO_INST_SRC1 + srcindex];
11481 				sreg = sregs [srcindex];
11483 				g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
11484 				if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
11485 					MonoInst *var = get_vreg_to_inst (cfg, sreg);
11486 					MonoInst *use_ins = ins;
11487 					MonoInst *load_ins;
11488 					guint32 load_opcode;
11490 					if (var->opcode == OP_REGVAR) {
11491 						sregs [srcindex] = var->dreg;
11492 						//mono_inst_set_src_registers (ins, sregs);
11493 						live_range_end [sreg] = use_ins;
11494 						live_range_end_bb [sreg] = bb;
11498 					g_assert (var->opcode == OP_REGOFFSET);
11500 					load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
11502 					g_assert (load_opcode != OP_LOADV_MEMBASE);
11504 					if (vreg_to_lvreg [sreg]) {
11505 						g_assert (vreg_to_lvreg [sreg] != -1);
11507 						/* The variable is already loaded to an lvreg */
11508 						if (G_UNLIKELY (cfg->verbose_level > 2))
11509 							printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
11510 						sregs [srcindex] = vreg_to_lvreg [sreg];
11511 						//mono_inst_set_src_registers (ins, sregs);
11515 					/* Try to fuse the load into the instruction */
11516 					if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
11517 						ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
11518 						sregs [0] = var->inst_basereg;
11519 						//mono_inst_set_src_registers (ins, sregs);
11520 						ins->inst_offset = var->inst_offset;
11521 					} else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
11522 						ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
11523 						sregs [1] = var->inst_basereg;
11524 						//mono_inst_set_src_registers (ins, sregs);
11525 						ins->inst_offset = var->inst_offset;
11527 						if (MONO_IS_REAL_MOVE (ins)) {
/* A move from the variable is replaced by the load alone. */
11528 							ins->opcode = OP_NOP;
11531 							//printf ("%d ", srcindex); mono_print_ins (ins);
11533 							sreg = alloc_dreg (cfg, stacktypes [regtype]);
11535 							if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
11536 								if (var->dreg == prev_dreg) {
11538 									 * sreg refers to the value loaded by the load
11539 									 * emitted below, but we need to use ins->dreg
11540 									 * since it refers to the store emitted earlier.
11544 								g_assert (sreg != -1);
/* Cache the freshly loaded value for later uses in this bblock. */
11545 								vreg_to_lvreg [var->dreg] = sreg;
11546 								g_assert (lvregs_len < 1024);
11547 								lvregs [lvregs_len ++] = var->dreg;
11551 						sregs [srcindex] = sreg;
11552 						//mono_inst_set_src_registers (ins, sregs);
11554 						if (regtype == 'l') {
/* Load the MS half first, then the LS half, both before `ins`. */
11555 							NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
11556 							mono_bblock_insert_before_ins (bb, ins, load_ins);
11557 							NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
11558 							mono_bblock_insert_before_ins (bb, ins, load_ins);
11559 							use_ins = load_ins;
11562 #if SIZEOF_REGISTER == 4
11563 							g_assert (load_opcode != OP_LOADI8_MEMBASE);
11565 							NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
11566 							mono_bblock_insert_before_ins (bb, ins, load_ins);
11567 							use_ins = load_ins;
/* Only track live ranges for original vregs, not lvregs allocated
 * during this pass (those are >= orig_next_vreg). */
11571 					if (var->dreg < orig_next_vreg) {
11572 						live_range_end [var->dreg] = use_ins;
11573 						live_range_end_bb [var->dreg] = bb;
11577 			mono_inst_set_src_registers (ins, sregs);
/* Deferred from the DREG section above: now that the sregs have been
 * processed, it is safe to publish the dreg lvreg. */
11579 			if (dest_has_lvreg) {
11580 				g_assert (ins->dreg != -1);
11581 				vreg_to_lvreg [prev_dreg] = ins->dreg;
11582 				g_assert (lvregs_len < 1024);
11583 				lvregs [lvregs_len ++] = prev_dreg;
11584 				dest_has_lvreg = FALSE;
/* Undo the dreg<->sreg2 swap performed for store opcodes above. */
11588 				tmp_reg = ins->dreg;
11589 				ins->dreg = ins->sreg2;
11590 				ins->sreg2 = tmp_reg;
/* Calls clobber everything: the lvreg cache is no longer valid. */
11593 			if (MONO_IS_CALL (ins)) {
11594 				/* Clear vreg_to_lvreg array */
11595 				for (i = 0; i < lvregs_len; i++)
11596 					vreg_to_lvreg [lvregs [i]] = 0;
11598 			} else if (ins->opcode == OP_NOP) {
11600 				MONO_INST_NULLIFY_SREGS (ins);
11603 			if (cfg->verbose_level > 2)
11604 				mono_print_ins_index (1, ins);
11607 		/* Extend the live range based on the liveness info */
11608 		if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
11609 			for (i = 0; i < cfg->num_varinfo; i ++) {
11610 				MonoMethodVar *vi = MONO_VARINFO (cfg, i);
11612 				if (vreg_is_volatile (cfg, vi->vreg))
11613 					/* The liveness info is incomplete */
11616 				if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
11617 					/* Live from at least the first ins of this bb */
11618 					live_range_start [vi->vreg] = bb->code;
11619 					live_range_start_bb [vi->vreg] = bb;
11622 				if (mono_bitset_test_fast (bb->live_out_set, i)) {
11623 					/* Live at least until the last ins of this bb */
11624 					live_range_end [vi->vreg] = bb->last_ins;
11625 					live_range_end_bb [vi->vreg] = bb;
11631 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
11633 	 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
11634 	 * by storing the current native offset into MonoMethodVar->live_range_start/end.
11636 	if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
11637 		for (i = 0; i < cfg->num_varinfo; ++i) {
11638 			int vreg = MONO_VARINFO (cfg, i)->vreg;
11641 			if (live_range_start [vreg]) {
11642 				MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
11644 				ins->inst_c1 = vreg;
11645 				mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
11647 			if (live_range_end [vreg]) {
11648 				MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
11650 				ins->inst_c1 = vreg;
11651 				if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
11652 					mono_add_ins_to_end (live_range_end_bb [vreg], ins);
11654 					mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
/* These four were g_new'd above (not mempool-allocated), so free them. */
11660 	g_free (live_range_start);
11661 	g_free (live_range_end);
11662 	g_free (live_range_start_bb);
11663 	g_free (live_range_end_bb);
11668 * - use 'iadd' instead of 'int_add'
11669 * - handling ovf opcodes: decompose in method_to_ir.
11670 * - unify iregs/fregs
11671 * -> partly done, the missing parts are:
11672 * - a more complete unification would involve unifying the hregs as well, so
11673 * code wouldn't need if (fp) all over the place. but that would mean the hregs
11674 * would no longer map to the machine hregs, so the code generators would need to
11675 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
11676 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
11677 * fp/non-fp branches speeds it up by about 15%.
11678 * - use sext/zext opcodes instead of shifts
11680 * - get rid of TEMPLOADs if possible and use vregs instead
11681 * - clean up usage of OP_P/OP_ opcodes
11682 * - cleanup usage of DUMMY_USE
11683 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
11685 * - set the stack type and allocate a dreg in the EMIT_NEW macros
11686 * - get rid of all the <foo>2 stuff when the new JIT is ready.
11687 * - make sure handle_stack_args () is called before the branch is emitted
11688 * - when the new IR is done, get rid of all unused stuff
11689 * - COMPARE/BEQ as separate instructions or unify them ?
11690 * - keeping them separate allows specialized compare instructions like
11691 * compare_imm, compare_membase
11692 * - most back ends unify fp compare+branch, fp compare+ceq
11693 * - integrate mono_save_args into inline_method
11694 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
11695 * - handle long shift opts on 32 bit platforms somehow: they require
11696 * 3 sregs (2 for arg1 and 1 for arg2)
11697 * - make byref a 'normal' type.
11698 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
11699 * variable if needed.
11700 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11701 * like inline_method.
11702 * - remove inlining restrictions
11703 * - fix LNEG and enable cfold of INEG
11704 * - generalize x86 optimizations like ldelema as a peephole optimization
11705 * - add store_mem_imm for amd64
11706 * - optimize the loading of the interruption flag in the managed->native wrappers
11707 * - avoid special handling of OP_NOP in passes
11708 * - move code inserting instructions into one function/macro.
11709 * - try a coalescing phase after liveness analysis
11710 * - add float -> vreg conversion + local optimizations on !x86
11711 * - figure out how to handle decomposed branches during optimizations, ie.
11712 * compare+branch, op_jump_table+op_br etc.
11713 * - promote RuntimeXHandles to vregs
11714 * - vtype cleanups:
11715 * - add a NEW_VARLOADA_VREG macro
11716 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11717 * accessing vtype fields.
11718 * - get rid of I8CONST on 64 bit platforms
11719 * - dealing with the increase in code size due to branches created during opcode
11721 * - use extended basic blocks
11722 * - all parts of the JIT
11723 * - handle_global_vregs () && local regalloc
11724 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11725 * - sources of increase in code size:
11728 * - isinst and castclass
11729 * - lvregs not allocated to global registers even if used multiple times
11730 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11732 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11733 * - add all micro optimizations from the old JIT
11734 * - put tree optimizations into the deadce pass
11735 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11736 * specific function.
11737 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11738 * fcompare + branchCC.
11739 * - create a helper function for allocating a stack slot, taking into account
11740 * MONO_CFG_HAS_SPILLUP.
11742 * - merge the ia64 switch changes.
11743 * - optimize mono_regstate2_alloc_int/float.
11744 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11745 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11746 * parts of the tree could be separated by other instructions, killing the tree
11747 * arguments, or stores killing loads etc. Also, should we fold loads into other
11748 * instructions if the result of the load is used multiple times ?
11749 * - make the REM_IMM optimization in mini-x86.c arch-independent.
11750 * - LAST MERGE: 108395.
11751 * - when returning vtypes in registers, generate IR and append it to the end of the
11752 * last bb instead of doing it in the epilog.
11753 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
11761 - When to decompose opcodes:
11762 - earlier: this makes some optimizations hard to implement, since the low level IR
11763  no longer contains the necessary information. But it is easier to do.
11764 - later: harder to implement, enables more optimizations.
11765 - Branches inside bblocks:
11766 - created when decomposing complex opcodes.
11767 - branches to another bblock: harmless, but not tracked by the branch
11768 optimizations, so need to branch to a label at the start of the bblock.
11769 - branches to inside the same bblock: very problematic, trips up the local
11770  reg allocator. Can be fixed by splitting the current bblock, but that is a
11771 complex operation, since some local vregs can become global vregs etc.
11772 - Local/global vregs:
11773 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11774 local register allocator.
11775 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11776 structure, created by mono_create_var (). Assigned to hregs or the stack by
11777 the global register allocator.
11778 - When to do optimizations like alu->alu_imm:
11779 - earlier -> saves work later on since the IR will be smaller/simpler
11780 - later -> can work on more instructions
11781 - Handling of valuetypes:
11782 - When a vtype is pushed on the stack, a new temporary is created, an
11783 instruction computing its address (LDADDR) is emitted and pushed on
11784 the stack. Need to optimize cases when the vtype is used immediately as in
11785 argument passing, stloc etc.
11786 - Instead of the to_end stuff in the old JIT, simply call the function handling
11787 the values on the stack before emitting the last instruction of the bb.
11790 #endif /* DISABLE_JIT */