2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
30 #include <mono/utils/memcheck.h>
32 #include <mono/metadata/assembly.h>
33 #include <mono/metadata/attrdefs.h>
34 #include <mono/metadata/loader.h>
35 #include <mono/metadata/tabledefs.h>
36 #include <mono/metadata/class.h>
37 #include <mono/metadata/object.h>
38 #include <mono/metadata/exception.h>
39 #include <mono/metadata/opcodes.h>
40 #include <mono/metadata/mono-endian.h>
41 #include <mono/metadata/tokentype.h>
42 #include <mono/metadata/tabledefs.h>
43 #include <mono/metadata/marshal.h>
44 #include <mono/metadata/debug-helpers.h>
45 #include <mono/metadata/mono-debug.h>
46 #include <mono/metadata/gc-internal.h>
47 #include <mono/metadata/security-manager.h>
48 #include <mono/metadata/threads-types.h>
49 #include <mono/metadata/security-core-clr.h>
50 #include <mono/metadata/monitor.h>
51 #include <mono/metadata/profiler-private.h>
52 #include <mono/metadata/profiler.h>
53 #include <mono/utils/mono-compiler.h>
54 #include <mono/metadata/mono-basic-block.h>
61 #include "jit-icalls.h"
63 #include "debugger-agent.h"
/* NOTE(review): tuning constants for the JIT; their use sites (presumably
 * inlining cost estimation) are not visible in this excerpt — confirm. */
65 #define BRANCH_COST 10
66 #define INLINE_LENGTH_LIMIT 20
/*
 * Error-exit helper macros. Each one records a failure on the MonoCompile
 * ("cfg") — typically via mono_cfg_set_exception () — and jumps to the
 * method-to-IR "exception_exit" label.
 * NOTE(review): this excerpt is line-sampled; the closing "} while (0)"
 * lines of these macros are not visible here.
 */
67 #define INLINE_FAILURE do {\
68 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
71 #define CHECK_CFG_EXCEPTION do {\
72 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
75 #define METHOD_ACCESS_FAILURE do { \
76 char *method_fname = mono_method_full_name (method, TRUE); \
77 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
78 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS); \
79 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
80 g_free (method_fname); \
81 g_free (cil_method_fname); \
82 goto exception_exit; \
84 #define FIELD_ACCESS_FAILURE do { \
85 char *method_fname = mono_method_full_name (method, TRUE); \
86 char *field_fname = mono_field_full_name (field); \
87 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS); \
88 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
89 g_free (method_fname); \
90 g_free (field_fname); \
91 goto exception_exit; \
93 #define GENERIC_SHARING_FAILURE(opcode) do { \
94 if (cfg->generic_sharing_context) { \
95 if (cfg->verbose_level > 2) \
96 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
97 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
98 goto exception_exit; \
101 #define OUT_OF_MEMORY_FAILURE do { \
102 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
103 goto exception_exit; \
105 /* Determine whether 'ins' represents a load of the 'this' argument */
106 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for opcode-mapping helpers defined later in the file. */
108 static int ldind_to_load_membase (int opcode);
109 static int stind_to_store_membase (int opcode);
/* Non-static: these mappings are used by other parts of the mini JIT. */
111 int mono_op_to_op_imm (int opcode);
112 int mono_op_to_op_imm_noemul (int opcode);
114 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
115 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
116 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
118 /* helper methods signatures */
/* Lazily-created icall signatures, filled in by mono_create_helper_signatures () below. */
119 static MonoMethodSignature *helper_sig_class_init_trampoline = NULL;
120 static MonoMethodSignature *helper_sig_domain_get = NULL;
121 static MonoMethodSignature *helper_sig_generic_class_init_trampoline = NULL;
122 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm = NULL;
123 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline = NULL;
124 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline = NULL;
125 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm = NULL;
128  * Instruction metadata
/* First expansion of MINI_OP/MINI_OP3: each opcode contributes its
 * dreg/sreg type characters; including mini-ops.h builds the table. */
136 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
137 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
143 #if SIZEOF_REGISTER == 8
148 /* keep in sync with the enum in mini.h */
151 #include "mini-ops.h"
/* Second expansion: each opcode contributes the index of its last used
 * source register + 1 (see the comment below about IA64_CMP_EQ_IMM). */
156 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
157 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
159  * This should contain the index of the last sreg + 1. This is not the same
160  * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
162 const gint8 ins_sreg_counts[] = {
163 #include "mini-ops.h"
/* Initialize liveness bookkeeping for variable 'vi' (truncated in this excerpt). */
168 #define MONO_INIT_VARINFO(vi,id) do { \
169 (vi)->range.first_use.pos.bid = 0xffff; \
/* Copy the three source registers from 'regs' into 'ins'. */
175 mono_inst_set_src_registers (MonoInst *ins, int *regs)
177 ins->sreg1 = regs [0];
178 ins->sreg2 = regs [1];
179 ins->sreg3 = regs [2];
/* Public wrappers around the vreg allocators: allocate a fresh integer,
 * float, pointer-sized, or stack-type-determined virtual register. */
183 mono_alloc_ireg (MonoCompile *cfg)
185 return alloc_ireg (cfg);
189 mono_alloc_freg (MonoCompile *cfg)
191 return alloc_freg (cfg);
195 mono_alloc_preg (MonoCompile *cfg)
197 return alloc_preg (cfg);
/* 'stack_type' selects the register class for the destination vreg. */
201 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
203 return alloc_dreg (cfg, stack_type);
/*
 * Map a MonoType to the move opcode used to copy a value of that type
 * between registers. NOTE(review): excerpt is line-sampled — the return
 * statements between the case labels are not visible here.
 */
207 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
213 switch (type->type) {
216 case MONO_TYPE_BOOLEAN:
228 case MONO_TYPE_FNPTR:
230 case MONO_TYPE_CLASS:
231 case MONO_TYPE_STRING:
232 case MONO_TYPE_OBJECT:
233 case MONO_TYPE_SZARRAY:
234 case MONO_TYPE_ARRAY:
238 #if SIZEOF_REGISTER == 8
247 case MONO_TYPE_VALUETYPE:
/* Enums are handled as their underlying integral type. */
248 if (type->data.klass->enumtype) {
249 type = mono_class_enum_basetype (type->data.klass);
252 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
255 case MONO_TYPE_TYPEDBYREF:
/* Generic instances are handled via their container class. */
257 case MONO_TYPE_GENERICINST:
258 type = &type->data.generic_class->container_class->byval_arg;
262 g_assert (cfg->generic_sharing_context);
265 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/* Debug helper: print a basic block's in/out edges and then every
 * instruction in the block, prefixed with 'msg'. */
271 mono_print_bb (MonoBasicBlock *bb, const char *msg)
276 printf ("\n%s %d: [IN: ", msg, bb->block_num);
277 for (i = 0; i < bb->in_count; ++i)
278 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
280 for (i = 0; i < bb->out_count; ++i)
281 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
283 for (tree = bb->code; tree; tree = tree->next)
284 mono_print_ins_index (-1, tree);
/* Build the icall signatures for the helper_sig_* globals declared above.
 * The string argument encodes "return-type [param-types...]". */
288 mono_create_helper_signatures (void)
290 helper_sig_domain_get = mono_create_icall_signature ("ptr");
291 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
292 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
293 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
294 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
295 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
296 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
300  * Can't put this at the beginning, since other files reference stuff from this
/* Mark the current IL as unverified/unloadable; optionally trap first when
 * the corresponding debug option is set. */
305 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
307 #define LOAD_ERROR do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto load_error; } while (0)
/* Look up (or create and register) the basic block starting at IL offset 'ip'. */
309 #define GET_BBLOCK(cfg,tblock,ip) do { \
310 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
312 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
313 NEW_BBLOCK (cfg, (tblock)); \
314 (tblock)->cil_code = (ip); \
315 ADD_BBLOCK (cfg, (tblock)); \
319 #if defined(TARGET_X86) || defined(TARGET_AMD64)
320 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
321 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
322 (dest)->dreg = alloc_preg ((cfg)); \
323 (dest)->sreg1 = (sr1); \
324 (dest)->sreg2 = (sr2); \
325 (dest)->inst_imm = (imm); \
326 (dest)->backend.shift_amount = (shift); \
327 MONO_ADD_INS ((cfg)->cbb, (dest)); \
331 #if SIZEOF_REGISTER == 8
332 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
333 /* FIXME: Need to add many more cases */ \
334 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
336 int dr = alloc_preg (cfg); \
337 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
338 (ins)->sreg2 = widen->dreg; \
342 #define ADD_WIDEN_OP(ins, arg1, arg2)
/* Emit a binary op: pop two stack entries, type-check via type_from_op (),
 * widen on 64-bit if needed, and push the (possibly decomposed) result. */
345 #define ADD_BINOP(op) do { \
346 MONO_INST_NEW (cfg, ins, (op)); \
348 ins->sreg1 = sp [0]->dreg; \
349 ins->sreg2 = sp [1]->dreg; \
350 type_from_op (ins, sp [0], sp [1]); \
352 /* Have to insert a widening op */ \
353 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
354 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
355 MONO_ADD_INS ((cfg)->cbb, (ins)); \
356 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
359 #define ADD_UNOP(op) do { \
360 MONO_INST_NEW (cfg, ins, (op)); \
362 ins->sreg1 = sp [0]->dreg; \
363 type_from_op (ins, sp [0], NULL); \
365 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
366 MONO_ADD_INS ((cfg)->cbb, (ins)); \
367 *sp++ = mono_decompose_opcode (cfg, ins); \
370 #define ADD_BINCOND(next_block) do { \
373 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
374 cmp->sreg1 = sp [0]->dreg; \
375 cmp->sreg2 = sp [1]->dreg; \
376 type_from_op (cmp, sp [0], sp [1]); \
378 type_from_op (ins, sp [0], sp [1]); \
379 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
380 GET_BBLOCK (cfg, tblock, target); \
381 link_bblock (cfg, bblock, tblock); \
382 ins->inst_true_bb = tblock; \
383 if ((next_block)) { \
384 link_bblock (cfg, bblock, (next_block)); \
385 ins->inst_false_bb = (next_block); \
386 start_new_bblock = 1; \
388 GET_BBLOCK (cfg, tblock, ip); \
389 link_bblock (cfg, bblock, tblock); \
390 ins->inst_false_bb = tblock; \
391 start_new_bblock = 2; \
393 if (sp != stack_start) { \
394 handle_stack_args (cfg, stack_start, sp - stack_start); \
395 CHECK_UNVERIFIABLE (cfg); \
397 MONO_ADD_INS (bblock, cmp); \
398 MONO_ADD_INS (bblock, ins); \
402  * link_bblock: Links two basic blocks
404  * links two basic blocks in the control flow graph, the 'from'
405  * argument is the starting block and the 'to' argument is the block
406  * the control flow ends to after 'from'.
409 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
411 MonoBasicBlock **newa;
/* Verbose tracing of the edge being added (entry/exit are special-cased). */
415 if (from->cil_code) {
417 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
419 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
422 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
424 printf ("edge from entry to exit\n");
/* Skip if the out-edge already exists. */
429 for (i = 0; i < from->out_count; ++i) {
430 if (to == from->out_bb [i]) {
436 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
437 for (i = 0; i < from->out_count; ++i) {
438 newa [i] = from->out_bb [i];
/* Same dance for the symmetric in-edge on 'to'. */
446 for (i = 0; i < to->in_count; ++i) {
447 if (from == to->in_bb [i]) {
453 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
454 for (i = 0; i < to->in_count; ++i) {
455 newa [i] = to->in_bb [i];
/* Public wrapper around the static link_bblock (). */
464 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
466 link_bblock (cfg, from, to);
470  * mono_find_block_region:
472  * We mark each basic block with a region ID. We use that to avoid BB
473  * optimizations when blocks are in different regions.
476  * A region token that encodes where this region is, and information
477  * about the clause owner for this block.
479  * The region encodes the try/catch/filter clause that owns this block
480  * as well as the type. -1 is a special value that represents a block
481  * that is in none of try/catch/filter.
484 mono_find_block_region (MonoCompile *cfg, int offset)
486 MonoMethodHeader *header = cfg->header;
487 MonoExceptionClause *clause;
/* Region token layout: (clause index + 1) << 8 | region kind | clause flags. */
490 for (i = 0; i < header->num_clauses; ++i) {
491 clause = &header->clauses [i];
/* A filter's code lies between filter_offset and handler_offset. */
492 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
493 (offset < (clause->handler_offset)))
494 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
496 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
497 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
498 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
499 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
500 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
502 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Inside the protected (try) range itself. */
505 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
506 return ((i + 1) << 8) | clause->flags;
/* Collect (as a GList) the exception clauses of kind 'type' whose protected
 * range contains 'ip' but not 'target' — i.e. the clauses whose handlers
 * must run when control leaves via a branch from ip to target. */
513 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
515 MonoMethodHeader *header = cfg->header;
516 MonoExceptionClause *clause;
520 for (i = 0; i < header->num_clauses; ++i) {
521 clause = &header->clauses [i];
522 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
523 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
524 if (clause->flags == type)
525 res = g_list_append (res, clause);
/* Get or create the stack-pointer variable associated with an EH region,
 * cached in cfg->spvars keyed by the region token. */
532 mono_create_spvar_for_region (MonoCompile *cfg, int region)
536 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
540 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
541 /* prevent it from being register allocated */
542 var->flags |= MONO_INST_INDIRECT;
544 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up the exception-object variable for an IL offset (NULL if absent). */
548 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
550 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Get or create the exception-object variable for an IL offset,
 * cached in cfg->exvars. */
554 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
558 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
562 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
563 /* prevent it from being register allocated */
564 var->flags |= MONO_INST_INDIRECT;
566 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
572  * Returns the type used in the eval stack when @type is loaded.
573  * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
576 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
580 inst->klass = klass = mono_class_from_mono_type (type);
/* Byref arguments are managed pointers on the eval stack. */
582 inst->type = STACK_MP;
587 switch (type->type) {
589 inst->type = STACK_INV;
593 case MONO_TYPE_BOOLEAN:
599 inst->type = STACK_I4;
604 case MONO_TYPE_FNPTR:
605 inst->type = STACK_PTR;
607 case MONO_TYPE_CLASS:
608 case MONO_TYPE_STRING:
609 case MONO_TYPE_OBJECT:
610 case MONO_TYPE_SZARRAY:
611 case MONO_TYPE_ARRAY:
612 inst->type = STACK_OBJ;
616 inst->type = STACK_I8;
620 inst->type = STACK_R8;
622 case MONO_TYPE_VALUETYPE:
/* Enums evaluate as their underlying integral type. */
623 if (type->data.klass->enumtype) {
624 type = mono_class_enum_basetype (type->data.klass);
628 inst->type = STACK_VTYPE;
631 case MONO_TYPE_TYPEDBYREF:
632 inst->klass = mono_defaults.typed_reference_class;
633 inst->type = STACK_VTYPE;
635 case MONO_TYPE_GENERICINST:
636 type = &type->data.generic_class->container_class->byval_arg;
639 case MONO_TYPE_MVAR :
640 /* FIXME: all the arguments must be references for now,
641 * later look inside cfg and see if the arg num is
644 g_assert (cfg->generic_sharing_context);
645 inst->type = STACK_OBJ;
648 g_error ("unknown type 0x%02x in eval stack type", type->type);
653  * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result stack type of numeric binops, indexed [src1->type][src2->type]. */
656 bin_num_table [STACK_MAX] [STACK_MAX] = {
657 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
658 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
659 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
660 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
661 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
662 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
663 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
664 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* NOTE(review): row below appears to belong to the negation-result table
 * (its declaration is not visible in this excerpt). */
669 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
672 /* reduce the size of this table */
/* Result stack type of integer-only binops (and/or/xor etc.). */
674 bin_int_table [STACK_MAX] [STACK_MAX] = {
675 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
676 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
677 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
678 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
679 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
680 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
681 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
682 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparison compatibility: 0 = invalid, non-zero codes mark valid
 * (and special-cased) operand pairings. */
686 bin_comp_table [STACK_MAX] [STACK_MAX] = {
687 /* Inv i L p F & O vt */
689 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
690 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
691 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
692 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
693 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
694 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
695 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
698 /* reduce the size of this table */
/* Result stack type of shift ops: [value type][shift-amount type]. */
700 shift_table [STACK_MAX] [STACK_MAX] = {
701 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
702 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
703 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
704 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
705 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
706 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
707 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
708 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
712  * Tables to map from the non-specific opcode to the matching
713  * type-specific opcode.
715 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
/* Each entry is the delta added to the generic opcode to select the
 * I4/I8/PTR/R8 specific variant, indexed by the result stack type. */
717 binops_op_map [STACK_MAX] = {
718 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
721 /* handles from CEE_NEG to CEE_CONV_U8 */
723 unops_op_map [STACK_MAX] = {
724 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
727 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
729 ovfops_op_map [STACK_MAX] = {
730 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
733 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
735 ovf2ops_op_map [STACK_MAX] = {
736 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
739 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
741 ovf3ops_op_map [STACK_MAX] = {
742 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
745 /* handles from CEE_BEQ to CEE_BLT_UN */
747 beqops_op_map [STACK_MAX] = {
748 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
751 /* handles from CEE_CEQ to CEE_CLT_UN */
753 ceqops_op_map [STACK_MAX] = {
754 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
758  * Sets ins->type (the type on the eval stack) according to the
759  * type of the opcode and the arguments to it.
760  * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
762  * FIXME: this function sets ins->type unconditionally in some cases, but
763  * it should set it to invalid for some types (a conv.x on an object)
766 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
768 switch (ins->opcode) {
/* Numeric binops: validate via bin_num_table, then specialize the opcode. */
775 /* FIXME: check unverifiable args for STACK_MP */
776 ins->type = bin_num_table [src1->type] [src2->type];
777 ins->opcode += binops_op_map [ins->type];
784 ins->type = bin_int_table [src1->type] [src2->type];
785 ins->opcode += binops_op_map [ins->type];
790 ins->type = shift_table [src1->type] [src2->type];
791 ins->opcode += binops_op_map [ins->type];
/* Compares: pick L/F/I variant by the first operand's stack type. */
796 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
797 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
798 ins->opcode = OP_LCOMPARE;
799 else if (src1->type == STACK_R8)
800 ins->opcode = OP_FCOMPARE;
802 ins->opcode = OP_ICOMPARE;
804 case OP_ICOMPARE_IMM:
805 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
806 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
807 ins->opcode = OP_LCOMPARE_IMM;
819 ins->opcode += beqops_op_map [src1->type];
822 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
823 ins->opcode += ceqops_op_map [src1->type];
829 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
830 ins->opcode += ceqops_op_map [src1->type];
/* Unary ops (neg/not): result type follows the operand. */
834 ins->type = neg_table [src1->type];
835 ins->opcode += unops_op_map [ins->type];
838 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
839 ins->type = src1->type;
841 ins->type = STACK_INV;
842 ins->opcode += unops_op_map [ins->type];
848 ins->type = STACK_I4;
849 ins->opcode += unops_op_map [src1->type];
852 ins->type = STACK_R8;
853 switch (src1->type) {
856 ins->opcode = OP_ICONV_TO_R_UN;
859 ins->opcode = OP_LCONV_TO_R_UN;
/* Overflow-checked conversions to 32-bit destinations. */
863 case CEE_CONV_OVF_I1:
864 case CEE_CONV_OVF_U1:
865 case CEE_CONV_OVF_I2:
866 case CEE_CONV_OVF_U2:
867 case CEE_CONV_OVF_I4:
868 case CEE_CONV_OVF_U4:
869 ins->type = STACK_I4;
870 ins->opcode += ovf3ops_op_map [src1->type];
872 case CEE_CONV_OVF_I_UN:
873 case CEE_CONV_OVF_U_UN:
874 ins->type = STACK_PTR;
875 ins->opcode += ovf2ops_op_map [src1->type];
877 case CEE_CONV_OVF_I1_UN:
878 case CEE_CONV_OVF_I2_UN:
879 case CEE_CONV_OVF_I4_UN:
880 case CEE_CONV_OVF_U1_UN:
881 case CEE_CONV_OVF_U2_UN:
882 case CEE_CONV_OVF_U4_UN:
883 ins->type = STACK_I4;
884 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: pointer-width result; the opcode depends on word size. */
887 ins->type = STACK_PTR;
888 switch (src1->type) {
890 ins->opcode = OP_ICONV_TO_U;
894 #if SIZEOF_REGISTER == 8
895 ins->opcode = OP_LCONV_TO_U;
897 ins->opcode = OP_MOVE;
901 ins->opcode = OP_LCONV_TO_U;
904 ins->opcode = OP_FCONV_TO_U;
910 ins->type = STACK_I8;
911 ins->opcode += unops_op_map [src1->type];
913 case CEE_CONV_OVF_I8:
914 case CEE_CONV_OVF_U8:
915 ins->type = STACK_I8;
916 ins->opcode += ovf3ops_op_map [src1->type];
918 case CEE_CONV_OVF_U8_UN:
919 case CEE_CONV_OVF_I8_UN:
920 ins->type = STACK_I8;
921 ins->opcode += ovf2ops_op_map [src1->type];
925 ins->type = STACK_R8;
926 ins->opcode += unops_op_map [src1->type];
929 ins->type = STACK_R8;
933 ins->type = STACK_I4;
934 ins->opcode += ovfops_op_map [src1->type];
939 ins->type = STACK_PTR;
940 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic: not defined on floats. */
948 ins->type = bin_num_table [src1->type] [src2->type];
949 ins->opcode += ovfops_op_map [src1->type];
950 if (ins->type == STACK_R8)
951 ins->type = STACK_INV;
953 case OP_LOAD_MEMBASE:
954 ins->type = STACK_PTR;
956 case OP_LOADI1_MEMBASE:
957 case OP_LOADU1_MEMBASE:
958 case OP_LOADI2_MEMBASE:
959 case OP_LOADU2_MEMBASE:
960 case OP_LOADI4_MEMBASE:
961 case OP_LOADU4_MEMBASE:
962 ins->type = STACK_PTR;
964 case OP_LOADI8_MEMBASE:
965 ins->type = STACK_I8;
967 case OP_LOADR4_MEMBASE:
968 case OP_LOADR8_MEMBASE:
969 ins->type = STACK_R8;
972 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers carry object_class as a conservative klass. */
976 if (ins->type == STACK_MP)
977 ins->klass = mono_defaults.object_class;
/* NOTE(review): row below appears to belong to a MonoType->stack-type
 * lookup table whose declaration is not visible in this excerpt. */
982 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
988 param_table [STACK_MAX] [STACK_MAX] = {
/* Validate that the stack values in 'args' are compatible with 'sig'
 * (byref-ness and basic type class per parameter). */
993 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
997 switch (args->type) {
1007 for (i = 0; i < sig->param_count; ++i) {
1008 switch (args [i].type) {
1012 if (!sig->params [i]->byref)
1016 if (sig->params [i]->byref)
1018 switch (sig->params [i]->type) {
1019 case MONO_TYPE_CLASS:
1020 case MONO_TYPE_STRING:
1021 case MONO_TYPE_OBJECT:
1022 case MONO_TYPE_SZARRAY:
1023 case MONO_TYPE_ARRAY:
/* R8 on the stack must match a floating-point parameter. */
1030 if (sig->params [i]->byref)
1032 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1041 /*if (!param_table [args [i].type] [sig->params [i]->type])
1049  * When we need a pointer to the current domain many times in a method, we
1050  * call mono_domain_get() once and we store the result in a local variable.
1051  * This function returns the variable that represents the MonoDomain*.
1053 inline static MonoInst *
1054 mono_get_domainvar (MonoCompile *cfg)
1056 if (!cfg->domainvar)
1057 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1058 return cfg->domainvar;
1062  * The got_var contains the address of the Global Offset Table when AOT
/* Lazily create the GOT variable; only meaningful for AOT compilation
 * on architectures that define MONO_ARCH_NEED_GOT_VAR. */
1066 mono_get_got_var (MonoCompile *cfg)
1068 #ifdef MONO_ARCH_NEED_GOT_VAR
1069 if (!cfg->compile_aot)
1071 if (!cfg->got_var) {
1072 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1074 return cfg->got_var;
/* Lazily create the rgctx (runtime generic context) variable; only valid
 * when compiling with generic sharing enabled. */
1081 mono_get_vtable_var (MonoCompile *cfg)
1083 g_assert (cfg->generic_sharing_context);
1085 if (!cfg->rgctx_var) {
1086 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1087 /* force the var to be stack allocated */
1088 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1091 return cfg->rgctx_var;
/* Inverse of type_to_eval_stack_type (): map an instruction's eval-stack
 * type back to a MonoType usable for creating a variable. */
1095 type_from_stack_type (MonoInst *ins) {
1096 switch (ins->type) {
1097 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1098 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1099 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1100 case STACK_R8: return &mono_defaults.double_class->byval_arg;
/* Managed pointers use the pointed-to class's this_arg type. */
1102 return &ins->klass->this_arg;
1103 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1104 case STACK_VTYPE: return &ins->klass->byval_arg;
1106 g_error ("stack type %d to monotype not handled\n", ins->type);
/* Map a MonoType (after stripping enum wrappers) to its eval-stack type.
 * NOTE(review): excerpt is line-sampled — the return statements between
 * the case labels are not visible here. */
1111 static G_GNUC_UNUSED int
1112 type_to_stack_type (MonoType *t)
1114 t = mono_type_get_underlying_type (t);
1118 case MONO_TYPE_BOOLEAN:
1121 case MONO_TYPE_CHAR:
1128 case MONO_TYPE_FNPTR:
1130 case MONO_TYPE_CLASS:
1131 case MONO_TYPE_STRING:
1132 case MONO_TYPE_OBJECT:
1133 case MONO_TYPE_SZARRAY:
1134 case MONO_TYPE_ARRAY:
1142 case MONO_TYPE_VALUETYPE:
1143 case MONO_TYPE_TYPEDBYREF:
1145 case MONO_TYPE_GENERICINST:
1146 if (mono_type_generic_inst_is_valuetype (t))
1152 g_assert_not_reached ();
/* Map a CEE_LDELEM_*/CEE_STELEM_* opcode to the element MonoClass it
 * accesses. NOTE(review): most case labels are missing from this excerpt;
 * only the return values (and the LDELEM/STELEM_REF cases) are visible. */
1159 array_access_to_klass (int opcode)
1163 return mono_defaults.byte_class;
1165 return mono_defaults.uint16_class;
1168 return mono_defaults.int_class;
1171 return mono_defaults.sbyte_class;
1174 return mono_defaults.int16_class;
1177 return mono_defaults.int32_class;
1179 return mono_defaults.uint32_class;
1182 return mono_defaults.int64_class;
1185 return mono_defaults.single_class;
1188 return mono_defaults.double_class;
1189 case CEE_LDELEM_REF:
1190 case CEE_STELEM_REF:
1191 return mono_defaults.object_class;
1193 g_assert_not_reached ();
1199  * We try to share variables when possible
/* Return a variable to hold the value at stack 'slot' of type ins->type,
 * reusing a cached one (cfg->intvars) when slot and type match. */
1202 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1207 /* inlining can result in deeper stacks */
1208 if (slot >= cfg->header->max_stack)
1209 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Cache key: one entry per (slot, stack type) pair. */
1211 pos = ins->type - 1 + slot * STACK_MAX;
1213 switch (ins->type) {
1220 if ((vnum = cfg->intvars [pos]))
1221 return cfg->varinfo [vnum];
1222 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1223 cfg->intvars [pos] = res->inst_c0;
1226 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* When compiling AOT, record the (image, token) pair for 'key' in
 * cfg->token_info_hash so the AOT compiler can resolve it later. */
1232 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1235 * Don't use this if a generic_context is set, since that means AOT can't
1236 * look up the method using just the image+token.
1237 * table == 0 means this is a reference made from a wrapper.
1239 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1240 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1241 jump_info_token->image = image;
1242 jump_info_token->token = token;
1243 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1248  * This function is called to handle items that are left on the evaluation stack
1249  * at basic block boundaries. What happens is that we save the values to local variables
1250  * and we reload them later when first entering the target basic block (with the
1251  * handle_loaded_temps () function).
1252  * A single joint point will use the same variables (stored in the array bb->out_stack or
1253  * bb->in_stack, if the basic block is before or after the joint point).
1255  * This function needs to be called _before_ emitting the last instruction of
1256  * the bb (i.e. before emitting a branch).
1257  * If the stack merge fails at a join point, cfg->unverifiable is set.
1260 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1263 MonoBasicBlock *bb = cfg->cbb;
1264 MonoBasicBlock *outb;
1265 MonoInst *inst, **locals;
1270 if (cfg->verbose_level > 3)
1271 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bb: pick or allocate the out_stack variables. */
1272 if (!bb->out_scount) {
1273 bb->out_scount = count;
1274 //printf ("bblock %d has out:", bb->block_num);
/* Prefer reusing an existing in_stack of a successor block. */
1276 for (i = 0; i < bb->out_count; ++i) {
1277 outb = bb->out_bb [i];
1278 /* exception handlers are linked, but they should not be considered for stack args */
1279 if (outb->flags & BB_EXCEPTION_HANDLER)
1281 //printf (" %d", outb->block_num);
1282 if (outb->in_stack) {
1284 bb->out_stack = outb->in_stack;
1290 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1291 for (i = 0; i < count; ++i) {
1293 * try to reuse temps already allocated for this purpouse, if they occupy the same
1294 * stack slot and if they are of the same type.
1295 * This won't cause conflicts since if 'local' is used to
1296 * store one of the values in the in_stack of a bblock, then
1297 * the same variable will be used for the same outgoing stack
1299 * This doesn't work when inlining methods, since the bblocks
1300 * in the inlined methods do not inherit their in_stack from
1301 * the bblock they are inlined to. See bug #58863 for an
1304 if (cfg->inlined_method)
1305 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1307 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out_stack as the in_stack of each successor,
 * checking that stack depths agree at join points. */
1312 for (i = 0; i < bb->out_count; ++i) {
1313 outb = bb->out_bb [i];
1314 /* exception handlers are linked, but they should not be considered for stack args */
1315 if (outb->flags & BB_EXCEPTION_HANDLER)
1317 if (outb->in_scount) {
1318 if (outb->in_scount != bb->out_scount) {
1319 cfg->unverifiable = TRUE;
1322 continue; /* check they are the same locals */
1324 outb->in_scount = count;
1325 outb->in_stack = bb->out_stack;
1328 locals = bb->out_stack;
/* Spill the current stack values into the shared temporaries. */
1330 for (i = 0; i < count; ++i) {
1331 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1332 inst->cil_code = sp [i]->cil_code;
1333 sp [i] = locals [i];
1334 if (cfg->verbose_level > 3)
1335 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1339 * It is possible that the out bblocks already have in_stack assigned, and
1340 * the in_stacks differ. In this case, we will store to all the different
1347 /* Find a bblock which has a different in_stack */
1349 while (bindex < bb->out_count) {
1350 outb = bb->out_bb [bindex];
1351 /* exception handlers are linked, but they should not be considered for stack args */
1352 if (outb->flags & BB_EXCEPTION_HANDLER) {
1356 if (outb->in_stack != locals) {
1357 for (i = 0; i < count; ++i) {
1358 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1359 inst->cil_code = sp [i]->cil_code;
1360 sp [i] = locals [i];
1361 if (cfg->verbose_level > 3)
1362 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1364 locals = outb->in_stack;
/* Emit code which loads interface_offsets [klass->interface_id]
 * The array is stored in memory before vtable.
 */
mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
	if (cfg->compile_aot) {
		/* AOT: the interface id is only known at load time, so fetch it through an AOT constant. */
		int ioffset_reg = alloc_preg (cfg);
		int iid_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
		MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
		/* JIT: the slot offset folds into a constant negative offset before the vtable. */
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_interface_bitmap_check:
 *   Load into "intf_bit_reg" a nonzero value when the interface bitmap at
 *   base_reg + offset has the bit for klass->interface_id set.
 */
mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
	int ibitmap_reg = alloc_preg (cfg);
#ifdef COMPRESSED_INTERFACE_BITMAP
	/* Compressed bitmap: delegate the bit test to the mono_class_interface_match icall. */
	MonoInst *res, *ins;
	NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
	MONO_ADD_INS (cfg->cbb, ins);
	if (cfg->compile_aot)
		EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
		EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
	res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
	MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
	int ibitmap_byte_reg = alloc_preg (cfg);
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
	if (cfg->compile_aot) {
		/* AOT: compute the byte index (iid >> 3) and the bit mask (1 << (iid & 7)) at runtime. */
		int iid_reg = alloc_preg (cfg);
		int shifted_iid_reg = alloc_preg (cfg);
		int ibitmap_byte_address_reg = alloc_preg (cfg);
		int masked_iid_reg = alloc_preg (cfg);
		int iid_one_bit_reg = alloc_preg (cfg);
		int iid_bit_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
		MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
		MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
		MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
		MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
		/* JIT: byte index and bit mask fold into immediates. */
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));

/*
 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
 * stored in "klass_reg" implements the interface "klass".
 */
mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
	mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);

/*
 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
 * stored in "vtable_reg" implements the interface "klass".
 */
mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
	mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
/*
 * Emit code which checks whether the interface id of @klass is smaller than
 * the value given by max_iid_reg.
 */
mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
						 MonoBasicBlock *false_target)
	if (cfg->compile_aot) {
		/* AOT: the interface id comes from an AOT constant instead of an immediate. */
		int iid_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
	/* With a false_target, branch there on failure; otherwise throw InvalidCastException. */
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
		MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");

/* Same as above, but obtains max_iid from a vtable */
mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
								MonoBasicBlock *false_target)
	int max_iid_reg = alloc_preg (cfg);
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
	mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);

/* Same as above, but obtains max_iid from a klass */
mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
							   MonoBasicBlock *false_target)
	int max_iid_reg = alloc_preg (cfg);
	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
	mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 *   Emit an isinst-style subclass test: read the entry at index
 *   klass->idepth - 1 of the supertypes array of the class in klass_reg and
 *   compare it against klass (through klass_ins->dreg, an AOT class constant,
 *   or an immediate), branching to true_target on a match.
 */
mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
	int idepth_reg = alloc_preg (cfg);
	int stypes_reg = alloc_preg (cfg);
	int stype = alloc_preg (cfg);
	/* Classes deeper than the default supertable need an explicit idepth range check. */
	if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
	} else if (cfg->compile_aot) {
		int const_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);

/* Convenience wrapper: isinst test without a klass instruction. */
mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
	mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);

/*
 * mini_emit_iface_cast:
 *   Interface cast check against a vtable: verify max_interface_id covers the
 *   interface id, then test the interface-bitmap bit.
 */
mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
	int intf_reg = alloc_preg (cfg);
	mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
	mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
	/* With a true_target, branch there when the bit is set; otherwise throw on failure. */
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
		MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");

/*
 * Variant of the above that takes a register to the class, not the vtable.
 */
mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
	int intf_bit_reg = alloc_preg (cfg);
	mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
	mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
		MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 *   Emit an exact class-equality check between the class in klass_reg and
 *   klass (via klass_inst->dreg, an AOT class constant, or an immediate),
 *   throwing InvalidCastException on mismatch.
 */
mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
	} else if (cfg->compile_aot) {
		int const_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");

/* Exact class check without a klass instruction. */
mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
	return mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);

/* Compare the class in klass_reg against klass and branch to target with branch_op. */
mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
	if (cfg->compile_aot) {
		int const_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/* Forward declaration: defined below, used for recursive element-class casts. */
mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);

/*
 * mini_emit_castclass_inst:
 *   Emit a castclass check of the object in obj_reg (class in klass_reg)
 *   against klass, throwing InvalidCastException on failure. Array classes
 *   get rank + element-class checks; ordinary classes a supertypes check.
 */
mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
		int rank_reg = alloc_preg (cfg);
		int eclass_reg = alloc_preg (cfg);
		g_assert (!klass_inst);
		/* Array case: the ranks must match exactly. */
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
		MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
		//		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
		/* Element-class checks; the enum/object special cases mirror array covariance rules. */
		if (klass->cast_class == mono_defaults.object_class) {
			int parent_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
			mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
		} else if (klass->cast_class == mono_defaults.enum_class->parent) {
			mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
		} else if (klass->cast_class == mono_defaults.enum_class) {
			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
		} else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
			mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
			// Pass -1 as obj_reg to skip the check below for arrays of arrays
			mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
		if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
			/* Check that the object is a vector too */
			int bounds_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
			MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
		int idepth_reg = alloc_preg (cfg);
		int stypes_reg = alloc_preg (cfg);
		int stype = alloc_preg (cfg);
		/* Non-array case: compare the supertypes entry at index klass->idepth - 1. */
		if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
			MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
		mini_emit_class_check_inst (cfg, stype, klass, klass_inst);

/* Castclass check without a klass instruction. */
mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
	return mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 *   Emit inline code setting "size" bytes at destreg + offset to "val"
 *   (only val == 0 is supported), using the widest stores "align" permits.
 */
mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
	/* Only zeroing is implemented. */
	g_assert (val == 0);
	/* Small, sufficiently aligned blocks: a single immediate store. */
	if ((size <= 4) && (size <= align)) {
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
#if SIZEOF_REGISTER == 8
			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
	/* Otherwise materialize the value in a register and emit unrolled stores. */
	val_reg = alloc_preg (cfg);
	if (SIZEOF_REGISTER == 8)
		MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
		MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
	/* Unaligned destination: byte stores only. */
	/* This could be optimized further if necessary */
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
#if !NO_UNALIGNED_ACCESS
	if (SIZEOF_REGISTER == 8) {
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
	/* Remaining 4-, 2- and 1-byte tails. */
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 *   Emit inline code copying "size" bytes from srcreg + soffset to
 *   destreg + doffset, using load/store pairs of the widest width
 *   "align" permits.
 */
mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
	/*FIXME arbitrary hack to avoid unbound code expansion.*/
	g_assert (size < 10000);
	/* Unaligned copy: byte-by-byte. */
	/* This could be optimized further if necessary */
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
#if !NO_UNALIGNED_ACCESS
	if (SIZEOF_REGISTER == 8) {
		/* 8-byte chunks on 64-bit targets. */
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
	/* Remaining 4-, 2- and 1-byte tails. */
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
		cur_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * ret_type_to_call_opcode:
 *   Map a return type to its call opcode family — CALL, VOIDCALL,
 *   LCALL (64-bit int), FCALL (float) or VCALL (valuetype) — with the
 *   _REG / *CALLVIRT variants chosen by "calli" and "virt".
 */
ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
		return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
	/* Resolve shared generic parameters to their basic type first. */
	type = mini_get_basic_type_from_generic (gsctx, type);
	switch (type->type) {
	case MONO_TYPE_VOID:
		return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
	case MONO_TYPE_BOOLEAN:
	case MONO_TYPE_CHAR:
		return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
	case MONO_TYPE_FNPTR:
		return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
		return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
		return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
	case MONO_TYPE_VALUETYPE:
		/* Enums are treated as their underlying basetype. */
		if (type->data.klass->enumtype) {
			type = mono_class_enum_basetype (type->data.klass);
		return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
	case MONO_TYPE_TYPEDBYREF:
		return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
	case MONO_TYPE_GENERICINST:
		/* Re-dispatch on the generic container's open type. */
		type = &type->data.generic_class->container_class->byval_arg;
	g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
/*
 * target_type_is_incompatible:
 * @cfg: MonoCompile context
 *
 * Check that the item @arg on the evaluation stack can be stored
 * in the target type (can be a local, or field, etc).
 * The cfg arg can be used to check if we need verification or just
 * validity checks.
 *
 * Returns: non-0 value if arg can't be stored on a target.
 */
target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
	MonoType *simple_type;
	if (target->byref) {
		/* FIXME: check that the pointed to types match */
		if (arg->type == STACK_MP)
			return arg->klass != mono_class_from_mono_type (target);
		if (arg->type == STACK_PTR)
	simple_type = mono_type_get_underlying_type (target);
	switch (simple_type->type) {
	case MONO_TYPE_VOID:
	case MONO_TYPE_BOOLEAN:
	case MONO_TYPE_CHAR:
		if (arg->type != STACK_I4 && arg->type != STACK_PTR)
		/* STACK_MP is needed when setting pinned locals */
		if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
	case MONO_TYPE_FNPTR:
		if (arg->type != STACK_I4 && arg->type != STACK_PTR)
	case MONO_TYPE_CLASS:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
		if (arg->type != STACK_OBJ)
		/* FIXME: check type compatibility */
		if (arg->type != STACK_I8)
		if (arg->type != STACK_R8)
	case MONO_TYPE_VALUETYPE:
		/* Valuetypes must match exactly, class included. */
		if (arg->type != STACK_VTYPE)
		klass = mono_class_from_mono_type (simple_type);
		if (klass != arg->klass)
	case MONO_TYPE_TYPEDBYREF:
		if (arg->type != STACK_VTYPE)
		klass = mono_class_from_mono_type (simple_type);
		if (klass != arg->klass)
	case MONO_TYPE_GENERICINST:
		/* A generic instance is a valuetype or a reference, depending on its instantiation. */
		if (mono_type_generic_inst_is_valuetype (simple_type)) {
			if (arg->type != STACK_VTYPE)
			klass = mono_class_from_mono_type (simple_type);
			if (klass != arg->klass)
		if (arg->type != STACK_OBJ)
		/* FIXME: check type compatibility */
	case MONO_TYPE_MVAR:
		/* FIXME: all the arguments must be references for now,
		 * later look inside cfg and see if the arg num is
		 * really a reference
		 */
		g_assert (cfg->generic_sharing_context);
		if (arg->type != STACK_OBJ)
	g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
/*
 * Prepare arguments for passing to a function call.
 * Return a non-zero value if the arguments can't be passed to the given
 * signature.
 * The type checks are not yet complete and some conversions may need
 * casts on 32 or 64 bit architectures.
 *
 * FIXME: implement this using target_type_is_incompatible ()
 */
check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
	MonoType *simple_type;
	/* The 'this' argument must be an object, managed pointer or native pointer. */
	if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
	for (i = 0; i < sig->param_count; ++i) {
		if (sig->params [i]->byref) {
			if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
		simple_type = sig->params [i];
		/* Resolve shared generic parameters before classifying. */
		simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
		switch (simple_type->type) {
		case MONO_TYPE_VOID:
		case MONO_TYPE_BOOLEAN:
		case MONO_TYPE_CHAR:
			if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
		case MONO_TYPE_FNPTR:
			if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
		case MONO_TYPE_CLASS:
		case MONO_TYPE_STRING:
		case MONO_TYPE_OBJECT:
		case MONO_TYPE_SZARRAY:
		case MONO_TYPE_ARRAY:
			if (args [i]->type != STACK_OBJ)
			if (args [i]->type != STACK_I8)
			if (args [i]->type != STACK_R8)
		case MONO_TYPE_VALUETYPE:
			/* Enums are checked as their underlying basetype. */
			if (simple_type->data.klass->enumtype) {
				simple_type = mono_class_enum_basetype (simple_type->data.klass);
			if (args [i]->type != STACK_VTYPE)
		case MONO_TYPE_TYPEDBYREF:
			if (args [i]->type != STACK_VTYPE)
		case MONO_TYPE_GENERICINST:
			simple_type = &simple_type->data.generic_class->container_class->byval_arg;
			g_error ("unknown type 0x%02x in check_call_signature",
/* Map an OP_*CALLVIRT opcode to its direct-call counterpart. */
callvirt_to_call (int opcode)
	case OP_VOIDCALLVIRT:
	g_assert_not_reached ();

/* Map an OP_*CALLVIRT opcode to its *CALL_MEMBASE counterpart (indirect call through a memory slot). */
callvirt_to_call_membase (int opcode)
		return OP_CALL_MEMBASE;
	case OP_VOIDCALLVIRT:
		return OP_VOIDCALL_MEMBASE;
		return OP_FCALL_MEMBASE;
		return OP_LCALL_MEMBASE;
		return OP_VCALL_MEMBASE;
	g_assert_not_reached ();
#ifdef MONO_ARCH_HAVE_IMT
/*
 * emit_imt_argument:
 *   Materialize the IMT argument (imt_arg's vreg, an AOT method constant, or
 *   the method pointer) into a register and attach it to "call", either in
 *   MONO_ARCH_IMT_REG or via the arch-specific mono_arch_emit_imt_argument ().
 */
emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
	if (COMPILE_LLVM (cfg)) {
		method_reg = alloc_preg (cfg);
			MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
		} else if (cfg->compile_aot) {
			MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
			MONO_INST_NEW (cfg, ins, OP_PCONST);
			ins->inst_p0 = call->method;
			ins->dreg = method_reg;
			MONO_ADD_INS (cfg->cbb, ins);
		call->imt_arg_reg = method_reg;
#ifdef MONO_ARCH_IMT_REG
		mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
		/* Need this to keep the IMT arg alive */
		mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
#ifdef MONO_ARCH_IMT_REG
	method_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
	} else if (cfg->compile_aot) {
		MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
		MONO_INST_NEW (cfg, ins, OP_PCONST);
		ins->inst_p0 = call->method;
		ins->dreg = method_reg;
		MONO_ADD_INS (cfg->cbb, ins);
	mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
	/* Architectures without a dedicated IMT register pass it their own way. */
	mono_arch_emit_imt_argument (cfg, call, imt_arg);
/* Allocate a MonoJumpInfo patch record from mempool "mp" and fill in its target. */
static MonoJumpInfo *
mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
	MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
	ji->data.target = target;
/*
 * mono_emit_call_args:
 *   Build a MonoCallInst for a call with signature "sig" and arguments
 *   "args": pick the call opcode, set up value-type return handling
 *   (vret_var / OP_OUTARG_VTRETADDR), perform soft-float r4 argument
 *   conversion, then let the backend (or LLVM) lower argument passing.
 */
inline static MonoCallInst *
mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
					 MonoInst **args, int calli, int virtual, int tail)
#ifdef MONO_ARCH_SOFT_FLOAT
		MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
		MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
	call->signature = sig;
	type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
	if (MONO_TYPE_ISSTRUCT (sig->ret)) {
		call->vret_var = cfg->vret_addr;
		//g_assert_not_reached ();
	} else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
		/* Allocate a local to receive the value-type return. */
		MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
		temp->backend.is_pinvoke = sig->pinvoke;
		/*
		 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
		 * address of return value to increase optimization opportunities.
		 * Before vtype decomposition, the dreg of the call ins itself represents the
		 * fact the call modifies the return value. After decomposition, the call will
		 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
		 * will be transformed into an LDADDR.
		 */
		MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
		loada->dreg = alloc_preg (cfg);
		loada->inst_p0 = temp;
		/* We reference the call too since call->dreg could change during optimization */
		loada->inst_p1 = call;
		MONO_ADD_INS (cfg->cbb, loada);
		call->inst.dreg = temp->dreg;
		call->vret_var = loada;
	} else if (!MONO_TYPE_IS_VOID (sig->ret))
		call->inst.dreg = alloc_dreg (cfg, call->inst.type);
#ifdef MONO_ARCH_SOFT_FLOAT
	if (COMPILE_SOFT_FLOAT (cfg)) {
		/*
		 * If the call has a float argument, we would need to do an r8->r4 conversion using
		 * an icall, but that cannot be done during the call sequence since it would clobber
		 * the call registers + the stack. So we do it before emitting the call.
		 */
		for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
			MonoInst *in = call->args [i];
			if (i >= sig->hasthis)
				t = sig->params [i - sig->hasthis];
				t = &mono_defaults.int_class->byval_arg;
			t = mono_type_get_underlying_type (t);
			if (!t->byref && t->type == MONO_TYPE_R4) {
				MonoInst *iargs [1];
				conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
				/* The result will be in an int vreg */
				call->args [i] = conv;
	if (COMPILE_LLVM (cfg))
		mono_llvm_emit_call (cfg, call);
		mono_arch_emit_call (cfg, call);
	mono_arch_emit_call (cfg, call);
	/* Track the largest outgoing-argument area needed by any call in this method. */
	cfg->param_area = MAX (cfg->param_area, call->stack_usage);
	cfg->flags |= MONO_CFG_HAS_CALLS;
/* Emit an indirect call through the address in "addr". */
inline static MonoInst*
mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
	MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE);
	call->inst.sreg1 = addr->dreg;
	MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
	return (MonoInst*)call;

/* Attach the rgctx argument to "call", using the dedicated register when the arch has one. */
set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
#ifdef MONO_ARCH_RGCTX_REG
	mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
	cfg->uses_rgctx_reg = TRUE;
	call->rgctx_reg = TRUE;
	call->rgctx_arg_reg = rgctx_reg;

/* Indirect call that also passes a runtime-generic-context argument. */
inline static MonoInst*
mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
	rgctx_reg = mono_alloc_preg (cfg);
	MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
	call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
	set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
	return (MonoInst*)call;
/* Forward declarations: defined later in this file. */
emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, int rgctx_type);

/*
 * mono_emit_method_call_full:
 *   Emit a call to "method", choosing among: a remoting invoke-with-check
 *   path, the delegate Invoke fast path, a direct call (non-virtual or
 *   sealed), an IMT-based interface call, or a vtable-slot indirect call.
 */
mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
							MonoInst **args, MonoInst *this, MonoInst *imt_arg)
	gboolean might_be_remote;
	gboolean virtual = this != NULL;
	gboolean enable_for_aot = TRUE;
	if (method->string_ctor) {
		/* Create the real signature */
		/* FIXME: Cache these */
		MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
		ctor_sig->ret = &mono_defaults.string_class->byval_arg;
	/* Calls on MarshalByRef (or object) targets may need the remoting invoke-with-check wrapper. */
	might_be_remote = this && sig->hasthis &&
		(method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
		!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this);
	context_used = mono_method_check_context_used (method);
	if (might_be_remote && context_used) {
		/* Generic sharing: fetch the wrapper address from the rgctx and call indirectly. */
		g_assert (cfg->generic_sharing_context);
		addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
		return mono_emit_calli (cfg, sig, args, addr);
	call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE);
	if (might_be_remote)
		call->method = mono_marshal_get_remoting_invoke_with_check (method);
		call->method = method;
	call->inst.flags |= MONO_INST_HAS_METHOD;
	call->inst.inst_left = this;
		int vtable_reg, slot_reg, this_reg;
		this_reg = this->dreg;
#ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
		/* Delegate Invoke fast path: call through delegate->invoke_impl. */
		if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
			MONO_EMIT_NULL_CHECK (cfg, this_reg);
			/* Make a call to delegate->invoke_impl */
			call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
			call->inst.inst_basereg = this_reg;
			call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
			MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
			return (MonoInst*)call;
		if ((!cfg->compile_aot || enable_for_aot) &&
			(!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
			 (MONO_METHOD_IS_FINAL (method) &&
			  method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
			!(method->klass->marshalbyref && context_used)) {
			/*
			 * the method is not virtual, we just need to ensure this is not null
			 * and then we can call the method directly.
			 */
			if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
				/*
				 * The check above ensures method is not gshared, this is needed since
				 * gshared methods can't have wrappers.
				 */
				method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
			if (!method->string_ctor)
				MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
			call->inst.opcode = callvirt_to_call (call->inst.opcode);
			MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
			return (MonoInst*)call;
		if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
			/*
			 * the method is virtual, but we can statically dispatch since either
			 * it's class or the method itself are sealed.
			 * But first we need to ensure it's not a null reference.
			 */
			MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
			call->inst.opcode = callvirt_to_call (call->inst.opcode);
			MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
			return (MonoInst*)call;
		/* True virtual dispatch: load the vtable (with a fault on null this), then the slot. */
		call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
		vtable_reg = alloc_preg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
		if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
#ifdef MONO_ARCH_HAVE_IMT
				/* Interface call through the IMT: the slot lives before the vtable. */
				guint32 imt_slot = mono_method_get_imt_slot (method);
				emit_imt_argument (cfg, call, imt_arg);
				slot_reg = vtable_reg;
				call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
			if (slot_reg == -1) {
				/* No IMT: go through the interface-offsets table. */
				slot_reg = alloc_preg (cfg);
				mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
				call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
			/* Class method: index directly into the vtable. */
			slot_reg = vtable_reg;
			call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
				((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
#ifdef MONO_ARCH_HAVE_IMT
				g_assert (mono_method_signature (method)->generic_param_count);
				emit_imt_argument (cfg, call, imt_arg);
		call->inst.sreg1 = slot_reg;
		call->virtual = TRUE;
	MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
	return (MonoInst*)call;
/* Method call that also passes a runtime-generic-context (vtable) argument. */
mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
								  MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
	rgctx_reg = mono_alloc_preg (cfg);
	MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
	ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
	call = (MonoCallInst*)ins;
	set_rgctx_arg (cfg, call, rgctx_reg, vtable_arg);

/* Convenience wrapper: uses the method's own signature and no IMT argument. */
mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
	return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);

/* Emit a call to the native function "func" with signature "sig". */
mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
	call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE);
	MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
	return (MonoInst*)call;

/* Look up the registered JIT icall for "func" and call it through its wrapper. */
mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
	MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
	return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);

/*
 * mono_emit_abs_call:
 *
 *   Emit a call to the runtime function described by PATCH_TYPE and DATA.
 */
inline static MonoInst*
mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
					MonoMethodSignature *sig, MonoInst **args)
	MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
	/*
	 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
	 * handle it.
	 */
	if (cfg->abs_patches == NULL)
		cfg->abs_patches = g_hash_table_new (NULL, NULL);
	g_hash_table_insert (cfg->abs_patches, ji, ji);
	ins = mono_emit_native_call (cfg, ji, sig, args);
	/* Mark the fptr as a patch so the backend resolves it instead of calling it. */
	((MonoCallInst*)ins)->fptr_is_patch = TRUE;
2534 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2536 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2537 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2541 * Native code might return non register sized integers
2542 * without initializing the upper bits.
2544 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2545 case OP_LOADI1_MEMBASE:
2546 widen_op = OP_ICONV_TO_I1;
2548 case OP_LOADU1_MEMBASE:
2549 widen_op = OP_ICONV_TO_U1;
2551 case OP_LOADI2_MEMBASE:
2552 widen_op = OP_ICONV_TO_I2;
2554 case OP_LOADU2_MEMBASE:
2555 widen_op = OP_ICONV_TO_U2;
2561 if (widen_op != -1) {
2562 int dreg = alloc_preg (cfg);
2565 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2566 widen->type = ins->type;
2576 get_memcpy_method (void)
2578 static MonoMethod *memcpy_method = NULL;
2579 if (!memcpy_method) {
2580 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2582 g_error ("Old corlib found. Install a new one");
2584 return memcpy_method;
2588 create_write_barrier_bitmap (MonoClass *klass, unsigned *wb_bitmap, int offset)
2590 MonoClassField *field;
2591 gpointer iter = NULL;
2593 while ((field = mono_class_get_fields (klass, &iter))) {
2596 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
2598 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2599 if (mono_type_is_reference (field->type)) {
2600 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2601 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2603 /*FIXME support nested value types so this works for: struct X { Y y; int z;} struct Y { object a,b; }*/
2604 MonoClass *field_class = mono_class_from_mono_type (field->type);
2605 if (field_class->has_references)
2606 create_write_barrier_bitmap (field_class, wb_bitmap, offset + foffset);
2612 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value, int value_reg)
2614 int card_table_shift_bits;
2615 gpointer card_table_mask;
2617 MonoInst *dummy_use;
2618 int nursery_shift_bits;
2619 size_t nursery_size;
2620 gboolean has_card_table_wb = FALSE;
2622 if (!cfg->gen_write_barriers)
2625 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
2627 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
2629 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
2630 has_card_table_wb = TRUE;
2633 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0) {
2636 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2637 wbarrier->sreg1 = ptr->dreg;
2639 wbarrier->sreg2 = value->dreg;
2641 wbarrier->sreg2 = value_reg;
2642 MONO_ADD_INS (cfg->cbb, wbarrier);
2643 } else if (card_table) {
2644 int offset_reg = alloc_preg (cfg);
2645 int card_reg = alloc_preg (cfg);
2648 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
2649 if (card_table_mask)
2650 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
2652 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
2653 * IMM's larger than 32bits.
2655 if (cfg->compile_aot) {
2656 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
2658 MONO_INST_NEW (cfg, ins, OP_PCONST);
2659 ins->inst_p0 = card_table;
2660 ins->dreg = card_reg;
2661 MONO_ADD_INS (cfg->cbb, ins);
2664 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
2665 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
2667 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2668 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
2672 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
2674 MONO_INST_NEW (cfg, dummy_use, OP_DUMMY_USE);
2675 dummy_use->sreg1 = value_reg;
2676 MONO_ADD_INS (cfg->cbb, dummy_use);
2681 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
2683 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
2684 unsigned need_wb = 0;
2689 /*types with references can't have alignment smaller than sizeof(void*) */
2690 if (align < SIZEOF_VOID_P)
2693 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
2694 if (size > 32 * SIZEOF_VOID_P)
2697 create_write_barrier_bitmap (klass, &need_wb, 0);
2699 /* We don't unroll more than 5 stores to avoid code bloat. */
2700 if (size > 5 * SIZEOF_VOID_P) {
2701 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
2702 size += (SIZEOF_VOID_P - 1);
2703 size &= ~(SIZEOF_VOID_P - 1);
2705 EMIT_NEW_ICONST (cfg, iargs [2], size);
2706 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
2707 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
2711 destreg = iargs [0]->dreg;
2712 srcreg = iargs [1]->dreg;
2715 dest_ptr_reg = alloc_preg (cfg);
2716 tmp_reg = alloc_preg (cfg);
2719 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
2721 while (size >= SIZEOF_VOID_P) {
2722 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, tmp_reg, srcreg, offset);
2723 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
2726 emit_write_barrier (cfg, iargs [0], NULL, tmp_reg);
2728 offset += SIZEOF_VOID_P;
2729 size -= SIZEOF_VOID_P;
2732 /*tmp += sizeof (void*)*/
2733 if (size >= SIZEOF_VOID_P) {
2734 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
2735 MONO_ADD_INS (cfg->cbb, iargs [0]);
2739 /* Those cannot be references since size < sizeof (void*) */
2741 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
2742 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
2748 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
2749 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
2755 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
2756 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
2765 * Emit code to copy a valuetype of type @klass whose address is stored in
2766 * @src->dreg to memory whose address is stored at @dest->dreg.
2769 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2771 MonoInst *iargs [4];
2774 MonoMethod *memcpy_method;
2778 * This check breaks with spilled vars... need to handle it during verification anyway.
2779 * g_assert (klass && klass == src->klass && klass == dest->klass);
2783 n = mono_class_native_size (klass, &align);
2785 n = mono_class_value_size (klass, &align);
2787 /* if native is true there should be no references in the struct */
2788 if (cfg->gen_write_barriers && klass->has_references && !native) {
2789 /* Avoid barriers when storing to the stack */
2790 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2791 (dest->opcode == OP_LDADDR))) {
2792 int context_used = 0;
2797 if (cfg->generic_sharing_context)
2798 context_used = mono_class_check_context_used (klass);
2800 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
2801 if ((cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
2803 } else if (context_used) {
2804 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2806 if (cfg->compile_aot) {
2807 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2809 EMIT_NEW_PCONST (cfg, iargs [2], klass);
2810 mono_class_compute_gc_descriptor (klass);
2814 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
2819 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2820 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2821 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2825 EMIT_NEW_ICONST (cfg, iargs [2], n);
2827 memcpy_method = get_memcpy_method ();
2828 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
2833 get_memset_method (void)
2835 static MonoMethod *memset_method = NULL;
2836 if (!memset_method) {
2837 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2839 g_error ("Old corlib found. Install a new one");
2841 return memset_method;
2845 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2847 MonoInst *iargs [3];
2850 MonoMethod *memset_method;
2852 /* FIXME: Optimize this for the case when dest is an LDADDR */
2854 mono_class_init (klass);
2855 n = mono_class_value_size (klass, &align);
2857 if (n <= sizeof (gpointer) * 5) {
2858 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2861 memset_method = get_memset_method ();
2863 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2864 EMIT_NEW_ICONST (cfg, iargs [2], n);
2865 mono_emit_method_call (cfg, memset_method, iargs, NULL);
2870 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2872 MonoInst *this = NULL;
2874 g_assert (cfg->generic_sharing_context);
2876 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2877 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2878 !method->klass->valuetype)
2879 EMIT_NEW_ARGLOAD (cfg, this, 0);
2881 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2882 MonoInst *mrgctx_loc, *mrgctx_var;
2885 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2887 mrgctx_loc = mono_get_vtable_var (cfg);
2888 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2891 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2892 MonoInst *vtable_loc, *vtable_var;
2896 vtable_loc = mono_get_vtable_var (cfg);
2897 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2899 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2900 MonoInst *mrgctx_var = vtable_var;
2903 vtable_reg = alloc_preg (cfg);
2904 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2905 vtable_var->type = STACK_PTR;
2911 int vtable_reg, res_reg;
2913 vtable_reg = alloc_preg (cfg);
2914 res_reg = alloc_preg (cfg);
2915 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
2920 static MonoJumpInfoRgctxEntry *
2921 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2923 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2924 res->method = method;
2925 res->in_mrgctx = in_mrgctx;
2926 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2927 res->data->type = patch_type;
2928 res->data->data.target = patch_data;
2929 res->info_type = info_type;
2934 static inline MonoInst*
2935 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2937 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
2941 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2942 MonoClass *klass, int rgctx_type)
2944 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2945 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2947 return emit_rgctx_fetch (cfg, rgctx, entry);
2951 * emit_get_rgctx_method:
2953 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2954 * normal constants, else emit a load from the rgctx.
2957 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2958 MonoMethod *cmethod, int rgctx_type)
2960 if (!context_used) {
2963 switch (rgctx_type) {
2964 case MONO_RGCTX_INFO_METHOD:
2965 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
2967 case MONO_RGCTX_INFO_METHOD_RGCTX:
2968 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
2971 g_assert_not_reached ();
2974 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2975 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2977 return emit_rgctx_fetch (cfg, rgctx, entry);
2982 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2983 MonoClassField *field, int rgctx_type)
2985 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2986 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2988 return emit_rgctx_fetch (cfg, rgctx, entry);
2992 * On return the caller must check @klass for load errors.
2995 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
2997 MonoInst *vtable_arg;
2999 int context_used = 0;
3001 if (cfg->generic_sharing_context)
3002 context_used = mono_class_check_context_used (klass);
3005 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3006 klass, MONO_RGCTX_INFO_VTABLE);
3008 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3012 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
3015 if (COMPILE_LLVM (cfg))
3016 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3018 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3019 #ifdef MONO_ARCH_VTABLE_REG
3020 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3021 cfg->uses_vtable_reg = TRUE;
3028 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
3030 if (mini_get_debug_options ()->better_cast_details) {
3031 int to_klass_reg = alloc_preg (cfg);
3032 int vtable_reg = alloc_preg (cfg);
3033 int klass_reg = alloc_preg (cfg);
3034 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3037 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3041 MONO_ADD_INS (cfg->cbb, tls_get);
3042 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3043 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3045 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3046 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3047 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3052 reset_cast_details (MonoCompile *cfg)
3054 /* Reset the variables holding the cast details */
3055 if (mini_get_debug_options ()->better_cast_details) {
3056 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3058 MONO_ADD_INS (cfg->cbb, tls_get);
3059 /* It is enough to reset the from field */
3060 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3065 * On return the caller must check @array_class for load errors
3068 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3070 int vtable_reg = alloc_preg (cfg);
3071 int context_used = 0;
3073 if (cfg->generic_sharing_context)
3074 context_used = mono_class_check_context_used (array_class);
3076 save_cast_details (cfg, array_class, obj->dreg);
3078 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3080 if (cfg->opt & MONO_OPT_SHARED) {
3081 int class_reg = alloc_preg (cfg);
3082 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3083 if (cfg->compile_aot) {
3084 int klass_reg = alloc_preg (cfg);
3085 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3086 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3088 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3090 } else if (context_used) {
3091 MonoInst *vtable_ins;
3093 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3094 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3096 if (cfg->compile_aot) {
3100 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3102 vt_reg = alloc_preg (cfg);
3103 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3104 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3107 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3109 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3113 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3115 reset_cast_details (cfg);
3119 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3120 * generic code is generated.
3123 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3125 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3128 MonoInst *rgctx, *addr;
3130 /* FIXME: What if the class is shared? We might not
3131 have to get the address of the method from the
3133 addr = emit_get_rgctx_method (cfg, context_used, method,
3134 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3136 rgctx = emit_get_rgctx (cfg, method, context_used);
3138 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3140 return mono_emit_method_call (cfg, method, &val, NULL);
3145 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3149 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3150 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3151 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3152 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3154 obj_reg = sp [0]->dreg;
3155 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3156 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3158 /* FIXME: generics */
3159 g_assert (klass->rank == 0);
3162 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3163 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3165 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3166 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
3169 MonoInst *element_class;
3171 /* This assertion is from the unboxcast insn */
3172 g_assert (klass->rank == 0);
3174 element_class = emit_get_rgctx_klass (cfg, context_used,
3175 klass->element_class, MONO_RGCTX_INFO_KLASS);
3177 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3178 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3180 save_cast_details (cfg, klass->element_class, obj_reg);
3181 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3182 reset_cast_details (cfg);
3185 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
3186 MONO_ADD_INS (cfg->cbb, add);
3187 add->type = STACK_MP;
3194 * Returns NULL and set the cfg exception on error.
3197 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3199 MonoInst *iargs [2];
3205 MonoInst *iargs [2];
3208 FIXME: we cannot get managed_alloc here because we can't get
3209 the class's vtable (because it's not a closed class)
3211 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3212 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3215 if (cfg->opt & MONO_OPT_SHARED)
3216 rgctx_info = MONO_RGCTX_INFO_KLASS;
3218 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3219 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3221 if (cfg->opt & MONO_OPT_SHARED) {
3222 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3224 alloc_ftn = mono_object_new;
3227 alloc_ftn = mono_object_new_specific;
3230 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3233 if (cfg->opt & MONO_OPT_SHARED) {
3234 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3235 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3237 alloc_ftn = mono_object_new;
3238 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3239 /* This happens often in argument checking code, eg. throw new FooException... */
3240 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3241 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3242 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3244 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3245 MonoMethod *managed_alloc = NULL;
3249 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3250 cfg->exception_ptr = klass;
3254 #ifndef MONO_CROSS_COMPILE
3255 managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3258 if (managed_alloc) {
3259 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3260 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3262 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
3264 guint32 lw = vtable->klass->instance_size;
3265 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3266 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3267 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3270 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3274 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3278 * Returns NULL and set the cfg exception on error.
3281 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
3283 MonoInst *alloc, *ins;
3285 if (mono_class_is_nullable (klass)) {
3286 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3289 /* FIXME: What if the class is shared? We might not
3290 have to get the method address from the RGCTX. */
3291 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3292 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3293 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3295 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3297 return mono_emit_method_call (cfg, method, &val, NULL);
3301 alloc = handle_alloc (cfg, klass, TRUE, context_used);
3305 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3312 mini_class_has_reference_variant_generic_argument (MonoClass *klass, int context_used)
3315 MonoGenericContainer *container;
3316 MonoGenericInst *ginst;
3318 if (klass->generic_class) {
3319 container = klass->generic_class->container_class->generic_container;
3320 ginst = klass->generic_class->context.class_inst;
3321 } else if (klass->generic_container && context_used) {
3322 container = klass->generic_container;
3323 ginst = container->context.class_inst;
3328 for (i = 0; i < container->type_argc; ++i) {
3330 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
3332 type = ginst->type_argv [i];
3333 if (MONO_TYPE_IS_REFERENCE (type))
3336 if (context_used && (type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR))
3342 // FIXME: This doesn't work yet (class libs tests fail?)
3343 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
3346 * Returns NULL and set the cfg exception on error.
3349 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3351 MonoBasicBlock *is_null_bb;
3352 int obj_reg = src->dreg;
3353 int vtable_reg = alloc_preg (cfg);
3354 MonoInst *klass_inst = NULL;
3359 if(mini_class_has_reference_variant_generic_argument (klass, context_used)) {
3360 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
3361 MonoInst *cache_ins;
3363 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3368 /* klass - it's the second element of the cache entry*/
3369 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3372 args [2] = cache_ins;
3374 return mono_emit_method_call (cfg, mono_castclass, args, NULL);
3377 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3379 if (is_complex_isinst (klass)) {
3380 /* Complex case, handle by an icall */
3386 args [1] = klass_inst;
3388 return mono_emit_jit_icall (cfg, mono_object_castclass, args);
3390 /* Simple case, handled by the code below */
3394 NEW_BBLOCK (cfg, is_null_bb);
3396 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3397 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3399 save_cast_details (cfg, klass, obj_reg);
3401 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3402 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3403 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3405 int klass_reg = alloc_preg (cfg);
3407 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3409 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3410 /* the remoting code is broken, access the class for now */
3411 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3412 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3414 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3415 cfg->exception_ptr = klass;
3418 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3420 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3421 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3423 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3425 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3426 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3430 MONO_START_BB (cfg, is_null_bb);
3432 reset_cast_details (cfg);
3438 * Returns NULL and set the cfg exception on error.
3441 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3444 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3445 int obj_reg = src->dreg;
3446 int vtable_reg = alloc_preg (cfg);
3447 int res_reg = alloc_preg (cfg);
3448 MonoInst *klass_inst = NULL;
3453 if(mini_class_has_reference_variant_generic_argument (klass, context_used)) {
3454 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
3455 MonoInst *cache_ins;
3457 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3462 /* klass - it's the second element of the cache entry*/
3463 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3466 args [2] = cache_ins;
3468 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
3471 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3473 if (is_complex_isinst (klass)) {
3474 /* Complex case, handle by an icall */
3480 args [1] = klass_inst;
3482 return mono_emit_jit_icall (cfg, mono_object_isinst, args);
3484 /* Simple case, the code below can handle it */
3488 NEW_BBLOCK (cfg, is_null_bb);
3489 NEW_BBLOCK (cfg, false_bb);
3490 NEW_BBLOCK (cfg, end_bb);
3492 /* Do the assignment at the beginning, so the other assignment can be if converted */
3493 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3494 ins->type = STACK_OBJ;
3497 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3498 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3500 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3502 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3503 g_assert (!context_used);
3504 /* the is_null_bb target simply copies the input register to the output */
3505 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3507 int klass_reg = alloc_preg (cfg);
3510 int rank_reg = alloc_preg (cfg);
3511 int eclass_reg = alloc_preg (cfg);
3513 g_assert (!context_used);
3514 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3515 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3516 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3517 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3518 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
3519 if (klass->cast_class == mono_defaults.object_class) {
3520 int parent_reg = alloc_preg (cfg);
3521 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3522 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3523 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3524 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3525 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3526 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3527 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3528 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3529 } else if (klass->cast_class == mono_defaults.enum_class) {
3530 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3531 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3532 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3533 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3535 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3536 /* Check that the object is a vector too */
3537 int bounds_reg = alloc_preg (cfg);
3538 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3539 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3540 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3543 /* the is_null_bb target simply copies the input register to the output */
3544 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3546 } else if (mono_class_is_nullable (klass)) {
3547 g_assert (!context_used);
3548 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3549 /* the is_null_bb target simply copies the input register to the output */
3550 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
3552 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3553 g_assert (!context_used);
3554 /* the remoting code is broken, access the class for now */
3555 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3556 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3558 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3559 cfg->exception_ptr = klass;
3562 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3564 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3565 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3567 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3568 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3570 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3571 /* the is_null_bb target simply copies the input register to the output */
3572 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
3577 MONO_START_BB (cfg, false_bb);
3579 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3580 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3582 MONO_START_BB (cfg, is_null_bb);
3584 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR classifying the object reference SRC against KLASS, with special
 * handling for transparent (remoting) proxies whose real type may not be
 * locally known.  The result is materialized into an integer register
 * (0 / 1 / 2, see the comment below).
 * NOTE(review): some lines of this function are elided in this excerpt.
 */
3590 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3592 /* This opcode takes as input an object reference and a class, and returns:
3593 0) if the object is an instance of the class,
3594 1) if the object is not an instance of the class,
3595 2) if the object is a proxy whose type cannot be determined */
3598 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3599 int obj_reg = src->dreg;
3600 int dreg = alloc_ireg (cfg);
3602 int klass_reg = alloc_preg (cfg);
3604 NEW_BBLOCK (cfg, true_bb);
3605 NEW_BBLOCK (cfg, false_bb);
3606 NEW_BBLOCK (cfg, false2_bb);
3607 NEW_BBLOCK (cfg, end_bb);
3608 NEW_BBLOCK (cfg, no_proxy_bb);
/* A null reference is classified as "not an instance" (result 1). */
3610 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3611 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3613 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3614 NEW_BBLOCK (cfg, interface_fail_bb);
/* Interface case: check the vtable's interface bitmap first. */
3616 tmp_reg = alloc_preg (cfg);
3617 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3618 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
3619 MONO_START_BB (cfg, interface_fail_bb);
3620 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Not a transparent proxy either -> definitely not an instance. */
3622 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
/* Proxy without custom type info -> type cannot be determined (result 2). */
3624 tmp_reg = alloc_preg (cfg);
3625 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3626 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3627 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
/* Non-interface case: load the object's class from its vtable. */
3629 tmp_reg = alloc_preg (cfg);
3630 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3631 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3633 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Transparent proxy: test against the remote class's proxy_class instead. */
3634 tmp_reg = alloc_preg (cfg);
3635 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3636 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3638 tmp_reg = alloc_preg (cfg);
3639 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3640 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3641 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3643 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3644 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3646 MONO_START_BB (cfg, no_proxy_bb);
3648 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Result blocks: 1 = not an instance, 2 = undeterminable proxy, 0 = instance. */
3651 MONO_START_BB (cfg, false_bb);
3653 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3654 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3656 MONO_START_BB (cfg, false2_bb);
3658 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3659 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3661 MONO_START_BB (cfg, true_bb);
3663 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3665 MONO_START_BB (cfg, end_bb);
/* Dummy ICONST so the result register has a defining instruction to return. */
3668 MONO_INST_NEW (cfg, ins, OP_ICONST);
3670 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR implementing a castclass that is aware of transparent
 * (remoting) proxies: on success the result register is 0, for a proxy
 * whose type cannot be determined it is 1, and any real cast failure
 * throws InvalidCastException.
 * NOTE(review): some lines of this function are elided in this excerpt.
 */
3676 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3678 /* This opcode takes as input an object reference and a class, and returns:
3679 0) if the object is an instance of the class,
3680 1) if the object is a proxy whose type cannot be determined
3681 an InvalidCastException exception is thrown otherwise*/
3684 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3685 int obj_reg = src->dreg;
3686 int dreg = alloc_ireg (cfg);
3687 int tmp_reg = alloc_preg (cfg);
3688 int klass_reg = alloc_preg (cfg);
3690 NEW_BBLOCK (cfg, end_bb);
3691 NEW_BBLOCK (cfg, ok_result_bb);
/* Casting a null reference always succeeds. */
3693 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3694 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record klass/obj so a failing cast can produce a detailed exception. */
3696 save_cast_details (cfg, klass, obj_reg);
3698 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3699 NEW_BBLOCK (cfg, interface_fail_bb);
3701 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3702 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3703 MONO_START_BB (cfg, interface_fail_bb);
3704 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Non-proxy that failed the interface check -> throws (class check emits the exception). */
3706 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
/* Proxy without custom type info cannot satisfy the cast -> throw. */
3708 tmp_reg = alloc_preg (cfg);
3709 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3710 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3711 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Proxy with custom type info: result 1 ("cannot determine"). */
3713 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3714 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3717 NEW_BBLOCK (cfg, no_proxy_bb);
3719 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3720 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3721 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Transparent proxy: compare against the remote class's proxy_class. */
3723 tmp_reg = alloc_preg (cfg);
3724 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3725 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3727 tmp_reg = alloc_preg (cfg);
3728 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3729 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3730 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3732 NEW_BBLOCK (cfg, fail_1_bb);
3734 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3736 MONO_START_BB (cfg, fail_1_bb);
3738 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3739 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Ordinary object: full castclass check, throws on mismatch. */
3741 MONO_START_BB (cfg, no_proxy_bb);
3743 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3746 MONO_START_BB (cfg, ok_result_bb);
3748 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3750 MONO_START_BB (cfg, end_bb);
/* Dummy ICONST so the result register has a defining instruction to return. */
3753 MONO_INST_NEW (cfg, ins, OP_ICONST);
3755 ins->type = STACK_I4;
3761 * Returns NULL and sets the cfg exception on error.
/*
 * handle_delegate_ctor:
 *
 *   Allocate a delegate object of type KLASS and inline the work of
 * mono_delegate_ctor: store the target object, the MonoMethod, an optional
 * per-domain compiled-code slot, and the invoke trampoline into the new
 * delegate's fields, emitting write barriers where required.
 * NOTE(review): some lines of this function are elided in this excerpt.
 */
3763 static G_GNUC_UNUSED MonoInst*
3764 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
3768 gpointer *trampoline;
3769 MonoInst *obj, *method_ins, *tramp_ins;
3773 obj = handle_alloc (cfg, klass, FALSE, 0);
3777 /* Inline the contents of mono_delegate_ctor */
3779 /* Set target field */
3780 /* Optimize away setting of NULL target */
3781 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
3782 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* target is a reference field: notify the GC of the store. */
3783 if (cfg->gen_write_barriers) {
3784 dreg = alloc_preg (cfg);
3785 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target));
3786 emit_write_barrier (cfg, ptr, target, 0);
3790 /* Set method field */
3791 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
3792 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3793 if (cfg->gen_write_barriers) {
3794 dreg = alloc_preg (cfg);
3795 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method));
3796 emit_write_barrier (cfg, ptr, method_ins, 0);
3799 * To avoid looking up the compiled code belonging to the target method
3800 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3801 * store it, and we fill it after the method has been compiled.
3803 if (!cfg->compile_aot && !method->dynamic) {
3804 MonoInst *code_slot_ins;
3807 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Lazily create and cache the per-method code slot under the domain lock. */
3809 domain = mono_domain_get ();
3810 mono_domain_lock (domain);
3811 if (!domain_jit_info (domain)->method_code_hash)
3812 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3813 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3815 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3816 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3818 mono_domain_unlock (domain);
3820 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3822 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3825 /* Set invoke_impl field */
/* Under AOT the trampoline address is patched in at load time. */
3826 if (cfg->compile_aot) {
3827 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3829 trampoline = mono_create_delegate_trampoline (klass);
3830 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3832 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3834 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a native call to the rank-specific mono_array_new_va icall to
 * allocate a multi-dimensional array; SP holds the dimension arguments.
 * The icall uses a vararg calling convention, which LLVM cannot handle,
 * so LLVM compilation is disabled for this method.
 */
3840 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3842 MonoJitICallInfo *info;
3844 /* Need to register the icall so it gets an icall wrapper */
3845 info = mono_get_array_new_va_icall (rank);
3847 cfg->flags |= MONO_CFG_HAS_VARARGS;
3849 /* mono_array_new_va () needs a vararg calling convention */
3850 cfg->disable_llvm = TRUE;
3852 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3853 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Ensure the GOT variable is initialized exactly once: insert an
 * OP_LOAD_GOTADDR at the very start of the entry basic block and a dummy
 * use in the exit block so the variable stays live for the whole method.
 * No-op if there is no got_var or it was already allocated.
 */
3857 mono_emit_load_got_addr (MonoCompile *cfg)
3859 MonoInst *getaddr, *dummy_use;
3861 if (!cfg->got_var || cfg->got_var_allocated)
3864 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3865 getaddr->dreg = cfg->got_var->dreg;
3867 /* Add it to the start of the first bblock */
3868 if (cfg->bb_entry->code) {
/* Prepend by linking in front of the existing instruction list. */
3869 getaddr->next = cfg->bb_entry->code;
3870 cfg->bb_entry->code = getaddr;
3873 MONO_ADD_INS (cfg->bb_entry, getaddr);
3875 cfg->got_var_allocated = TRUE;
3878 * Add a dummy use to keep the got_var alive, since real uses might
3879 * only be generated by the back ends.
3880 * Add it to end_bblock, so the variable's lifetime covers the whole
3882 * It would be better to make the usage of the got var explicit in all
3883 * cases when the backend needs it (i.e. calls, throw etc.), so this
3884 * wouldn't be needed.
3886 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3887 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Cached inline size limit, read once from MONO_INLINELIMIT (or the
 * INLINE_LENGTH_LIMIT default) on first use. */
3890 static int inline_limit;
3891 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled.
 * Rejects generic sharing, deep inline nesting, noinline/synchronized/
 * marshal-by-ref methods, bodies over the size limit, classes whose cctor
 * would have to run inside the inlined code, methods with declarative
 * security, and (under soft float) R4 parameters or return values.
 * NOTE(review): several lines are elided in this excerpt, including the
 * early returns after some of the checks below.
 */
3894 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3896 MonoMethodHeaderSummary header;
3898 #ifdef MONO_ARCH_SOFT_FLOAT
3899 MonoMethodSignature *sig = mono_method_signature (method);
3903 if (cfg->generic_sharing_context)
3906 if (cfg->inline_depth > 10)
3909 #ifdef MONO_ARCH_HAVE_LMF_OPS
3910 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3911 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3912 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3917 if (!mono_method_get_header_summary (method, &header))
3920 /*runtime, icall and pinvoke are checked by summary call*/
3921 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3922 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3923 (method->klass->marshalbyref) ||
3927 /* also consider num_locals? */
3928 /* Do the size check early to avoid creating vtables */
3929 if (!inline_limit_inited) {
3930 if (getenv ("MONO_INLINELIMIT"))
3931 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3933 inline_limit = INLINE_LENGTH_LIMIT;
3934 inline_limit_inited = TRUE;
3936 if (header.code_size >= inline_limit)
3940 * if we can initialize the class of the method right away, we do,
3941 * otherwise we don't allow inlining if the class needs initialization,
3942 * since it would mean inserting a call to mono_runtime_class_init()
3943 * inside the inlined code
3945 if (!(cfg->opt & MONO_OPT_SHARED)) {
3946 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3947 if (cfg->run_cctors && method->klass->has_cctor) {
3948 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
3949 if (!method->klass->runtime_info)
3950 /* No vtable created yet */
3952 vtable = mono_class_vtable (cfg->domain, method->klass);
3955 /* This makes so that inline cannot trigger */
3956 /* .cctors: too many apps depend on them */
3957 /* running with a specific order... */
3958 if (! vtable->initialized)
3960 mono_runtime_class_init (vtable);
3962 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3963 if (!method->klass->runtime_info)
3964 /* No vtable created yet */
3966 vtable = mono_class_vtable (cfg->domain, method->klass);
3969 if (!vtable->initialized)
3974 * If we're compiling for shared code
3975 * the cctor will need to be run at aot method load time, for example,
3976 * or at the end of the compilation of the inlining method.
3978 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3983 * CAS - do not inline methods with declarative security
3984 * Note: this has to be before any possible return TRUE;
3986 if (mono_method_has_declsec (method))
3989 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft float cannot inline methods taking or returning R4 values. */
3991 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3993 for (i = 0; i < sig->param_count; ++i)
3994 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static field access in METHOD requires emitting a
 * class-initialization check for VTABLE's class.  Not needed when the
 * vtable is already initialized (JIT only), when the class is
 * beforefieldinit, when no cctor must run, or when an instance method
 * accesses its own class (init already happened before the call).
 * NOTE(review): the return statements are elided in this excerpt.
 */
4002 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
4004 if (vtable->initialized && !cfg->compile_aot)
4007 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
4010 if (!mono_class_needs_cctor_run (vtable->klass, method))
4013 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
4014 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR with elements of type KLASS, optionally with a bounds check.
 * On 64 bit the 32 bit index is widened/narrowed to match the pointer-sized
 * address arithmetic; on x86/amd64 a single LEA is used for power-of-two
 * element sizes.
 */
4021 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4025 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
4027 mono_class_init (klass);
4028 size = mono_class_array_element_size (klass);
4030 mult_reg = alloc_preg (cfg);
4031 array_reg = arr->dreg;
4032 index_reg = index->dreg;
4034 #if SIZEOF_REGISTER == 8
4035 /* The array reg is 64 bits but the index reg is only 32 */
4036 if (COMPILE_LLVM (cfg)) {
4038 index2_reg = index_reg;
4040 index2_reg = alloc_preg (cfg);
4041 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
4044 if (index->type == STACK_I8) {
4045 index2_reg = alloc_preg (cfg);
4046 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4048 index2_reg = index_reg;
4053 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
4055 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Fast path: LEA with shift for element sizes 1/2/4/8. */
4056 if (size == 1 || size == 2 || size == 4 || size == 8) {
4057 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4059 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
4060 ins->type = STACK_PTR;
/* Generic path: addr = arr + index * size + offsetof (MonoArray, vector). */
4066 add_reg = alloc_preg (cfg);
4068 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
4069 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4070 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4071 ins->type = STACK_PTR;
4072 MONO_ADD_INS (cfg->cbb, ins);
4077 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [index1, index2] of the
 * two-dimensional array ARR with elements of type KLASS.  Both indices
 * are range-checked against the array's MonoArrayBounds (lower_bound and
 * length per dimension), throwing IndexOutOfRangeException on violation,
 * then the flat offset is computed as
 * ((index1 - low1) * length2 + (index2 - low2)) * element_size.
 */
4079 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4081 int bounds_reg = alloc_preg (cfg);
4082 int add_reg = alloc_preg (cfg);
4083 int mult_reg = alloc_preg (cfg);
4084 int mult2_reg = alloc_preg (cfg);
4085 int low1_reg = alloc_preg (cfg);
4086 int low2_reg = alloc_preg (cfg);
4087 int high1_reg = alloc_preg (cfg);
4088 int high2_reg = alloc_preg (cfg);
4089 int realidx1_reg = alloc_preg (cfg);
4090 int realidx2_reg = alloc_preg (cfg);
4091 int sum_reg = alloc_preg (cfg);
4096 mono_class_init (klass);
4097 size = mono_class_array_element_size (klass);
4099 index1 = index_ins1->dreg;
4100 index2 = index_ins2->dreg;
4102 /* range checking */
4103 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4104 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: realidx1 = index1 - lower_bound; unsigned compare vs length. */
4106 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4107 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4108 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4109 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4110 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4111 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4112 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: bounds entry lives sizeof (MonoArrayBounds) past the first. */
4114 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4115 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4116 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4117 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4118 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
4119 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4120 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Flatten: (realidx1 * length2 + realidx2) * size + arr + vector offset. */
4122 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4123 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4124 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4125 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4126 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4128 ins->type = STACK_MP;
4130 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit IR computing the address of an array element for CMETHOD's
 * Get/Set/Address accessor.  Rank 1 and (when intrinsics are enabled and
 * OP_LMUL is available) rank 2 use the inline address helpers above;
 * higher ranks fall back to a call to the marshalled Address wrapper.
 * The rank is derived from the accessor signature: one index parameter
 * per dimension, plus a trailing value parameter when IS_SET.
 */
4141 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4141 MonoMethod *addr_method;
4144 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4147 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
4149 #ifndef MONO_ARCH_EMULATE_MUL_DIV
4150 /* emit_ldelema_2 depends on OP_LMUL */
4151 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
4152 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
4156 element_size = mono_class_array_element_size (cmethod->klass->element_class);
4157 addr_method = mono_marshal_get_array_address (rank, element_size);
4158 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint request. */
4163 static MonoBreakPolicy
4164 always_insert_breakpoint (MonoMethod *method)
4166 return MONO_BREAK_POLICY_ALWAYS;
/* Currently installed break policy callback; see mono_set_break_policy (). */
4169 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4172 * mono_set_break_policy:
4173 * policy_callback: the new callback function
4175 * Allow embedders to decide whether to actually obey breakpoint instructions
4176 * (both break IL instructions and Debugger.Break () method calls), for example
4177 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4178 * untrusted or semi-trusted code.
4180 * @policy_callback will be called every time a break point instruction needs to
4181 * be inserted with the method argument being the method that calls Debugger.Break()
4182 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4183 * if it wants the breakpoint to not be effective in the given method.
4184 * #MONO_BREAK_POLICY_ALWAYS is the default.
4187 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
/* Passing NULL restores the default always-break policy. */
4189 if (policy_callback)
4190 break_policy_func = policy_callback;
4192 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:  (sic - historical typo in the name)
 *
 *   Consult the installed break policy to decide whether a breakpoint
 * should actually be emitted for METHOD; MONO_BREAK_POLICY_ON_DBG defers
 * to whether the Mono debugger is attached.
 */
4196 should_insert_brekpoint (MonoMethod *method) {
4197 switch (break_policy_func (method)) {
4198 case MONO_BREAK_POLICY_ALWAYS:
4200 case MONO_BREAK_POLICY_NEVER:
4202 case MONO_BREAK_POLICY_ON_DBG:
4203 return mono_debug_using_mono_debugger ();
/* Unknown policy value: warn (default behavior elided in this excerpt). */
4205 g_warning ("Incorrect value returned from break policy callback");
4210 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Inline the array element copy performed by the icall: compute the
 * element address (bounds already checked by the caller) and emit a
 * typed load+store either from args [2] into the array (IS_SET) or from
 * the array into args [2].
 */
4212 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4214 MonoInst *addr, *store, *load;
4215 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4217 /* the bounds check is already done by the callers */
4218 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
4220 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4221 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
4223 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4224 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to replace a constructor call with an intrinsic instruction
 * sequence; currently only SIMD constructors are handled (when
 * MONO_OPT_SIMD is enabled).  Returns NULL when no intrinsic applies.
 */
4230 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4232 MonoInst *ins = NULL;
4233 #ifdef MONO_ARCH_SIMD_INTRINSICS
4234 if (cfg->opt & MONO_OPT_SIMD) {
4235 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
4245 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4247 MonoInst *ins = NULL;
4249 static MonoClass *runtime_helpers_class = NULL;
4250 if (! runtime_helpers_class)
4251 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
4252 "System.Runtime.CompilerServices", "RuntimeHelpers");
4254 if (cmethod->klass == mono_defaults.string_class) {
4255 if (strcmp (cmethod->name, "get_Chars") == 0) {
4256 int dreg = alloc_ireg (cfg);
4257 int index_reg = alloc_preg (cfg);
4258 int mult_reg = alloc_preg (cfg);
4259 int add_reg = alloc_preg (cfg);
4261 #if SIZEOF_REGISTER == 8
4262 /* The array reg is 64 bits but the index reg is only 32 */
4263 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
4265 index_reg = args [1]->dreg;
4267 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
4269 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4270 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
4271 add_reg = ins->dreg;
4272 /* Avoid a warning */
4274 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4277 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
4278 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4279 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4280 add_reg, G_STRUCT_OFFSET (MonoString, chars));
4282 type_from_op (ins, NULL, NULL);
4284 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4285 int dreg = alloc_ireg (cfg);
4286 /* Decompose later to allow more optimizations */
4287 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
4288 ins->type = STACK_I4;
4289 ins->flags |= MONO_INST_FAULT;
4290 cfg->cbb->has_array_access = TRUE;
4291 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
4294 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
4295 int mult_reg = alloc_preg (cfg);
4296 int add_reg = alloc_preg (cfg);
4298 /* The corlib functions check for oob already. */
4299 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
4300 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4301 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
4302 return cfg->cbb->last_ins;
4305 } else if (cmethod->klass == mono_defaults.object_class) {
4307 if (strcmp (cmethod->name, "GetType") == 0) {
4308 int dreg = alloc_preg (cfg);
4309 int vt_reg = alloc_preg (cfg);
4310 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4311 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
4312 type_from_op (ins, NULL, NULL);
4315 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
4316 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
4317 int dreg = alloc_ireg (cfg);
4318 int t1 = alloc_ireg (cfg);
4320 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
4321 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
4322 ins->type = STACK_I4;
4326 } else if (strcmp (cmethod->name, ".ctor") == 0) {
4327 MONO_INST_NEW (cfg, ins, OP_NOP);
4328 MONO_ADD_INS (cfg->cbb, ins);
4332 } else if (cmethod->klass == mono_defaults.array_class) {
4333 if (strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
4334 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
4336 #ifndef MONO_BIG_ARRAYS
4338 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
4341 if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
4342 int dreg = alloc_ireg (cfg);
4343 int bounds_reg = alloc_ireg (cfg);
4344 MonoBasicBlock *end_bb, *szarray_bb;
4345 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
4347 NEW_BBLOCK (cfg, end_bb);
4348 NEW_BBLOCK (cfg, szarray_bb);
4350 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
4351 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
4352 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4353 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
4354 /* Non-szarray case */
4356 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4357 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4359 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4360 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4361 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4362 MONO_START_BB (cfg, szarray_bb);
4365 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4366 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4368 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4369 MONO_START_BB (cfg, end_bb);
4371 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
4372 ins->type = STACK_I4;
4378 if (cmethod->name [0] != 'g')
4381 if (strcmp (cmethod->name, "get_Rank") == 0) {
4382 int dreg = alloc_ireg (cfg);
4383 int vtable_reg = alloc_preg (cfg);
4384 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
4385 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4386 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
4387 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4388 type_from_op (ins, NULL, NULL);
4391 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4392 int dreg = alloc_ireg (cfg);
4394 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4395 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4396 type_from_op (ins, NULL, NULL);
4401 } else if (cmethod->klass == runtime_helpers_class) {
4403 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
4404 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
4408 } else if (cmethod->klass == mono_defaults.thread_class) {
4409 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
4410 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
4411 MONO_ADD_INS (cfg->cbb, ins);
4413 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
4414 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4415 MONO_ADD_INS (cfg->cbb, ins);
4418 } else if (cmethod->klass == mono_defaults.monitor_class) {
4419 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
4420 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
4423 if (COMPILE_LLVM (cfg)) {
4425 * Pass the argument normally, the LLVM backend will handle the
4426 * calling convention problems.
4428 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4430 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
4431 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4432 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4433 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4436 return (MonoInst*)call;
4437 } else if (strcmp (cmethod->name, "Exit") == 0) {
4440 if (COMPILE_LLVM (cfg)) {
4441 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4443 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
4444 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4445 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4446 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4449 return (MonoInst*)call;
4451 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4452 MonoMethod *fast_method = NULL;
4454 /* Avoid infinite recursion */
4455 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
4456 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
4457 strcmp (cfg->method->name, "FastMonitorExit") == 0))
4460 if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) ||
4461 strcmp (cmethod->name, "Exit") == 0)
4462 fast_method = mono_monitor_get_fast_path (cmethod);
4466 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
4468 } else if (cmethod->klass->image == mono_defaults.corlib &&
4469 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
4470 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
4473 #if SIZEOF_REGISTER == 8
4474 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
4475 /* 64 bit reads are already atomic */
4476 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
4477 ins->dreg = mono_alloc_preg (cfg);
4478 ins->inst_basereg = args [0]->dreg;
4479 ins->inst_offset = 0;
4480 MONO_ADD_INS (cfg->cbb, ins);
4484 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
4485 if (strcmp (cmethod->name, "Increment") == 0) {
4486 MonoInst *ins_iconst;
4489 if (fsig->params [0]->type == MONO_TYPE_I4)
4490 opcode = OP_ATOMIC_ADD_NEW_I4;
4491 #if SIZEOF_REGISTER == 8
4492 else if (fsig->params [0]->type == MONO_TYPE_I8)
4493 opcode = OP_ATOMIC_ADD_NEW_I8;
4496 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4497 ins_iconst->inst_c0 = 1;
4498 ins_iconst->dreg = mono_alloc_ireg (cfg);
4499 MONO_ADD_INS (cfg->cbb, ins_iconst);
4501 MONO_INST_NEW (cfg, ins, opcode);
4502 ins->dreg = mono_alloc_ireg (cfg);
4503 ins->inst_basereg = args [0]->dreg;
4504 ins->inst_offset = 0;
4505 ins->sreg2 = ins_iconst->dreg;
4506 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4507 MONO_ADD_INS (cfg->cbb, ins);
4509 } else if (strcmp (cmethod->name, "Decrement") == 0) {
4510 MonoInst *ins_iconst;
4513 if (fsig->params [0]->type == MONO_TYPE_I4)
4514 opcode = OP_ATOMIC_ADD_NEW_I4;
4515 #if SIZEOF_REGISTER == 8
4516 else if (fsig->params [0]->type == MONO_TYPE_I8)
4517 opcode = OP_ATOMIC_ADD_NEW_I8;
4520 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4521 ins_iconst->inst_c0 = -1;
4522 ins_iconst->dreg = mono_alloc_ireg (cfg);
4523 MONO_ADD_INS (cfg->cbb, ins_iconst);
4525 MONO_INST_NEW (cfg, ins, opcode);
4526 ins->dreg = mono_alloc_ireg (cfg);
4527 ins->inst_basereg = args [0]->dreg;
4528 ins->inst_offset = 0;
4529 ins->sreg2 = ins_iconst->dreg;
4530 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4531 MONO_ADD_INS (cfg->cbb, ins);
4533 } else if (strcmp (cmethod->name, "Add") == 0) {
4536 if (fsig->params [0]->type == MONO_TYPE_I4)
4537 opcode = OP_ATOMIC_ADD_NEW_I4;
4538 #if SIZEOF_REGISTER == 8
4539 else if (fsig->params [0]->type == MONO_TYPE_I8)
4540 opcode = OP_ATOMIC_ADD_NEW_I8;
4544 MONO_INST_NEW (cfg, ins, opcode);
4545 ins->dreg = mono_alloc_ireg (cfg);
4546 ins->inst_basereg = args [0]->dreg;
4547 ins->inst_offset = 0;
4548 ins->sreg2 = args [1]->dreg;
4549 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4550 MONO_ADD_INS (cfg->cbb, ins);
4553 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
4555 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
4556 if (strcmp (cmethod->name, "Exchange") == 0) {
4558 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
4560 if (fsig->params [0]->type == MONO_TYPE_I4)
4561 opcode = OP_ATOMIC_EXCHANGE_I4;
4562 #if SIZEOF_REGISTER == 8
4563 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
4564 (fsig->params [0]->type == MONO_TYPE_I))
4565 opcode = OP_ATOMIC_EXCHANGE_I8;
4567 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
4568 opcode = OP_ATOMIC_EXCHANGE_I4;
4573 MONO_INST_NEW (cfg, ins, opcode);
4574 ins->dreg = mono_alloc_ireg (cfg);
4575 ins->inst_basereg = args [0]->dreg;
4576 ins->inst_offset = 0;
4577 ins->sreg2 = args [1]->dreg;
4578 MONO_ADD_INS (cfg->cbb, ins);
4580 switch (fsig->params [0]->type) {
4582 ins->type = STACK_I4;
4586 ins->type = STACK_I8;
4588 case MONO_TYPE_OBJECT:
4589 ins->type = STACK_OBJ;
4592 g_assert_not_reached ();
4595 if (cfg->gen_write_barriers && is_ref)
4596 emit_write_barrier (cfg, args [0], args [1], -1);
4598 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4600 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
4601 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
4603 gboolean is_ref = MONO_TYPE_IS_REFERENCE (fsig->params [1]);
4604 if (fsig->params [1]->type == MONO_TYPE_I4)
4606 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
4607 size = sizeof (gpointer);
4608 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
4611 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
4612 ins->dreg = alloc_ireg (cfg);
4613 ins->sreg1 = args [0]->dreg;
4614 ins->sreg2 = args [1]->dreg;
4615 ins->sreg3 = args [2]->dreg;
4616 ins->type = STACK_I4;
4617 MONO_ADD_INS (cfg->cbb, ins);
4618 } else if (size == 8) {
4619 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
4620 ins->dreg = alloc_ireg (cfg);
4621 ins->sreg1 = args [0]->dreg;
4622 ins->sreg2 = args [1]->dreg;
4623 ins->sreg3 = args [2]->dreg;
4624 ins->type = STACK_I8;
4625 MONO_ADD_INS (cfg->cbb, ins);
4627 /* g_assert_not_reached (); */
4629 if (cfg->gen_write_barriers && is_ref)
4630 emit_write_barrier (cfg, args [0], args [1], -1);
4632 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
4636 } else if (cmethod->klass->image == mono_defaults.corlib) {
4637 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
4638 && strcmp (cmethod->klass->name, "Debugger") == 0) {
4639 if (should_insert_brekpoint (cfg->method))
4640 MONO_INST_NEW (cfg, ins, OP_BREAK);
4642 MONO_INST_NEW (cfg, ins, OP_NOP);
4643 MONO_ADD_INS (cfg->cbb, ins);
4646 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
4647 && strcmp (cmethod->klass->name, "Environment") == 0) {
4649 EMIT_NEW_ICONST (cfg, ins, 1);
4651 EMIT_NEW_ICONST (cfg, ins, 0);
4655 } else if (cmethod->klass == mono_defaults.math_class) {
4657 * There is general branches code for Min/Max, but it does not work for
4659 * http://everything2.com/?node_id=1051618
4663 #ifdef MONO_ARCH_SIMD_INTRINSICS
4664 if (cfg->opt & MONO_OPT_SIMD) {
4665 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
4671 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
4675 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect selected runtime-internal calls to cheaper code sequences.
 * Currently only handles managed string allocation: a call to
 * String::InternalAllocateStr is replaced with a direct call to the GC's
 * managed allocator, provided allocation profiling is off and the code is
 * not compiled with MONO_OPT_SHARED.
 * Returns the replacement call instruction when a redirection applies;
 * the remainder of the function (not visible in this chunk) presumably
 * returns NULL otherwise — TODO confirm.
 */
4678 inline static MonoInst*
4679 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4680 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4682 if (method->klass == mono_defaults.string_class) {
4683 /* managed string allocation support */
4684 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
4685 MonoInst *iargs [2];
4686 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4687 MonoMethod *managed_alloc = NULL;
4689 g_assert (vtable); /* Should not fail since it is System.String */
4690 #ifndef MONO_CROSS_COMPILE
4691 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
/* Allocator takes the vtable and the string length (first IL argument) */
4695 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4696 iargs [1] = args [0];
4697 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   Store the inline call arguments from the evaluation stack SP into
 * freshly created local variables, recording them in cfg->args so that
 * the inlined body can load them like ordinary arguments. For an
 * instance call, slot 0 is the 'this' argument and its type is derived
 * from the stack entry; remaining slots use the signature's param types.
 */
4704 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4706 MonoInst *store, *temp;
4709 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4710 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4713 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4714 * would be different than the MonoInst's used to represent arguments, and
4715 * the ldelema implementation can't deal with that.
4716 * Solution: When ldelema is used on an inline argument, create a var for
4717 * it, emit ldelema on that var, and emit the saving code below in
4718 * inline_method () if needed.
4720 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4721 cfg->args [i] = temp;
4722 /* This uses cfg->args [i] which is set by the preceding line */
4723 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4724 store->cil_code = sp [0]->cil_code;
4729 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4730 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4732 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debugging aid: only allow inlining of callees whose full name starts
 * with the prefix given by the MONO_INLINE_CALLED_METHOD_NAME_LIMIT
 * environment variable. Returns TRUE (inline allowed) when no limit is
 * set or the name matches the prefix.
 * NOTE(review): the env value is cached in a function-local static on
 * first use; initialization is not synchronized — presumably only used
 * single-threaded during debugging.
 */
4734 check_inline_called_method_name_limit (MonoMethod *called_method)
4737 static char *limit = NULL;
4739 if (limit == NULL) {
4740 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4742 if (limit_string != NULL)
4743 limit = limit_string;
/* Empty string means "no limit": the check below then always passes */
4745 limit = (char *) "";
4748 if (limit [0] != '\0') {
4749 char *called_method_name = mono_method_full_name (called_method, TRUE);
4751 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4752 g_free (called_method_name);
4754 //return (strncmp_result <= 0);
4755 return (strncmp_result == 0);
4762 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debugging aid: only allow inlining inside callers whose full name
 * starts with the prefix given by MONO_INLINE_CALLER_METHOD_NAME_LIMIT.
 * Mirrors check_inline_called_method_name_limit () but keys on the
 * caller. Returns TRUE when no limit is set or the name matches.
 */
4764 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4767 static char *limit = NULL;
4769 if (limit == NULL) {
4770 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4771 if (limit_string != NULL) {
4772 limit = limit_string;
/* Empty string means "no limit": the check below then always passes */
4774 limit = (char *) "";
4778 if (limit [0] != '\0') {
4779 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
4781 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4782 g_free (caller_method_name);
4784 //return (strncmp_result <= 0);
4785 return (strncmp_result == 0);
/*
 * inline_method:
 *
 *   Attempt to inline CMETHOD at the current emission point. The caller's
 * per-method compilation state in CFG (locals, args, cil offset tables,
 * current bblock, generic context, ...) is saved into prev_* locals,
 * swapped for the callee's, and restored afterwards regardless of whether
 * the inline succeeds. The callee's body is converted by a recursive call
 * to mono_method_to_ir () between a fresh start bblock (sbblock) and end
 * bblock (ebblock). On success the new bblocks are linked/merged into the
 * caller's graph; on failure the caller's cbb is restored, which discards
 * the newly added bblocks.
 * INLINE_ALWAYS forces the inline regardless of the cost heuristic.
 * Return value convention is not fully visible in this chunk — the
 * success/failure paths' returns are elided; TODO confirm against full
 * source.
 */
4793 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4794 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_always)
4796 MonoInst *ins, *rvar = NULL;
4797 MonoMethodHeader *cheader;
4798 MonoBasicBlock *ebblock, *sbblock;
4800 MonoMethod *prev_inlined_method;
4801 MonoInst **prev_locals, **prev_args;
4802 MonoType **prev_arg_types;
4803 guint prev_real_offset;
4804 GHashTable *prev_cbb_hash;
4805 MonoBasicBlock **prev_cil_offset_to_bb;
4806 MonoBasicBlock *prev_cbb;
4807 unsigned char* prev_cil_start;
4808 guint32 prev_cil_offset_to_bb_len;
4809 MonoMethod *prev_current_method;
4810 MonoGenericContext *prev_generic_context;
4811 gboolean ret_var_set, prev_ret_var_set;
4813 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional debugging filters on callee/caller names (see checks above) */
4815 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4816 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
4819 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4820 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
4824 if (cfg->verbose_level > 2)
4825 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4827 if (!cmethod->inline_info) {
4828 mono_jit_stats.inlineable_methods++;
4829 cmethod->inline_info = 1;
4832 /* allocate local variables */
4833 cheader = mono_method_get_header (cmethod);
4835 if (cheader == NULL || mono_loader_get_last_error ()) {
4836 MonoLoaderError *error = mono_loader_get_last_error ();
4839 mono_metadata_free_mh (cheader);
/* Only surface the loader error when the inline was mandatory */
4840 if (inline_always && error)
4841 mono_cfg_set_exception (cfg, error->exception_type);
4843 mono_loader_clear_error ();
4847 /*Must verify before creating locals as it can cause the JIT to assert.*/
4848 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
4849 mono_metadata_free_mh (cheader);
4853 /* allocate space to store the return value */
4854 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4855 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* Swap in the callee's locals; the caller's are restored below */
4859 prev_locals = cfg->locals;
4860 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4861 for (i = 0; i < cheader->num_locals; ++i)
4862 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4864 /* allocate start and end blocks */
4865 /* This is needed so if the inline is aborted, we can clean up */
4866 NEW_BBLOCK (cfg, sbblock);
4867 sbblock->real_offset = real_offset;
4869 NEW_BBLOCK (cfg, ebblock);
4870 ebblock->block_num = cfg->num_bblocks++;
4871 ebblock->real_offset = real_offset;
/* Save every piece of per-method state we are about to clobber */
4873 prev_args = cfg->args;
4874 prev_arg_types = cfg->arg_types;
4875 prev_inlined_method = cfg->inlined_method;
4876 cfg->inlined_method = cmethod;
4877 cfg->ret_var_set = FALSE;
4878 cfg->inline_depth ++;
4879 prev_real_offset = cfg->real_offset;
4880 prev_cbb_hash = cfg->cbb_hash;
4881 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4882 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4883 prev_cil_start = cfg->cil_start;
4884 prev_cbb = cfg->cbb;
4885 prev_current_method = cfg->current_method;
4886 prev_generic_context = cfg->generic_context;
4887 prev_ret_var_set = cfg->ret_var_set;
/* Recursively convert the callee's IL; costs < 0 signals failure */
4889 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4891 ret_var_set = cfg->ret_var_set;
4893 cfg->inlined_method = prev_inlined_method;
4894 cfg->real_offset = prev_real_offset;
4895 cfg->cbb_hash = prev_cbb_hash;
4896 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4897 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4898 cfg->cil_start = prev_cil_start;
4899 cfg->locals = prev_locals;
4900 cfg->args = prev_args;
4901 cfg->arg_types = prev_arg_types;
4902 cfg->current_method = prev_current_method;
4903 cfg->generic_context = prev_generic_context;
4904 cfg->ret_var_set = prev_ret_var_set;
4905 cfg->inline_depth --;
/* Cost heuristic: accept cheap bodies, or anything when forced */
4907 if ((costs >= 0 && costs < 60) || inline_always) {
4908 if (cfg->verbose_level > 2)
4909 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4911 mono_jit_stats.inlined_methods++;
4913 /* always add some code to avoid block split failures */
4914 MONO_INST_NEW (cfg, ins, OP_NOP);
4915 MONO_ADD_INS (prev_cbb, ins);
4917 prev_cbb->next_bb = sbblock;
4918 link_bblock (cfg, prev_cbb, sbblock);
4921 * Get rid of the begin and end bblocks if possible to aid local
4924 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4926 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4927 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4929 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4930 MonoBasicBlock *prev = ebblock->in_bb [0];
4931 mono_merge_basic_blocks (cfg, prev, ebblock);
4933 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4934 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4935 cfg->cbb = prev_cbb;
4943 * If the inlined method contains only a throw, then the ret var is not
4944 * set, so set it to a dummy value.
4947 static double r8_0 = 0.0;
4949 switch (rvar->type) {
4951 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4954 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4959 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4962 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4963 ins->type = STACK_R8;
4964 ins->inst_p0 = (void*)&r8_0;
4965 ins->dreg = rvar->dreg;
4966 MONO_ADD_INS (cfg->cbb, ins);
4969 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4972 g_assert_not_reached ();
/* Push the return value back onto the caller's evaluation stack */
4976 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
4979 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Inline failed: clear any exception state and discard new bblocks */
4982 if (cfg->verbose_level > 2)
4983 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4984 cfg->exception_type = MONO_EXCEPTION_NONE;
4985 mono_loader_clear_error ();
4987 /* This gets rid of the newly added bblocks */
4988 cfg->cbb = prev_cbb;
4990 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
4995 * Some of these comments may well be out-of-date.
4996 * Design decisions: we do a single pass over the IL code (and we do bblock
4997 * splitting/merging in the few cases when it's required: a back jump to an IL
4998 * address that was not already seen as bblock starting point).
4999 * Code is validated as we go (full verification is still better left to metadata/verify.c).
5000 * Complex operations are decomposed in simpler ones right away. We need to let the
5001 * arch-specific code peek and poke inside this process somehow (except when the
5002 * optimizations can take advantage of the full semantic info of coarse opcodes).
5003 * All the opcodes of the form opcode.s are 'normalized' to opcode.
5004 * MonoInst->opcode initially is the IL opcode or some simplification of that
5005 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
5006 * opcode with value bigger than OP_LAST.
5007 * At this point the IR can be handed over to an interpreter, a dumb code generator
5008 * or to the optimizing code generator that will translate it to SSA form.
5010 * Profiling directed optimizations.
5011 * We may compile by default with few or no optimizations and instrument the code
5012 * or the user may indicate what methods to optimize the most either in a config file
5013 * or through repeated runs where the compiler applies offline the optimizations to
5014 * each method and then decides if it was worth it.
5017 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
5018 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
5019 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
5020 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
5021 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
5022 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
5023 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
5024 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; LOAD_ERROR;}
5026 /* offset from br.s -> br like opcodes */
5027 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return whether the IL address IP belongs to basic block BB, i.e. no
 * other basic block starts at that offset in cfg->cil_offset_to_bb.
 */
5030 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
5032 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
5034 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   First pass over the IL stream [START, END): decode each opcode and
 * create basic blocks (via GET_BBLOCK) at every branch target and at the
 * instruction following a branch/switch, so the main conversion pass can
 * assume all block leaders exist. Also marks the block containing a
 * CEE_THROW as out-of-line, which presumably steers block layout for
 * cold paths.
 */
5038 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
5040 unsigned char *ip = start;
5041 unsigned char *target;
5044 MonoBasicBlock *bblock;
5045 const MonoOpcode *opcode;
5048 cli_addr = ip - start;
5049 i = mono_opcode_value ((const guint8 **)&ip, end);
5052 opcode = &mono_opcodes [i];
/* Advance ip by the operand size of each argument class */
5053 switch (opcode->argument) {
5054 case MonoInlineNone:
5057 case MonoInlineString:
5058 case MonoInlineType:
5059 case MonoInlineField:
5060 case MonoInlineMethod:
5063 case MonoShortInlineR:
5070 case MonoShortInlineVar:
5071 case MonoShortInlineI:
5074 case MonoShortInlineBrTarget:
/* 1-byte signed displacement, relative to the following instruction */
5075 target = start + cli_addr + 2 + (signed char)ip [1];
5076 GET_BBLOCK (cfg, bblock, target);
5079 GET_BBLOCK (cfg, bblock, ip);
5081 case MonoInlineBrTarget:
/* 4-byte signed displacement, relative to the following instruction */
5082 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
5083 GET_BBLOCK (cfg, bblock, target);
5086 GET_BBLOCK (cfg, bblock, ip);
5088 case MonoInlineSwitch: {
5089 guint32 n = read32 (ip + 1);
/* Displacements are relative to the end of the whole switch opcode */
5092 cli_addr += 5 + 4 * n;
5093 target = start + cli_addr;
5094 GET_BBLOCK (cfg, bblock, target);
5096 for (j = 0; j < n; ++j) {
5097 target = start + cli_addr + (gint32)read32 (ip);
5098 GET_BBLOCK (cfg, bblock, target);
5108 g_assert_not_reached ();
5111 if (i == CEE_THROW) {
5112 unsigned char *bb_start = ip - 1;
5114 /* Find the start of the bblock containing the throw */
5116 while ((bb_start >= start) && !bblock) {
5117 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
5121 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve a method TOKEN in the context of method M. For wrapper
 * methods the token indexes the wrapper's own data table instead of the
 * image metadata. "Open" generic methods are allowed (contrast with
 * mini_get_method () below, which rejects them outside generic sharing).
 */
5130 static inline MonoMethod *
5131 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5135 if (m->wrapper_type != MONO_WRAPPER_NONE)
5136 return mono_method_get_wrapper_data (m, token);
5138 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when not compiling with a
 * generic sharing context, a method on an open constructed type is
 * rejected (the elided branch presumably NULLs/flags the result —
 * TODO confirm against full source).
 */
5143 static inline MonoMethod *
5144 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5146 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
5148 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve a type TOKEN in the context of METHOD. Wrapper methods look
 * the token up in their own data table; otherwise the image metadata is
 * consulted with the given generic CONTEXT. The class is initialized
 * before being returned.
 */
5154 static inline MonoClass*
5155 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
5159 if (method->wrapper_type != MONO_WRAPPER_NONE)
5160 klass = mono_method_get_wrapper_data (method, token);
5162 klass = mono_class_get_full (method->klass->image, token, context);
5164 mono_class_init (klass);
5169 * Returns TRUE if the JIT should abort inlining because "callee"
5170 * is influenced by security attributes.
/*
 * check_linkdemand:
 *
 *   Evaluate CAS link demands for a CALLER -> CALLEE call. When the
 * declarative-security check fails with an ECMA link demand, code that
 * throws a SecurityException is emitted in place of/ahead of the call;
 * other failures record MONO_EXCEPTION_SECURITY_LINKDEMAND on the cfg
 * (unless an earlier exception is already pending).
 */
5173 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
/* Only evaluate when inlining (cfg->method != caller) into secured code */
5177 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
5181 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
5182 if (result == MONO_JIT_SECURITY_OK)
5185 if (result == MONO_JIT_LINKDEMAND_ECMA) {
5186 /* Generate code to throw a SecurityException before the actual call/link */
5187 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5190 NEW_ICONST (cfg, args [0], 4);
5191 NEW_METHODCONST (cfg, args [1], caller);
5192 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
5193 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
5194 /* don't hide previous results */
5195 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
5196 cfg->exception_data = result;
/*
 * throw_exception:
 *
 *   Return (lazily resolving and caching on first use) the
 * SecurityManager::ThrowException(exception) helper method used to raise
 * security exceptions from JITted code.
 */
5204 throw_exception (void)
5206 static MonoMethod *method = NULL;
5209 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5210 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit a call to SecurityManager::ThrowException that raises EX at
 * runtime. The exception object pointer is embedded as a constant.
 */
5217 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
5219 MonoMethod *thrower = throw_exception ();
5222 EMIT_NEW_PCONST (cfg, args [0], ex);
5223 mono_emit_method_call (cfg, thrower, args, NULL);
5227 * Return the original method if a wrapper is specified. We can only access
5228 * the custom attributes from the original method.
5231 get_original_method (MonoMethod *method)
5233 if (method->wrapper_type == MONO_WRAPPER_NONE)
5236 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
5237 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
5240 /* in other cases we need to find the original method */
5241 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: if CALLER (unwrapped via
 * get_original_method ()) may not access FIELD, emit code that throws
 * the security exception at runtime.
 */
5245 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
5246 MonoBasicBlock *bblock, unsigned char *ip)
5248 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5249 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
5251 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: if CALLER (unwrapped via
 * get_original_method ()) may not call CALLEE, emit code that throws
 * the security exception at runtime.
 */
5255 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
5256 MonoBasicBlock *bblock, unsigned char *ip)
5258 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5259 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
5261 emit_throw_exception (cfg, ex);
5265 * Check that the IL instructions at ip are the array initialization
5266 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *
 *   Pattern-match the canonical "dup; ldtoken <field>; call
 * RuntimeHelpers::InitializeArray" sequence that follows a newarr, and
 * return a pointer to the static field's RVA data so the caller can
 * replace the call with a direct memory copy. For AOT (and non-dynamic
 * images) the RVA itself is returned and resolved at load time. Only
 * element types that need no byte swapping on this target are accepted.
 */
5269 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
5272 * newarr[System.Int32]
5274 * ldtoken field valuetype ...
5275 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip[5] == 0x4 checks the ldtoken token's table byte (Field) */
5277 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
5278 guint32 token = read32 (ip + 7);
5279 guint32 field_token = read32 (ip + 2);
5280 guint32 field_index = field_token & 0xffffff;
5282 const char *data_ptr;
5284 MonoMethod *cmethod;
5285 MonoClass *dummy_class;
5286 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
5292 *out_field_token = field_token;
5294 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Bail out unless the callee really is corlib's RuntimeHelpers::InitializeArray */
5297 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
5299 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
5300 case MONO_TYPE_BOOLEAN:
5304 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
5305 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
5306 case MONO_TYPE_CHAR:
5316 return NULL; /* stupid ARM FP swapped format */
/* The blob must be at least as large as the requested array data */
5326 if (size > mono_type_size (field->type, &dummy_align))
5329 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
5330 if (!method->klass->image->dynamic) {
5331 field_index = read32 (ip + 2) & 0xffffff;
5332 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
5333 data_ptr = mono_image_rva_map (method->klass->image, rva);
5334 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
5335 /* for aot code we do the lookup on load */
5336 if (aot && data_ptr)
5337 return GUINT_TO_POINTER (rva);
5339 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
5341 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG for malformed IL at IP,
 * building a diagnostic message that includes the method name and a
 * disassembly of the offending instruction (or a note that the body is
 * empty). The header is queued for deferred freeing on cfg->mempool.
 */
5349 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
5351 char *method_fname = mono_method_full_name (method, TRUE);
5353 MonoMethodHeader *header = mono_method_get_header (method);
5355 if (header->code_size == 0)
5356 method_code = g_strdup ("method body is empty.");
5358 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
5359 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
5360 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
5361 g_free (method_fname);
5362 g_free (method_code);
5363 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *
 *   Abort compilation with a pre-built managed exception object. The
 * object is stored in cfg->exception_ptr, which is registered as a GC
 * root so the exception survives until it is thrown.
 */
5367 set_exception_object (MonoCompile *cfg, MonoException *exception)
5369 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
5370 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
5371 cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 *
 *   Return whether KLASS is a reference type, resolving generic type
 * parameters through the compile's generic sharing context first so
 * shared-code instantiations are classified correctly.
 */
5375 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5379 if (cfg->generic_sharing_context)
5380 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
5382 type = &klass->byval_arg;
5383 return MONO_TYPE_IS_REFERENCE (type);
/*
 * emit_stloc_ir:
 *
 *   Emit a store of the top-of-stack value SP[0] into local N. When the
 * value being stored is a freshly emitted ICONST/I8CONST that would just
 * be reg-reg moved, retarget its destination register to the local's
 * register instead of emitting a separate move.
 */
5387 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5390 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5391 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5392 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5393 /* Optimize reg-reg moves away */
5395 * Can't optimize other opcodes, since sp[0] might point to
5396 * the last ins of a decomposed opcode.
5398 sp [0]->dreg = (cfg)->locals [n]->dreg;
5400 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5405 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *
 *   Peephole for "ldloca <n>; initobj <T>": instead of taking the local's
 * address (which would pin it in memory and inhibit optimizations), zero
 * the local directly — a NULL store for reference types, a VZERO for
 * value types. SIZE selects the short/long encoding of ldloca.
 * Returns the advanced ip on success — TODO confirm, the success-path
 * return is elided in this chunk.
 */
5408 static inline unsigned char *
5409 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
5418 local = read16 (ip + 2);
/* Only fold when the following initobj is in the same basic block */
5422 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5423 gboolean skip = FALSE;
5425 /* From the INITOBJ case */
5426 token = read32 (ip + 2);
5427 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5428 CHECK_TYPELOAD (klass);
5429 if (generic_class_is_reference_type (cfg, klass)) {
5430 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5431 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5432 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5433 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5434 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/*
 * is_exception_class:
 *
 *   Walk the inheritance chain of CLASS and return whether it derives
 * from (or is) System.Exception.
 */
5447 is_exception_class (MonoClass *class)
5450 if (class == mono_defaults.exception_class)
5452 class = class->parent;
5458 * is_jit_optimizer_disabled:
5460 * Determine whether M's assembly has a DebuggableAttribute with the
5461 * IsJITOptimizerDisabled flag set.
5464 is_jit_optimizer_disabled (MonoMethod *m)
5466 MonoAssembly *ass = m->klass->image->assembly;
5467 MonoCustomAttrInfo* attrs;
5468 static MonoClass *klass;
5470 gboolean val = FALSE;
/* Result is cached per assembly; the barrier below orders the stores */
5473 if (ass->jit_optimizer_disabled_inited)
5474 return ass->jit_optimizer_disabled;
5476 klass = mono_class_from_name_cached (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
5478 attrs = mono_custom_attrs_from_assembly (ass);
5480 for (i = 0; i < attrs->num_attrs; ++i) {
5481 MonoCustomAttrEntry *attr = &attrs->attrs [i];
5484 MonoMethodSignature *sig;
5486 if (!attr->ctor || attr->ctor->klass != klass)
5488 /* Decode the attribute. See reflection.c */
5489 len = attr->data_size;
5490 p = (const char*)attr->data;
/* Custom attribute blobs start with the 0x0001 prolog (ECMA-335 II.23.3) */
5491 g_assert (read16 (p) == 0x0001);
5494 // FIXME: Support named parameters
5495 sig = mono_method_signature (attr->ctor);
/* Only the DebuggableAttribute(bool, bool) ctor overload is decoded */
5496 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
5498 /* Two boolean arguments */
/* Publish the cached value before the inited flag becomes visible */
5504 ass->jit_optimizer_disabled = val;
5505 mono_memory_barrier ();
5506 ass->jit_optimizer_disabled_inited = TRUE;
5512 * mono_method_to_ir:
5514 * Translate the .net IL into linear IR.
5517 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5518 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5519 guint inline_offset, gboolean is_virtual_call)
5522 MonoInst *ins, **sp, **stack_start;
5523 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5524 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
5525 MonoMethod *cmethod, *method_definition;
5526 MonoInst **arg_array;
5527 MonoMethodHeader *header;
5529 guint32 token, ins_flag;
5531 MonoClass *constrained_call = NULL;
5532 unsigned char *ip, *end, *target, *err_pos;
5533 static double r8_0 = 0.0;
5534 MonoMethodSignature *sig;
5535 MonoGenericContext *generic_context = NULL;
5536 MonoGenericContainer *generic_container = NULL;
5537 MonoType **param_types;
5538 int i, n, start_new_bblock, dreg;
5539 int num_calls = 0, inline_costs = 0;
5540 int breakpoint_id = 0;
5542 MonoBoolean security, pinvoke;
5543 MonoSecurityManager* secman = NULL;
5544 MonoDeclSecurityActions actions;
5545 GSList *class_inits = NULL;
5546 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5548 gboolean init_locals, seq_points, skip_dead_blocks;
5549 gboolean disable_inline;
5551 disable_inline = is_jit_optimizer_disabled (method);
5553 /* serialization and xdomain stuff may need access to private fields and methods */
5554 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5555 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5556 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5557 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5558 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5559 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5561 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5563 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5564 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5565 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5566 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5568 image = method->klass->image;
5569 header = mono_method_get_header (method);
5571 MonoLoaderError *error;
5573 if ((error = mono_loader_get_last_error ())) {
5574 mono_cfg_set_exception (cfg, error->exception_type);
5576 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
5577 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
5579 goto exception_exit;
5581 generic_container = mono_method_get_generic_container (method);
5582 sig = mono_method_signature (method);
5583 num_args = sig->hasthis + sig->param_count;
5584 ip = (unsigned char*)header->code;
5585 cfg->cil_start = ip;
5586 end = ip + header->code_size;
5587 mono_jit_stats.cil_code_size += header->code_size;
5588 init_locals = header->init_locals;
5590 seq_points = cfg->gen_seq_points && cfg->method == method;
5593 * Methods without init_locals set could cause asserts in various passes
5598 method_definition = method;
5599 while (method_definition->is_inflated) {
5600 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5601 method_definition = imethod->declaring;
5604 /* SkipVerification is not allowed if core-clr is enabled */
5605 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5607 dont_verify_stloc = TRUE;
5610 if (mono_debug_using_mono_debugger ())
5611 cfg->keep_cil_nops = TRUE;
5613 if (sig->is_inflated)
5614 generic_context = mono_method_get_context (method);
5615 else if (generic_container)
5616 generic_context = &generic_container->context;
5617 cfg->generic_context = generic_context;
5619 if (!cfg->generic_sharing_context)
5620 g_assert (!sig->has_type_parameters);
5622 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5623 g_assert (method->is_inflated);
5624 g_assert (mono_method_get_context (method)->method_inst);
5626 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5627 g_assert (sig->generic_param_count);
5629 if (cfg->method == method) {
5630 cfg->real_offset = 0;
5632 cfg->real_offset = inline_offset;
5635 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5636 cfg->cil_offset_to_bb_len = header->code_size;
5638 cfg->current_method = method;
5640 if (cfg->verbose_level > 2)
5641 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5643 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5645 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5646 for (n = 0; n < sig->param_count; ++n)
5647 param_types [n + sig->hasthis] = sig->params [n];
5648 cfg->arg_types = param_types;
5650 dont_inline = g_list_prepend (dont_inline, method);
5651 if (cfg->method == method) {
5653 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5654 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5657 NEW_BBLOCK (cfg, start_bblock);
5658 cfg->bb_entry = start_bblock;
5659 start_bblock->cil_code = NULL;
5660 start_bblock->cil_length = 0;
5663 NEW_BBLOCK (cfg, end_bblock);
5664 cfg->bb_exit = end_bblock;
5665 end_bblock->cil_code = NULL;
5666 end_bblock->cil_length = 0;
5667 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
5668 g_assert (cfg->num_bblocks == 2);
5670 arg_array = cfg->args;
5672 if (header->num_clauses) {
5673 cfg->spvars = g_hash_table_new (NULL, NULL);
5674 cfg->exvars = g_hash_table_new (NULL, NULL);
5676 /* handle exception clauses */
5677 for (i = 0; i < header->num_clauses; ++i) {
5678 MonoBasicBlock *try_bb;
5679 MonoExceptionClause *clause = &header->clauses [i];
5680 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5681 try_bb->real_offset = clause->try_offset;
5682 try_bb->try_start = TRUE;
5683 try_bb->region = ((i + 1) << 8) | clause->flags;
5684 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5685 tblock->real_offset = clause->handler_offset;
5686 tblock->flags |= BB_EXCEPTION_HANDLER;
5688 link_bblock (cfg, try_bb, tblock);
5690 if (*(ip + clause->handler_offset) == CEE_POP)
5691 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5693 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5694 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5695 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5697 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
5698 MONO_ADD_INS (tblock, ins);
5700 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5701 MONO_ADD_INS (tblock, ins);
5703 /* todo: is a fault block unsafe to optimize? */
5704 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5705 tblock->flags |= BB_EXCEPTION_UNSAFE;
5709 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5711 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5713 /* catch and filter blocks get the exception object on the stack */
5714 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5715 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5716 MonoInst *dummy_use;
5718 /* mostly like handle_stack_args (), but just sets the input args */
5719 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5720 tblock->in_scount = 1;
5721 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5722 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5725 * Add a dummy use for the exvar so its liveness info will be
5729 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5731 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5732 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5733 tblock->flags |= BB_EXCEPTION_HANDLER;
5734 tblock->real_offset = clause->data.filter_offset;
5735 tblock->in_scount = 1;
5736 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5737 /* The filter block shares the exvar with the handler block */
5738 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5739 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5740 MONO_ADD_INS (tblock, ins);
5744 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5745 clause->data.catch_class &&
5746 cfg->generic_sharing_context &&
5747 mono_class_check_context_used (clause->data.catch_class)) {
5749 * In shared generic code with catch
5750 * clauses containing type variables
5751 * the exception handling code has to
5752 * be able to get to the rgctx.
5753 * Therefore we have to make sure that
5754 * the vtable/mrgctx argument (for
5755 * static or generic methods) or the
5756 * "this" argument (for non-static
5757 * methods) are live.
5759 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5760 mini_method_get_context (method)->method_inst ||
5761 method->klass->valuetype) {
5762 mono_get_vtable_var (cfg);
5764 MonoInst *dummy_use;
5766 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5771 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5772 cfg->cbb = start_bblock;
5773 cfg->args = arg_array;
5774 mono_save_args (cfg, sig, inline_args);
5777 /* FIRST CODE BLOCK */
5778 NEW_BBLOCK (cfg, bblock);
5779 bblock->cil_code = ip;
5783 ADD_BBLOCK (cfg, bblock);
5785 if (cfg->method == method) {
5786 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5787 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5788 MONO_INST_NEW (cfg, ins, OP_BREAK);
5789 MONO_ADD_INS (bblock, ins);
5793 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5794 secman = mono_security_manager_get_methods ();
5796 security = (secman && mono_method_has_declsec (method));
5797 /* at this point having security doesn't mean we have any code to generate */
5798 if (security && (cfg->method == method)) {
5799 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5800 * And we do not want to enter the next section (with allocation) if we
5801 * have nothing to generate */
5802 security = mono_declsec_get_demands (method, &actions);
5805 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5806 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5808 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5809 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5810 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5812 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5813 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5817 mono_custom_attrs_free (custom);
5820 custom = mono_custom_attrs_from_class (wrapped->klass);
5821 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5825 mono_custom_attrs_free (custom);
5828 /* not a P/Invoke after all */
5833 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5834 /* we use a separate basic block for the initialization code */
5835 NEW_BBLOCK (cfg, init_localsbb);
5836 cfg->bb_init = init_localsbb;
5837 init_localsbb->real_offset = cfg->real_offset;
5838 start_bblock->next_bb = init_localsbb;
5839 init_localsbb->next_bb = bblock;
5840 link_bblock (cfg, start_bblock, init_localsbb);
5841 link_bblock (cfg, init_localsbb, bblock);
5843 cfg->cbb = init_localsbb;
5845 start_bblock->next_bb = bblock;
5846 link_bblock (cfg, start_bblock, bblock);
5849 /* at this point we know, if security is TRUE, that some code needs to be generated */
5850 if (security && (cfg->method == method)) {
5853 mono_jit_stats.cas_demand_generation++;
5855 if (actions.demand.blob) {
5856 /* Add code for SecurityAction.Demand */
5857 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5858 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5859 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5860 mono_emit_method_call (cfg, secman->demand, args, NULL);
5862 if (actions.noncasdemand.blob) {
5863 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5864 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5865 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5866 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5867 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5868 mono_emit_method_call (cfg, secman->demand, args, NULL);
5870 if (actions.demandchoice.blob) {
5871 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5872 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5873 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5874 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5875 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5879 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5881 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5884 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5885 /* check if this is native code, e.g. an icall or a p/invoke */
5886 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5887 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5889 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
5890 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
5892 /* if this ia a native call then it can only be JITted from platform code */
5893 if ((icall || pinvk) && method->klass && method->klass->image) {
5894 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
5895 MonoException *ex = icall ? mono_get_exception_security () :
5896 mono_get_exception_method_access ();
5897 emit_throw_exception (cfg, ex);
5904 if (header->code_size == 0)
5907 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5912 if (cfg->method == method)
5913 mono_debug_init_method (cfg, bblock, breakpoint_id);
5915 for (n = 0; n < header->num_locals; ++n) {
5916 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5921 /* We force the vtable variable here for all shared methods
5922 for the possibility that they might show up in a stack
5923 trace where their exact instantiation is needed. */
5924 if (cfg->generic_sharing_context && method == cfg->method) {
5925 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5926 mini_method_get_context (method)->method_inst ||
5927 method->klass->valuetype) {
5928 mono_get_vtable_var (cfg);
5930 /* FIXME: Is there a better way to do this?
5931 We need the variable live for the duration
5932 of the whole method. */
5933 cfg->args [0]->flags |= MONO_INST_INDIRECT;
5937 /* add a check for this != NULL to inlined methods */
5938 if (is_virtual_call) {
5941 NEW_ARGLOAD (cfg, arg_ins, 0);
5942 MONO_ADD_INS (cfg->cbb, arg_ins);
5943 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
5946 skip_dead_blocks = !dont_verify;
5947 if (skip_dead_blocks) {
5948 original_bb = bb = mono_basic_block_split (method, &error);
5949 if (!mono_error_ok (&error)) {
5950 mono_error_cleanup (&error);
5956 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5957 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5960 start_new_bblock = 0;
5963 if (cfg->method == method)
5964 cfg->real_offset = ip - header->code;
5966 cfg->real_offset = inline_offset;
5971 if (start_new_bblock) {
5972 bblock->cil_length = ip - bblock->cil_code;
5973 if (start_new_bblock == 2) {
5974 g_assert (ip == tblock->cil_code);
5976 GET_BBLOCK (cfg, tblock, ip);
5978 bblock->next_bb = tblock;
5981 start_new_bblock = 0;
5982 for (i = 0; i < bblock->in_scount; ++i) {
5983 if (cfg->verbose_level > 3)
5984 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5985 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5989 g_slist_free (class_inits);
5992 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5993 link_bblock (cfg, bblock, tblock);
5994 if (sp != stack_start) {
5995 handle_stack_args (cfg, stack_start, sp - stack_start);
5997 CHECK_UNVERIFIABLE (cfg);
5999 bblock->next_bb = tblock;
6002 for (i = 0; i < bblock->in_scount; ++i) {
6003 if (cfg->verbose_level > 3)
6004 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
6005 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
6008 g_slist_free (class_inits);
6013 if (skip_dead_blocks) {
6014 int ip_offset = ip - header->code;
6016 if (ip_offset == bb->end)
6020 int op_size = mono_opcode_size (ip, end);
6021 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
6023 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
6025 if (ip_offset + op_size == bb->end) {
6026 MONO_INST_NEW (cfg, ins, OP_NOP);
6027 MONO_ADD_INS (bblock, ins);
6028 start_new_bblock = 1;
6036 * Sequence points are points where the debugger can place a breakpoint.
6037 * Currently, we generate these automatically at points where the IL
6040 if (seq_points && sp == stack_start) {
6041 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
6042 MONO_ADD_INS (cfg->cbb, ins);
6045 bblock->real_offset = cfg->real_offset;
6047 if ((cfg->method == method) && cfg->coverage_info) {
6048 guint32 cil_offset = ip - header->code;
6049 cfg->coverage_info->data [cil_offset].cil_code = ip;
6051 /* TODO: Use an increment here */
6052 #if defined(TARGET_X86)
6053 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
6054 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
6056 MONO_ADD_INS (cfg->cbb, ins);
6058 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
6059 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
6063 if (cfg->verbose_level > 3)
6064 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
6068 if (cfg->keep_cil_nops)
6069 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
6071 MONO_INST_NEW (cfg, ins, OP_NOP);
6073 MONO_ADD_INS (bblock, ins);
6076 if (should_insert_brekpoint (cfg->method))
6077 MONO_INST_NEW (cfg, ins, OP_BREAK);
6079 MONO_INST_NEW (cfg, ins, OP_NOP);
6081 MONO_ADD_INS (bblock, ins);
6087 CHECK_STACK_OVF (1);
6088 n = (*ip)-CEE_LDARG_0;
6090 EMIT_NEW_ARGLOAD (cfg, ins, n);
6098 CHECK_STACK_OVF (1);
6099 n = (*ip)-CEE_LDLOC_0;
6101 EMIT_NEW_LOCLOAD (cfg, ins, n);
6110 n = (*ip)-CEE_STLOC_0;
6113 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
6115 emit_stloc_ir (cfg, sp, header, n);
6122 CHECK_STACK_OVF (1);
6125 EMIT_NEW_ARGLOAD (cfg, ins, n);
6131 CHECK_STACK_OVF (1);
6134 NEW_ARGLOADA (cfg, ins, n);
6135 MONO_ADD_INS (cfg->cbb, ins);
6145 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
6147 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
6152 CHECK_STACK_OVF (1);
6155 EMIT_NEW_LOCLOAD (cfg, ins, n);
6159 case CEE_LDLOCA_S: {
6160 unsigned char *tmp_ip;
6162 CHECK_STACK_OVF (1);
6163 CHECK_LOCAL (ip [1]);
6165 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
6171 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
6180 CHECK_LOCAL (ip [1]);
6181 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
6183 emit_stloc_ir (cfg, sp, header, ip [1]);
6188 CHECK_STACK_OVF (1);
6189 EMIT_NEW_PCONST (cfg, ins, NULL);
6190 ins->type = STACK_OBJ;
6195 CHECK_STACK_OVF (1);
6196 EMIT_NEW_ICONST (cfg, ins, -1);
6209 CHECK_STACK_OVF (1);
6210 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
6216 CHECK_STACK_OVF (1);
6218 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
6224 CHECK_STACK_OVF (1);
6225 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
6231 CHECK_STACK_OVF (1);
6232 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6233 ins->type = STACK_I8;
6234 ins->dreg = alloc_dreg (cfg, STACK_I8);
6236 ins->inst_l = (gint64)read64 (ip);
6237 MONO_ADD_INS (bblock, ins);
6243 gboolean use_aotconst = FALSE;
6245 #ifdef TARGET_POWERPC
6246 /* FIXME: Clean this up */
6247 if (cfg->compile_aot)
6248 use_aotconst = TRUE;
6251 /* FIXME: we should really allocate this only late in the compilation process */
6252 f = mono_domain_alloc (cfg->domain, sizeof (float));
6254 CHECK_STACK_OVF (1);
6260 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
6262 dreg = alloc_freg (cfg);
6263 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
6264 ins->type = STACK_R8;
6266 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6267 ins->type = STACK_R8;
6268 ins->dreg = alloc_dreg (cfg, STACK_R8);
6270 MONO_ADD_INS (bblock, ins);
6280 gboolean use_aotconst = FALSE;
6282 #ifdef TARGET_POWERPC
6283 /* FIXME: Clean this up */
6284 if (cfg->compile_aot)
6285 use_aotconst = TRUE;
6288 /* FIXME: we should really allocate this only late in the compilation process */
6289 d = mono_domain_alloc (cfg->domain, sizeof (double));
6291 CHECK_STACK_OVF (1);
6297 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
6299 dreg = alloc_freg (cfg);
6300 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
6301 ins->type = STACK_R8;
6303 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6304 ins->type = STACK_R8;
6305 ins->dreg = alloc_dreg (cfg, STACK_R8);
6307 MONO_ADD_INS (bblock, ins);
6316 MonoInst *temp, *store;
6318 CHECK_STACK_OVF (1);
6322 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
6323 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
6325 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6328 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6341 if (sp [0]->type == STACK_R8)
6342 /* we need to pop the value from the x86 FP stack */
6343 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
6352 if (stack_start != sp)
6354 token = read32 (ip + 1);
6355 /* FIXME: check the signature matches */
6356 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6358 if (!cmethod || mono_loader_get_last_error ())
6361 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
6362 GENERIC_SHARING_FAILURE (CEE_JMP);
6364 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
6365 CHECK_CFG_EXCEPTION;
6367 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6369 MonoMethodSignature *fsig = mono_method_signature (cmethod);
6372 /* Handle tail calls similarly to calls */
6373 n = fsig->param_count + fsig->hasthis;
6375 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
6376 call->method = cmethod;
6377 call->tail_call = TRUE;
6378 call->signature = mono_method_signature (cmethod);
6379 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6380 call->inst.inst_p0 = cmethod;
6381 for (i = 0; i < n; ++i)
6382 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
6384 mono_arch_emit_call (cfg, call);
6385 MONO_ADD_INS (bblock, (MonoInst*)call);
6388 for (i = 0; i < num_args; ++i)
6389 /* Prevent arguments from being optimized away */
6390 arg_array [i]->flags |= MONO_INST_VOLATILE;
6392 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6393 ins = (MonoInst*)call;
6394 ins->inst_p0 = cmethod;
6395 MONO_ADD_INS (bblock, ins);
6399 start_new_bblock = 1;
6404 case CEE_CALLVIRT: {
6405 MonoInst *addr = NULL;
6406 MonoMethodSignature *fsig = NULL;
6408 int virtual = *ip == CEE_CALLVIRT;
6409 int calli = *ip == CEE_CALLI;
6410 gboolean pass_imt_from_rgctx = FALSE;
6411 MonoInst *imt_arg = NULL;
6412 gboolean pass_vtable = FALSE;
6413 gboolean pass_mrgctx = FALSE;
6414 MonoInst *vtable_arg = NULL;
6415 gboolean check_this = FALSE;
6416 gboolean supported_tail_call = FALSE;
6419 token = read32 (ip + 1);
6426 if (method->wrapper_type != MONO_WRAPPER_NONE)
6427 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6429 fsig = mono_metadata_parse_signature (image, token);
6431 n = fsig->param_count + fsig->hasthis;
6433 if (method->dynamic && fsig->pinvoke) {
6437 * This is a call through a function pointer using a pinvoke
6438 * signature. Have to create a wrapper and call that instead.
6439 * FIXME: This is very slow, need to create a wrapper at JIT time
6440 * instead based on the signature.
6442 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
6443 EMIT_NEW_PCONST (cfg, args [1], fsig);
6445 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6448 MonoMethod *cil_method;
6450 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6451 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6452 cil_method = cmethod;
6453 } else if (constrained_call) {
6454 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6456 * This is needed since get_method_constrained can't find
6457 * the method in klass representing a type var.
6458 * The type var is guaranteed to be a reference type in this
6461 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6462 cil_method = cmethod;
6463 g_assert (!cmethod->klass->valuetype);
6465 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6468 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6469 cil_method = cmethod;
6472 if (!cmethod || mono_loader_get_last_error ())
6474 if (!dont_verify && !cfg->skip_visibility) {
6475 MonoMethod *target_method = cil_method;
6476 if (method->is_inflated) {
6477 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6479 if (!mono_method_can_access_method (method_definition, target_method) &&
6480 !mono_method_can_access_method (method, cil_method))
6481 METHOD_ACCESS_FAILURE;
6484 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6485 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6487 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6488 /* MS.NET seems to silently convert this to a callvirt */
6493 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
6494 * converts to a callvirt.
6496 * tests/bug-515884.il is an example of this behavior
6498 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
6499 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
6500 if (!virtual && cmethod->klass->marshalbyref && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
6504 if (!cmethod->klass->inited)
6505 if (!mono_class_init (cmethod->klass))
6508 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6509 mini_class_is_system_array (cmethod->klass)) {
6510 array_rank = cmethod->klass->rank;
6511 fsig = mono_method_signature (cmethod);
6513 fsig = mono_method_signature (cmethod);
6518 if (fsig->pinvoke) {
6519 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6520 check_for_pending_exc, FALSE);
6521 fsig = mono_method_signature (wrapper);
6522 } else if (constrained_call) {
6523 fsig = mono_method_signature (cmethod);
6525 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6529 mono_save_token_info (cfg, image, token, cil_method);
6531 n = fsig->param_count + fsig->hasthis;
6533 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6534 if (check_linkdemand (cfg, method, cmethod))
6536 CHECK_CFG_EXCEPTION;
6539 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6540 g_assert_not_reached ();
6543 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6546 if (!cfg->generic_sharing_context && cmethod)
6547 g_assert (!mono_method_check_context_used (cmethod));
6551 //g_assert (!virtual || fsig->hasthis);
6555 if (constrained_call) {
6557 * We have the `constrained.' prefix opcode.
6559 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6561 * The type parameter is instantiated as a valuetype,
6562 * but that type doesn't override the method we're
6563 * calling, so we need to box `this'.
6565 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6566 ins->klass = constrained_call;
6567 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
6568 CHECK_CFG_EXCEPTION;
6569 } else if (!constrained_call->valuetype) {
6570 int dreg = alloc_preg (cfg);
6573 * The type parameter is instantiated as a reference
6574 * type. We have a managed pointer on the stack, so
6575 * we need to dereference it here.
6577 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6578 ins->type = STACK_OBJ;
6580 } else if (cmethod->klass->valuetype)
6582 constrained_call = NULL;
6585 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6589 * If the callee is a shared method, then its static cctor
6590 * might not get called after the call was patched.
6592 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6593 emit_generic_class_init (cfg, cmethod->klass);
6594 CHECK_TYPELOAD (cmethod->klass);
6597 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6598 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6599 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6600 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6601 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6604 * Pass vtable iff target method might
6605 * be shared, which means that sharing
6606 * is enabled for its class and its
6607 * context is sharable (and it's not a
6610 if (sharing_enabled && context_sharable &&
6611 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6615 if (cmethod && mini_method_get_context (cmethod) &&
6616 mini_method_get_context (cmethod)->method_inst) {
6617 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6618 MonoGenericContext *context = mini_method_get_context (cmethod);
6619 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6621 g_assert (!pass_vtable);
6623 if (sharing_enabled && context_sharable)
6627 if (cfg->generic_sharing_context && cmethod) {
6628 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6630 context_used = mono_method_check_context_used (cmethod);
6632 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6633 /* Generic method interface
6634 calls are resolved via a
6635 helper function and don't
6637 if (!cmethod_context || !cmethod_context->method_inst)
6638 pass_imt_from_rgctx = TRUE;
6642 * If a shared method calls another
6643 * shared method then the caller must
6644 * have a generic sharing context
6645 * because the magic trampoline
6646 * requires it. FIXME: We shouldn't
6647 * have to force the vtable/mrgctx
6648 * variable here. Instead there
6649 * should be a flag in the cfg to
6650 * request a generic sharing context.
6653 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6654 mono_get_vtable_var (cfg);
6659 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6661 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6663 CHECK_TYPELOAD (cmethod->klass);
6664 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6669 g_assert (!vtable_arg);
6671 if (!cfg->compile_aot) {
6673 * emit_get_rgctx_method () calls mono_class_vtable () so check
6674 * for type load errors before.
6676 mono_class_setup_vtable (cmethod->klass);
6677 CHECK_TYPELOAD (cmethod->klass);
6680 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6682 /* !marshalbyref is needed to properly handle generic methods + remoting */
6683 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6684 MONO_METHOD_IS_FINAL (cmethod)) &&
6685 !cmethod->klass->marshalbyref) {
6692 if (pass_imt_from_rgctx) {
6693 g_assert (!pass_vtable);
6696 imt_arg = emit_get_rgctx_method (cfg, context_used,
6697 cmethod, MONO_RGCTX_INFO_METHOD);
6701 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
6703 /* Calling virtual generic methods */
6704 if (cmethod && virtual &&
6705 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6706 !(MONO_METHOD_IS_FINAL (cmethod) &&
6707 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6708 mono_method_signature (cmethod)->generic_param_count) {
6709 MonoInst *this_temp, *this_arg_temp, *store;
6710 MonoInst *iargs [4];
6712 g_assert (mono_method_signature (cmethod)->is_inflated);
6714 /* Prevent inlining of methods that contain indirect calls */
6717 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
6718 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt) {
6719 g_assert (!imt_arg);
6721 g_assert (cmethod->is_inflated);
6722 imt_arg = emit_get_rgctx_method (cfg, context_used,
6723 cmethod, MONO_RGCTX_INFO_METHOD);
6724 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6728 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6729 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6730 MONO_ADD_INS (bblock, store);
6732 /* FIXME: This should be a managed pointer */
6733 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6735 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6736 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6737 cmethod, MONO_RGCTX_INFO_METHOD);
6738 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6739 addr = mono_emit_jit_icall (cfg,
6740 mono_helper_compile_generic_method, iargs);
6742 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6744 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6747 if (!MONO_TYPE_IS_VOID (fsig->ret))
6748 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6750 CHECK_CFG_EXCEPTION;
6757 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6758 supported_tail_call = cmethod && MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
6760 supported_tail_call = cmethod && mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6764 /* FIXME: runtime generic context pointer for jumps? */
6765 /* FIXME: handle this for generic sharing eventually */
6766 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) && supported_tail_call) {
6769 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6772 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6773 /* Handle tail calls similarly to calls */
6774 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE);
6776 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6777 call->tail_call = TRUE;
6778 call->method = cmethod;
6779 call->signature = mono_method_signature (cmethod);
6782 * We implement tail calls by storing the actual arguments into the
6783 * argument variables, then emitting a CEE_JMP.
6785 for (i = 0; i < n; ++i) {
6786 /* Prevent argument from being register allocated */
6787 arg_array [i]->flags |= MONO_INST_VOLATILE;
6788 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6792 ins = (MonoInst*)call;
6793 ins->inst_p0 = cmethod;
6794 ins->inst_p1 = arg_array [0];
6795 MONO_ADD_INS (bblock, ins);
6796 link_bblock (cfg, bblock, end_bblock);
6797 start_new_bblock = 1;
6799 CHECK_CFG_EXCEPTION;
6801 /* skip CEE_RET as well */
6808 * Implement a workaround for the inherent races involved in locking:
6814 * If a thread abort happens between the call to Monitor.Enter () and the start of the
6815 * try block, the Exit () won't be executed, see:
6816 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
6817 * To work around this, we extend such try blocks to include the last x bytes
6818 * of the Monitor.Enter () call.
6820 if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
6821 MonoBasicBlock *tbb;
6823 GET_BBLOCK (cfg, tbb, ip + 5);
6825 * Only extend try blocks with a finally, to avoid catching exceptions thrown
6826 * from Monitor.Enter like ArgumentNullException.
6828 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
6829 /* Mark this bblock as needing to be extended */
6830 tbb->extend_try_block = TRUE;
6834 /* Conversion to a JIT intrinsic */
6835 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6837 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6838 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6843 CHECK_CFG_EXCEPTION;
6851 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6852 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6853 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
6854 !g_list_find (dont_inline, cmethod)) {
6856 gboolean always = FALSE;
6858 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6859 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6860 /* Prevent inlining of methods that call wrappers */
6862 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6866 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, always))) {
6868 cfg->real_offset += 5;
6871 if (!MONO_TYPE_IS_VOID (fsig->ret))
6872 /* *sp is already set by inline_method */
6875 inline_costs += costs;
6881 inline_costs += 10 * num_calls++;
6883 /* Tail recursion elimination */
6884 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6885 gboolean has_vtargs = FALSE;
6888 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6891 /* keep it simple */
6892 for (i = fsig->param_count - 1; i >= 0; i--) {
6893 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6898 for (i = 0; i < n; ++i)
6899 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6900 MONO_INST_NEW (cfg, ins, OP_BR);
6901 MONO_ADD_INS (bblock, ins);
6902 tblock = start_bblock->out_bb [0];
6903 link_bblock (cfg, bblock, tblock);
6904 ins->inst_target_bb = tblock;
6905 start_new_bblock = 1;
6907 /* skip the CEE_RET, too */
6908 if (ip_in_bb (cfg, bblock, ip + 5))
6918 /* Generic sharing */
6919 /* FIXME: only do this for generic methods if
6920 they are not shared! */
6921 if (context_used && !imt_arg && !array_rank &&
6922 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6923 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6924 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6925 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6928 g_assert (cfg->generic_sharing_context && cmethod);
6932 * We are compiling a call to a
6933 * generic method from shared code,
6934 * which means that we have to look up
6935 * the method in the rgctx and do an
6938 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6941 /* Indirect calls */
6943 g_assert (!imt_arg);
6945 if (*ip == CEE_CALL)
6946 g_assert (context_used);
6947 else if (*ip == CEE_CALLI)
6948 g_assert (!vtable_arg);
6950 /* FIXME: what the hell is this??? */
6951 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6952 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6954 /* Prevent inlining of methods with indirect calls */
6959 int rgctx_reg = mono_alloc_preg (cfg);
6961 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6962 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6963 call = (MonoCallInst*)ins;
6964 set_rgctx_arg (cfg, call, rgctx_reg, vtable_arg);
6966 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6968 * Instead of emitting an indirect call, emit a direct call
6969 * with the contents of the aotconst as the patch info.
6971 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6973 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6974 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
6977 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6980 if (!MONO_TYPE_IS_VOID (fsig->ret))
6981 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6983 CHECK_CFG_EXCEPTION;
6994 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6995 if (sp [fsig->param_count]->type == STACK_OBJ) {
6996 MonoInst *iargs [2];
6999 iargs [1] = sp [fsig->param_count];
7001 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
7004 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
7005 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
7006 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
7007 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
7009 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
7012 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
7013 if (!cmethod->klass->element_class->valuetype && !readonly)
7014 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
7015 CHECK_TYPELOAD (cmethod->klass);
7018 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
7021 g_assert_not_reached ();
7024 CHECK_CFG_EXCEPTION;
7031 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
7033 if (!MONO_TYPE_IS_VOID (fsig->ret))
7034 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7036 CHECK_CFG_EXCEPTION;
7046 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
7048 } else if (imt_arg) {
7049 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
7051 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
7054 if (!MONO_TYPE_IS_VOID (fsig->ret))
7055 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7057 CHECK_CFG_EXCEPTION;
7064 if (cfg->method != method) {
7065 /* return from inlined method */
7067 * If in_count == 0, that means the ret is unreachable due to
7068 * being preceded by a throw. In that case, inline_method () will
7069 * handle setting the return value
7070 * (test case: test_0_inline_throw ()).
7072 if (return_var && cfg->cbb->in_count) {
7076 //g_assert (returnvar != -1);
7077 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
7078 cfg->ret_var_set = TRUE;
7082 MonoType *ret_type = mono_method_signature (method)->ret;
7086 * Place a seq point here too even though the IL stack is not
7087 * empty, so a step over on
7090 * will work correctly.
7092 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
7093 MONO_ADD_INS (cfg->cbb, ins);
7096 g_assert (!return_var);
7099 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
7102 if (!cfg->vret_addr) {
7105 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
7107 EMIT_NEW_RETLOADA (cfg, ret_addr);
7109 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
7110 ins->klass = mono_class_from_mono_type (ret_type);
7113 #ifdef MONO_ARCH_SOFT_FLOAT
7114 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
7115 MonoInst *iargs [1];
7119 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
7120 mono_arch_emit_setret (cfg, method, conv);
7122 mono_arch_emit_setret (cfg, method, *sp);
7125 mono_arch_emit_setret (cfg, method, *sp);
7130 if (sp != stack_start)
7132 MONO_INST_NEW (cfg, ins, OP_BR);
7134 ins->inst_target_bb = end_bblock;
7135 MONO_ADD_INS (bblock, ins);
7136 link_bblock (cfg, bblock, end_bblock);
7137 start_new_bblock = 1;
7141 MONO_INST_NEW (cfg, ins, OP_BR);
7143 target = ip + 1 + (signed char)(*ip);
7145 GET_BBLOCK (cfg, tblock, target);
7146 link_bblock (cfg, bblock, tblock);
7147 ins->inst_target_bb = tblock;
7148 if (sp != stack_start) {
7149 handle_stack_args (cfg, stack_start, sp - stack_start);
7151 CHECK_UNVERIFIABLE (cfg);
7153 MONO_ADD_INS (bblock, ins);
7154 start_new_bblock = 1;
7155 inline_costs += BRANCH_COST;
7169 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
7171 target = ip + 1 + *(signed char*)ip;
7177 inline_costs += BRANCH_COST;
7181 MONO_INST_NEW (cfg, ins, OP_BR);
7184 target = ip + 4 + (gint32)read32(ip);
7186 GET_BBLOCK (cfg, tblock, target);
7187 link_bblock (cfg, bblock, tblock);
7188 ins->inst_target_bb = tblock;
7189 if (sp != stack_start) {
7190 handle_stack_args (cfg, stack_start, sp - stack_start);
7192 CHECK_UNVERIFIABLE (cfg);
7195 MONO_ADD_INS (bblock, ins);
7197 start_new_bblock = 1;
7198 inline_costs += BRANCH_COST;
7205 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
7206 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
7207 guint32 opsize = is_short ? 1 : 4;
7209 CHECK_OPSIZE (opsize);
7211 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
7214 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
7219 GET_BBLOCK (cfg, tblock, target);
7220 link_bblock (cfg, bblock, tblock);
7221 GET_BBLOCK (cfg, tblock, ip);
7222 link_bblock (cfg, bblock, tblock);
7224 if (sp != stack_start) {
7225 handle_stack_args (cfg, stack_start, sp - stack_start);
7226 CHECK_UNVERIFIABLE (cfg);
7229 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
7230 cmp->sreg1 = sp [0]->dreg;
7231 type_from_op (cmp, sp [0], NULL);
7234 #if SIZEOF_REGISTER == 4
7235 if (cmp->opcode == OP_LCOMPARE_IMM) {
7236 /* Convert it to OP_LCOMPARE */
7237 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7238 ins->type = STACK_I8;
7239 ins->dreg = alloc_dreg (cfg, STACK_I8);
7241 MONO_ADD_INS (bblock, ins);
7242 cmp->opcode = OP_LCOMPARE;
7243 cmp->sreg2 = ins->dreg;
7246 MONO_ADD_INS (bblock, cmp);
7248 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
7249 type_from_op (ins, sp [0], NULL);
7250 MONO_ADD_INS (bblock, ins);
7251 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
7252 GET_BBLOCK (cfg, tblock, target);
7253 ins->inst_true_bb = tblock;
7254 GET_BBLOCK (cfg, tblock, ip);
7255 ins->inst_false_bb = tblock;
7256 start_new_bblock = 2;
7259 inline_costs += BRANCH_COST;
7274 MONO_INST_NEW (cfg, ins, *ip);
7276 target = ip + 4 + (gint32)read32(ip);
7282 inline_costs += BRANCH_COST;
7286 MonoBasicBlock **targets;
7287 MonoBasicBlock *default_bblock;
7288 MonoJumpInfoBBTable *table;
7289 int offset_reg = alloc_preg (cfg);
7290 int target_reg = alloc_preg (cfg);
7291 int table_reg = alloc_preg (cfg);
7292 int sum_reg = alloc_preg (cfg);
7293 gboolean use_op_switch;
7297 n = read32 (ip + 1);
7300 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
7304 CHECK_OPSIZE (n * sizeof (guint32));
7305 target = ip + n * sizeof (guint32);
7307 GET_BBLOCK (cfg, default_bblock, target);
7308 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7310 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
7311 for (i = 0; i < n; ++i) {
7312 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
7313 targets [i] = tblock;
7314 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
7318 if (sp != stack_start) {
7320 * Link the current bb with the targets as well, so handle_stack_args
7321 * will set their in_stack correctly.
7323 link_bblock (cfg, bblock, default_bblock);
7324 for (i = 0; i < n; ++i)
7325 link_bblock (cfg, bblock, targets [i]);
7327 handle_stack_args (cfg, stack_start, sp - stack_start);
7329 CHECK_UNVERIFIABLE (cfg);
7332 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
7333 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
7336 for (i = 0; i < n; ++i)
7337 link_bblock (cfg, bblock, targets [i]);
7339 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
7340 table->table = targets;
7341 table->table_size = n;
7343 use_op_switch = FALSE;
7345 /* ARM implements SWITCH statements differently */
7346 /* FIXME: Make it use the generic implementation */
7347 if (!cfg->compile_aot)
7348 use_op_switch = TRUE;
7351 if (COMPILE_LLVM (cfg))
7352 use_op_switch = TRUE;
7354 cfg->cbb->has_jump_table = 1;
7356 if (use_op_switch) {
7357 MONO_INST_NEW (cfg, ins, OP_SWITCH);
7358 ins->sreg1 = src1->dreg;
7359 ins->inst_p0 = table;
7360 ins->inst_many_bb = targets;
7361 ins->klass = GUINT_TO_POINTER (n);
7362 MONO_ADD_INS (cfg->cbb, ins);
7364 if (sizeof (gpointer) == 8)
7365 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
7367 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
7369 #if SIZEOF_REGISTER == 8
7370 /* The upper word might not be zero, and we add it to a 64 bit address later */
7371 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
7374 if (cfg->compile_aot) {
7375 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
7377 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
7378 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
7379 ins->inst_p0 = table;
7380 ins->dreg = table_reg;
7381 MONO_ADD_INS (cfg->cbb, ins);
7384 /* FIXME: Use load_memindex */
7385 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
7386 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
7387 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
7389 start_new_bblock = 1;
7390 inline_costs += (BRANCH_COST * 2);
7410 dreg = alloc_freg (cfg);
7413 dreg = alloc_lreg (cfg);
7416 dreg = alloc_preg (cfg);
7419 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
7420 ins->type = ldind_type [*ip - CEE_LDIND_I1];
7421 ins->flags |= ins_flag;
7423 MONO_ADD_INS (bblock, ins);
7438 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
7439 ins->flags |= ins_flag;
7441 MONO_ADD_INS (bblock, ins);
7443 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
7444 emit_write_barrier (cfg, sp [0], sp [1], -1);
7453 MONO_INST_NEW (cfg, ins, (*ip));
7455 ins->sreg1 = sp [0]->dreg;
7456 ins->sreg2 = sp [1]->dreg;
7457 type_from_op (ins, sp [0], sp [1]);
7459 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7461 /* Use the immediate opcodes if possible */
7462 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
7463 int imm_opcode = mono_op_to_op_imm (ins->opcode);
7464 if (imm_opcode != -1) {
7465 ins->opcode = imm_opcode;
7466 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7469 sp [1]->opcode = OP_NOP;
7473 MONO_ADD_INS ((cfg)->cbb, (ins));
7475 *sp++ = mono_decompose_opcode (cfg, ins);
7492 MONO_INST_NEW (cfg, ins, (*ip));
7494 ins->sreg1 = sp [0]->dreg;
7495 ins->sreg2 = sp [1]->dreg;
7496 type_from_op (ins, sp [0], sp [1]);
7498 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7499 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7501 /* FIXME: Pass opcode to is_inst_imm */
7503 /* Use the immediate opcodes if possible */
7504 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7507 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7508 if (imm_opcode != -1) {
7509 ins->opcode = imm_opcode;
7510 if (sp [1]->opcode == OP_I8CONST) {
7511 #if SIZEOF_REGISTER == 8
7512 ins->inst_imm = sp [1]->inst_l;
7514 ins->inst_ls_word = sp [1]->inst_ls_word;
7515 ins->inst_ms_word = sp [1]->inst_ms_word;
7519 ins->inst_imm = (gssize)(sp [1]->inst_c0);
7522 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7523 if (sp [1]->next == NULL)
7524 sp [1]->opcode = OP_NOP;
7527 MONO_ADD_INS ((cfg)->cbb, (ins));
7529 *sp++ = mono_decompose_opcode (cfg, ins);
7542 case CEE_CONV_OVF_I8:
7543 case CEE_CONV_OVF_U8:
7547 /* Special case this earlier so we have long constants in the IR */
7548 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7549 int data = sp [-1]->inst_c0;
7550 sp [-1]->opcode = OP_I8CONST;
7551 sp [-1]->type = STACK_I8;
7552 #if SIZEOF_REGISTER == 8
7553 if ((*ip) == CEE_CONV_U8)
7554 sp [-1]->inst_c0 = (guint32)data;
7556 sp [-1]->inst_c0 = data;
7558 sp [-1]->inst_ls_word = data;
7559 if ((*ip) == CEE_CONV_U8)
7560 sp [-1]->inst_ms_word = 0;
7562 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7564 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7571 case CEE_CONV_OVF_I4:
7572 case CEE_CONV_OVF_I1:
7573 case CEE_CONV_OVF_I2:
7574 case CEE_CONV_OVF_I:
7575 case CEE_CONV_OVF_U:
7578 if (sp [-1]->type == STACK_R8) {
7579 ADD_UNOP (CEE_CONV_OVF_I8);
7586 case CEE_CONV_OVF_U1:
7587 case CEE_CONV_OVF_U2:
7588 case CEE_CONV_OVF_U4:
7591 if (sp [-1]->type == STACK_R8) {
7592 ADD_UNOP (CEE_CONV_OVF_U8);
7599 case CEE_CONV_OVF_I1_UN:
7600 case CEE_CONV_OVF_I2_UN:
7601 case CEE_CONV_OVF_I4_UN:
7602 case CEE_CONV_OVF_I8_UN:
7603 case CEE_CONV_OVF_U1_UN:
7604 case CEE_CONV_OVF_U2_UN:
7605 case CEE_CONV_OVF_U4_UN:
7606 case CEE_CONV_OVF_U8_UN:
7607 case CEE_CONV_OVF_I_UN:
7608 case CEE_CONV_OVF_U_UN:
7615 CHECK_CFG_EXCEPTION;
7619 case CEE_ADD_OVF_UN:
7621 case CEE_MUL_OVF_UN:
7623 case CEE_SUB_OVF_UN:
7631 token = read32 (ip + 1);
7632 klass = mini_get_class (method, token, generic_context);
7633 CHECK_TYPELOAD (klass);
7635 if (generic_class_is_reference_type (cfg, klass)) {
7636 MonoInst *store, *load;
7637 int dreg = alloc_preg (cfg);
7639 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7640 load->flags |= ins_flag;
7641 MONO_ADD_INS (cfg->cbb, load);
7643 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7644 store->flags |= ins_flag;
7645 MONO_ADD_INS (cfg->cbb, store);
7647 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
7648 emit_write_barrier (cfg, sp [0], sp [1], -1);
7650 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7662 token = read32 (ip + 1);
7663 klass = mini_get_class (method, token, generic_context);
7664 CHECK_TYPELOAD (klass);
7666 /* Optimize the common ldobj+stloc combination */
7676 loc_index = ip [5] - CEE_STLOC_0;
7683 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7684 CHECK_LOCAL (loc_index);
7686 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7687 ins->dreg = cfg->locals [loc_index]->dreg;
7693 /* Optimize the ldobj+stobj combination */
7694 /* The reference case ends up being a load+store anyway */
7695 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7700 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7707 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7716 CHECK_STACK_OVF (1);
7718 n = read32 (ip + 1);
7720 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7721 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7722 ins->type = STACK_OBJ;
7725 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7726 MonoInst *iargs [1];
7728 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7729 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7731 if (cfg->opt & MONO_OPT_SHARED) {
7732 MonoInst *iargs [3];
7734 if (cfg->compile_aot) {
7735 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7737 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7738 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7739 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7740 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7741 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7743 if (bblock->out_of_line) {
7744 MonoInst *iargs [2];
7746 if (image == mono_defaults.corlib) {
7748 * Avoid relocations in AOT and save some space by using a
7749 * version of helper_ldstr specialized to mscorlib.
7751 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7752 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7754 /* Avoid creating the string object */
7755 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7756 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7757 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7761 if (cfg->compile_aot) {
7762 NEW_LDSTRCONST (cfg, ins, image, n);
7764 MONO_ADD_INS (bblock, ins);
7767 NEW_PCONST (cfg, ins, NULL);
7768 ins->type = STACK_OBJ;
7769 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7771 OUT_OF_MEMORY_FAILURE;
7774 MONO_ADD_INS (bblock, ins);
7783 MonoInst *iargs [2];
7784 MonoMethodSignature *fsig;
7787 MonoInst *vtable_arg = NULL;
7790 token = read32 (ip + 1);
7791 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7792 if (!cmethod || mono_loader_get_last_error ())
7794 fsig = mono_method_get_signature (cmethod, image, token);
7798 mono_save_token_info (cfg, image, token, cmethod);
7800 if (!mono_class_init (cmethod->klass))
7803 if (cfg->generic_sharing_context)
7804 context_used = mono_method_check_context_used (cmethod);
7806 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7807 if (check_linkdemand (cfg, method, cmethod))
7809 CHECK_CFG_EXCEPTION;
7810 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7811 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7814 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
7815 emit_generic_class_init (cfg, cmethod->klass);
7816 CHECK_TYPELOAD (cmethod->klass);
7819 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7820 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7821 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7822 mono_class_vtable (cfg->domain, cmethod->klass);
7823 CHECK_TYPELOAD (cmethod->klass);
7825 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7826 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7829 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7830 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7832 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7834 CHECK_TYPELOAD (cmethod->klass);
7835 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7840 n = fsig->param_count;
7844 * Generate smaller code for the common newobj <exception> instruction in
7845 * argument checking code.
7847 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7848 is_exception_class (cmethod->klass) && n <= 2 &&
7849 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7850 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7851 MonoInst *iargs [3];
7853 g_assert (!vtable_arg);
7857 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7860 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7864 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7869 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7872 g_assert_not_reached ();
7880 /* move the args to allow room for 'this' in the first position */
7886 /* check_call_signature () requires sp[0] to be set */
7887 this_ins.type = STACK_OBJ;
7889 if (check_call_signature (cfg, fsig, sp))
7894 if (mini_class_is_system_array (cmethod->klass)) {
7895 g_assert (!vtable_arg);
7897 *sp = emit_get_rgctx_method (cfg, context_used,
7898 cmethod, MONO_RGCTX_INFO_METHOD);
7900 /* Avoid varargs in the common case */
7901 if (fsig->param_count == 1)
7902 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7903 else if (fsig->param_count == 2)
7904 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7905 else if (fsig->param_count == 3)
7906 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
7908 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7909 } else if (cmethod->string_ctor) {
7910 g_assert (!context_used);
7911 g_assert (!vtable_arg);
7912 /* we simply pass a null pointer */
7913 EMIT_NEW_PCONST (cfg, *sp, NULL);
7914 /* now call the string ctor */
7915 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7917 MonoInst* callvirt_this_arg = NULL;
7919 if (cmethod->klass->valuetype) {
7920 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7921 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7922 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7927 * The code generated by mini_emit_virtual_call () expects
7928 * iargs [0] to be a boxed instance, but luckily the vcall
7929 * will be transformed into a normal call there.
7931 } else if (context_used) {
7932 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
7935 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7937 CHECK_TYPELOAD (cmethod->klass);
7940 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7941 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7942 * As a workaround, we call class cctors before allocating objects.
7944 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7945 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7946 if (cfg->verbose_level > 2)
7947 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7948 class_inits = g_slist_prepend (class_inits, vtable);
7951 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
7954 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
7957 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7959 /* Now call the actual ctor */
7960 /* Avoid virtual calls to ctors if possible */
7961 if (cmethod->klass->marshalbyref)
7962 callvirt_this_arg = sp [0];
7965 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7966 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7967 type_to_eval_stack_type ((cfg), fsig->ret, ins);
7972 CHECK_CFG_EXCEPTION;
7973 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7974 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
7975 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7976 !g_list_find (dont_inline, cmethod)) {
7979 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7980 cfg->real_offset += 5;
7983 inline_costs += costs - 5;
7986 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7988 } else if (context_used &&
7989 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7990 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7991 MonoInst *cmethod_addr;
7993 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7994 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7996 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7999 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
8000 callvirt_this_arg, NULL, vtable_arg);
8004 if (alloc == NULL) {
8006 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
8007 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
8021 token = read32 (ip + 1);
8022 klass = mini_get_class (method, token, generic_context);
8023 CHECK_TYPELOAD (klass);
8024 if (sp [0]->type != STACK_OBJ)
8027 if (cfg->generic_sharing_context)
8028 context_used = mono_class_check_context_used (klass);
8030 if (!context_used && mini_class_has_reference_variant_generic_argument (klass, context_used)) {
8031 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
8038 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8041 /*FIXME AOT support*/
8042 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8044 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
8045 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
8048 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8049 MonoMethod *mono_castclass;
8050 MonoInst *iargs [1];
8053 mono_castclass = mono_marshal_get_castclass (klass);
8056 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
8057 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8058 CHECK_CFG_EXCEPTION;
8059 g_assert (costs > 0);
8062 cfg->real_offset += 5;
8067 inline_costs += costs;
8070 ins = handle_castclass (cfg, klass, *sp, context_used);
8071 CHECK_CFG_EXCEPTION;
8081 token = read32 (ip + 1);
8082 klass = mini_get_class (method, token, generic_context);
8083 CHECK_TYPELOAD (klass);
8084 if (sp [0]->type != STACK_OBJ)
8087 if (cfg->generic_sharing_context)
8088 context_used = mono_class_check_context_used (klass);
8090 if (!context_used && mini_class_has_reference_variant_generic_argument (klass, context_used)) {
8091 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
8098 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8101 /*FIXME AOT support*/
8102 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8104 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
8107 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8108 MonoMethod *mono_isinst;
8109 MonoInst *iargs [1];
8112 mono_isinst = mono_marshal_get_isinst (klass);
8115 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
8116 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8117 CHECK_CFG_EXCEPTION;
8118 g_assert (costs > 0);
8121 cfg->real_offset += 5;
8126 inline_costs += costs;
8129 ins = handle_isinst (cfg, klass, *sp, context_used);
8130 CHECK_CFG_EXCEPTION;
8137 case CEE_UNBOX_ANY: {
8141 token = read32 (ip + 1);
8142 klass = mini_get_class (method, token, generic_context);
8143 CHECK_TYPELOAD (klass);
8145 mono_save_token_info (cfg, image, token, klass);
8147 if (cfg->generic_sharing_context)
8148 context_used = mono_class_check_context_used (klass);
8150 if (generic_class_is_reference_type (cfg, klass)) {
8151 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
8152 if (!context_used && mini_class_has_reference_variant_generic_argument (klass, context_used)) {
8153 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
8160 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8163 /*FIXME AOT support*/
8164 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8166 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
8167 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
8170 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8171 MonoMethod *mono_castclass;
8172 MonoInst *iargs [1];
8175 mono_castclass = mono_marshal_get_castclass (klass);
8178 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
8179 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8180 CHECK_CFG_EXCEPTION;
8181 g_assert (costs > 0);
8184 cfg->real_offset += 5;
8188 inline_costs += costs;
8190 ins = handle_castclass (cfg, klass, *sp, context_used);
8191 CHECK_CFG_EXCEPTION;
8199 if (mono_class_is_nullable (klass)) {
8200 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
8207 ins = handle_unbox (cfg, klass, sp, context_used);
8213 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
8226 token = read32 (ip + 1);
8227 klass = mini_get_class (method, token, generic_context);
8228 CHECK_TYPELOAD (klass);
8230 mono_save_token_info (cfg, image, token, klass);
8232 if (cfg->generic_sharing_context)
8233 context_used = mono_class_check_context_used (klass);
8235 if (generic_class_is_reference_type (cfg, klass)) {
8241 if (klass == mono_defaults.void_class)
8243 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
8245 /* frequent check in generic code: box (struct), brtrue */
8247 // FIXME: LLVM can't handle the inconsistent bb linking
8248 if (!mono_class_is_nullable (klass) &&
8249 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
8250 (ip [5] == CEE_BRTRUE ||
8251 ip [5] == CEE_BRTRUE_S ||
8252 ip [5] == CEE_BRFALSE ||
8253 ip [5] == CEE_BRFALSE_S)) {
8254 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
8256 MonoBasicBlock *true_bb, *false_bb;
8260 if (cfg->verbose_level > 3) {
8261 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8262 printf ("<box+brtrue opt>\n");
8270 target = ip + 1 + (signed char)(*ip);
8277 target = ip + 4 + (gint)(read32 (ip));
8281 g_assert_not_reached ();
8285 * We need to link both bblocks, since it is needed for handling stack
8286 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
8287 * Branching to only one of them would lead to inconsistencies, so
8288 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
8290 GET_BBLOCK (cfg, true_bb, target);
8291 GET_BBLOCK (cfg, false_bb, ip);
8293 mono_link_bblock (cfg, cfg->cbb, true_bb);
8294 mono_link_bblock (cfg, cfg->cbb, false_bb);
8296 if (sp != stack_start) {
8297 handle_stack_args (cfg, stack_start, sp - stack_start);
8299 CHECK_UNVERIFIABLE (cfg);
8302 if (COMPILE_LLVM (cfg)) {
8303 dreg = alloc_ireg (cfg);
8304 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
8305 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
8307 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
8309 /* The JIT can't eliminate the iconst+compare */
8310 MONO_INST_NEW (cfg, ins, OP_BR);
8311 ins->inst_target_bb = is_true ? true_bb : false_bb;
8312 MONO_ADD_INS (cfg->cbb, ins);
8315 start_new_bblock = 1;
8319 *sp++ = handle_box (cfg, val, klass, context_used);
8321 CHECK_CFG_EXCEPTION;
8330 token = read32 (ip + 1);
8331 klass = mini_get_class (method, token, generic_context);
8332 CHECK_TYPELOAD (klass);
8334 mono_save_token_info (cfg, image, token, klass);
8336 if (cfg->generic_sharing_context)
8337 context_used = mono_class_check_context_used (klass);
8339 if (mono_class_is_nullable (klass)) {
8342 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
8343 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
8347 ins = handle_unbox (cfg, klass, sp, context_used);
8357 MonoClassField *field;
8361 if (*ip == CEE_STFLD) {
8368 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
8370 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
8373 token = read32 (ip + 1);
8374 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8375 field = mono_method_get_wrapper_data (method, token);
8376 klass = field->parent;
8379 field = mono_field_from_token (image, token, &klass, generic_context);
8383 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8384 FIELD_ACCESS_FAILURE;
8385 mono_class_init (klass);
8387 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
8388 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
8389 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8390 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8393 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
8394 if (*ip == CEE_STFLD) {
8395 if (target_type_is_incompatible (cfg, field->type, sp [1]))
8397 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8398 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
8399 MonoInst *iargs [5];
8402 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8403 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8404 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
8408 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8409 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
8410 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8411 CHECK_CFG_EXCEPTION;
8412 g_assert (costs > 0);
8414 cfg->real_offset += 5;
8417 inline_costs += costs;
8419 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
8424 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8426 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
8427 if (sp [0]->opcode != OP_LDADDR)
8428 store->flags |= MONO_INST_FAULT;
8430 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
8431 /* insert call to write barrier */
8435 dreg = alloc_preg (cfg);
8436 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8437 emit_write_barrier (cfg, ptr, sp [1], -1);
8440 store->flags |= ins_flag;
8447 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8448 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
8449 MonoInst *iargs [4];
8452 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8453 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8454 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
8455 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8456 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
8457 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8458 CHECK_CFG_EXCEPTION;
8460 g_assert (costs > 0);
8462 cfg->real_offset += 5;
8466 inline_costs += costs;
8468 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
8472 if (sp [0]->type == STACK_VTYPE) {
8475 /* Have to compute the address of the variable */
8477 var = get_vreg_to_inst (cfg, sp [0]->dreg);
8479 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
8481 g_assert (var->klass == klass);
8483 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
8487 if (*ip == CEE_LDFLDA) {
8488 if (sp [0]->type == STACK_OBJ) {
8489 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
8490 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
8493 dreg = alloc_preg (cfg);
8495 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8496 ins->klass = mono_class_from_mono_type (field->type);
8497 ins->type = STACK_MP;
8502 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8504 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
8505 load->flags |= ins_flag;
8506 if (sp [0]->opcode != OP_LDADDR)
8507 load->flags |= MONO_INST_FAULT;
8518 MonoClassField *field;
8519 gpointer addr = NULL;
8520 gboolean is_special_static;
8523 token = read32 (ip + 1);
8525 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8526 field = mono_method_get_wrapper_data (method, token);
8527 klass = field->parent;
8530 field = mono_field_from_token (image, token, &klass, generic_context);
8533 mono_class_init (klass);
8534 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8535 FIELD_ACCESS_FAILURE;
8537 /* if the class is Critical then transparent code cannot access its fields */
8538 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8539 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8542 * We can only support shared generic static
8543 * field access on architectures where the
8544 * trampoline code has been extended to handle
8545 * the generic class init.
8547 #ifndef MONO_ARCH_VTABLE_REG
8548 GENERIC_SHARING_FAILURE (*ip);
8551 if (cfg->generic_sharing_context)
8552 context_used = mono_class_check_context_used (klass);
8554 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
8556 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8557 * to be called here.
8559 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8560 mono_class_vtable (cfg->domain, klass);
8561 CHECK_TYPELOAD (klass);
8563 mono_domain_lock (cfg->domain);
8564 if (cfg->domain->special_static_fields)
8565 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8566 mono_domain_unlock (cfg->domain);
8568 is_special_static = mono_class_field_is_special_static (field);
8570 /* Generate IR to compute the field address */
8571 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
8573 * Fast access to TLS data
8574 * Inline version of get_thread_static_data () in
8578 int idx, static_data_reg, array_reg, dreg;
8579 MonoInst *thread_ins;
8581 // offset &= 0x7fffffff;
8582 // idx = (offset >> 24) - 1;
8583 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
8585 thread_ins = mono_get_thread_intrinsic (cfg);
8586 MONO_ADD_INS (cfg->cbb, thread_ins);
8587 static_data_reg = alloc_ireg (cfg);
8588 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
8590 if (cfg->compile_aot) {
8591 int offset_reg, offset2_reg, idx_reg;
8593 /* For TLS variables, this will return the TLS offset */
8594 EMIT_NEW_SFLDACONST (cfg, ins, field);
8595 offset_reg = ins->dreg;
8596 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
8597 idx_reg = alloc_ireg (cfg);
8598 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
8599 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
8600 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
8601 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
8602 array_reg = alloc_ireg (cfg);
8603 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
8604 offset2_reg = alloc_ireg (cfg);
8605 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
8606 dreg = alloc_ireg (cfg);
8607 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
8609 offset = (gsize)addr & 0x7fffffff;
8610 idx = (offset >> 24) - 1;
8612 array_reg = alloc_ireg (cfg);
8613 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
8614 dreg = alloc_ireg (cfg);
8615 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
8617 } else if ((cfg->opt & MONO_OPT_SHARED) ||
8618 (cfg->compile_aot && is_special_static) ||
8619 (context_used && is_special_static)) {
8620 MonoInst *iargs [2];
8622 g_assert (field->parent);
8623 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8625 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8626 field, MONO_RGCTX_INFO_CLASS_FIELD);
8628 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8630 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8631 } else if (context_used) {
8632 MonoInst *static_data;
8635 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8636 method->klass->name_space, method->klass->name, method->name,
8637 depth, field->offset);
8640 if (mono_class_needs_cctor_run (klass, method))
8641 emit_generic_class_init (cfg, klass);
8644 * The pointer we're computing here is
8646 * super_info.static_data + field->offset
8648 static_data = emit_get_rgctx_klass (cfg, context_used,
8649 klass, MONO_RGCTX_INFO_STATIC_DATA);
8651 if (field->offset == 0) {
8654 int addr_reg = mono_alloc_preg (cfg);
8655 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8657 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8658 MonoInst *iargs [2];
8660 g_assert (field->parent);
8661 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8662 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8663 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8665 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8667 CHECK_TYPELOAD (klass);
8669 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8670 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8671 if (cfg->verbose_level > 2)
8672 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8673 class_inits = g_slist_prepend (class_inits, vtable);
8675 if (cfg->run_cctors) {
8677 /* This makes so that inline cannot trigger */
8678 /* .cctors: too many apps depend on them */
8679 /* running with a specific order... */
8680 if (! vtable->initialized)
8682 ex = mono_runtime_class_init_full (vtable, FALSE);
8684 set_exception_object (cfg, ex);
8685 goto exception_exit;
8689 addr = (char*)vtable->data + field->offset;
8691 if (cfg->compile_aot)
8692 EMIT_NEW_SFLDACONST (cfg, ins, field);
8694 EMIT_NEW_PCONST (cfg, ins, addr);
8696 MonoInst *iargs [1];
8697 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8698 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8702 /* Generate IR to do the actual load/store operation */
8704 if (*ip == CEE_LDSFLDA) {
8705 ins->klass = mono_class_from_mono_type (field->type);
8706 ins->type = STACK_PTR;
8708 } else if (*ip == CEE_STSFLD) {
8713 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
8714 store->flags |= ins_flag;
8716 gboolean is_const = FALSE;
8717 MonoVTable *vtable = NULL;
8719 if (!context_used) {
8720 vtable = mono_class_vtable (cfg->domain, klass);
8721 CHECK_TYPELOAD (klass);
8723 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8724 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8725 gpointer addr = (char*)vtable->data + field->offset;
8726 int ro_type = field->type->type;
8727 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
8728 ro_type = mono_class_enum_basetype (field->type->data.klass)->type;
8730 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8733 case MONO_TYPE_BOOLEAN:
8735 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8739 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8742 case MONO_TYPE_CHAR:
8744 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8748 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8753 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8757 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8763 case MONO_TYPE_FNPTR:
8764 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8765 type_to_eval_stack_type ((cfg), field->type, *sp);
8768 case MONO_TYPE_STRING:
8769 case MONO_TYPE_OBJECT:
8770 case MONO_TYPE_CLASS:
8771 case MONO_TYPE_SZARRAY:
8772 case MONO_TYPE_ARRAY:
8773 if (!mono_gc_is_moving ()) {
8774 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8775 type_to_eval_stack_type ((cfg), field->type, *sp);
8783 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8788 case MONO_TYPE_VALUETYPE:
8798 CHECK_STACK_OVF (1);
8800 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8801 load->flags |= ins_flag;
8814 token = read32 (ip + 1);
8815 klass = mini_get_class (method, token, generic_context);
8816 CHECK_TYPELOAD (klass);
8817 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8818 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8819 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
8820 generic_class_is_reference_type (cfg, klass)) {
8821 /* insert call to write barrier */
8822 emit_write_barrier (cfg, sp [0], sp [1], -1);
8834 const char *data_ptr;
8836 guint32 field_token;
8842 token = read32 (ip + 1);
8844 klass = mini_get_class (method, token, generic_context);
8845 CHECK_TYPELOAD (klass);
8847 if (cfg->generic_sharing_context)
8848 context_used = mono_class_check_context_used (klass);
8850 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
8851 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
8852 ins->sreg1 = sp [0]->dreg;
8853 ins->type = STACK_I4;
8854 ins->dreg = alloc_ireg (cfg);
8855 MONO_ADD_INS (cfg->cbb, ins);
8856 *sp = mono_decompose_opcode (cfg, ins);
8861 MonoClass *array_class = mono_array_class_get (klass, 1);
8862 /* FIXME: we cannot get a managed
8863 allocator because we can't get the
8864 open generic class's vtable. We
8865 have the same problem in
8866 handle_alloc(). This
8867 needs to be solved so that we can
8868 have managed allocs of shared
8871 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
8872 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
8874 MonoMethod *managed_alloc = NULL;
8876 /* FIXME: Decompose later to help abcrem */
8879 args [0] = emit_get_rgctx_klass (cfg, context_used,
8880 array_class, MONO_RGCTX_INFO_VTABLE);
8885 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
8887 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8889 if (cfg->opt & MONO_OPT_SHARED) {
8890 /* Decompose now to avoid problems with references to the domainvar */
8891 MonoInst *iargs [3];
8893 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8894 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8897 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8899 /* Decompose later since it is needed by abcrem */
8900 MonoClass *array_type = mono_array_class_get (klass, 1);
8901 mono_class_vtable (cfg->domain, array_type);
8902 CHECK_TYPELOAD (array_type);
8904 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8905 ins->dreg = alloc_preg (cfg);
8906 ins->sreg1 = sp [0]->dreg;
8907 ins->inst_newa_class = klass;
8908 ins->type = STACK_OBJ;
8910 MONO_ADD_INS (cfg->cbb, ins);
8911 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8912 cfg->cbb->has_array_access = TRUE;
8914 /* Needed so mono_emit_load_get_addr () gets called */
8915 mono_get_got_var (cfg);
8925 * we inline/optimize the initialization sequence if possible.
8926 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8927 * for small sizes open code the memcpy
8928 * ensure the rva field is big enough
8930 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8931 MonoMethod *memcpy_method = get_memcpy_method ();
8932 MonoInst *iargs [3];
8933 int add_reg = alloc_preg (cfg);
8935 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8936 if (cfg->compile_aot) {
8937 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8939 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8941 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8942 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8951 if (sp [0]->type != STACK_OBJ)
8954 dreg = alloc_preg (cfg);
8955 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8956 ins->dreg = alloc_preg (cfg);
8957 ins->sreg1 = sp [0]->dreg;
8958 ins->type = STACK_I4;
8959 /* This flag will be inherited by the decomposition */
8960 ins->flags |= MONO_INST_FAULT;
8961 MONO_ADD_INS (cfg->cbb, ins);
8962 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8963 cfg->cbb->has_array_access = TRUE;
8971 if (sp [0]->type != STACK_OBJ)
8974 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8976 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8977 CHECK_TYPELOAD (klass);
8978 /* we need to make sure that this array is exactly the type it needs
8979 * to be for correctness. the wrappers are lax with their usage
8980 * so we need to ignore them here
8982 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
8983 MonoClass *array_class = mono_array_class_get (klass, 1);
8984 mini_emit_check_array_type (cfg, sp [0], array_class);
8985 CHECK_TYPELOAD (array_class);
8989 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9004 case CEE_LDELEM_REF: {
9010 if (*ip == CEE_LDELEM) {
9012 token = read32 (ip + 1);
9013 klass = mini_get_class (method, token, generic_context);
9014 CHECK_TYPELOAD (klass);
9015 mono_class_init (klass);
9018 klass = array_access_to_klass (*ip);
9020 if (sp [0]->type != STACK_OBJ)
9023 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9025 if (sp [1]->opcode == OP_ICONST) {
9026 int array_reg = sp [0]->dreg;
9027 int index_reg = sp [1]->dreg;
9028 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
9030 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
9031 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
9033 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9034 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
9037 if (*ip == CEE_LDELEM)
9050 case CEE_STELEM_REF:
9057 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9059 if (*ip == CEE_STELEM) {
9061 token = read32 (ip + 1);
9062 klass = mini_get_class (method, token, generic_context);
9063 CHECK_TYPELOAD (klass);
9064 mono_class_init (klass);
9067 klass = array_access_to_klass (*ip);
9069 if (sp [0]->type != STACK_OBJ)
9072 /* storing a NULL doesn't need any of the complex checks in stelemref */
9073 if (generic_class_is_reference_type (cfg, klass) &&
9074 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
9075 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
9076 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
9077 MonoInst *iargs [3];
9080 mono_class_setup_vtable (obj_array);
9081 g_assert (helper->slot);
9083 if (sp [0]->type != STACK_OBJ)
9085 if (sp [2]->type != STACK_OBJ)
9092 mono_emit_method_call (cfg, helper, iargs, sp [0]);
9094 if (sp [1]->opcode == OP_ICONST) {
9095 int array_reg = sp [0]->dreg;
9096 int index_reg = sp [1]->dreg;
9097 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
9099 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
9100 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
9102 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9103 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
9107 if (*ip == CEE_STELEM)
9114 case CEE_CKFINITE: {
9118 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
9119 ins->sreg1 = sp [0]->dreg;
9120 ins->dreg = alloc_freg (cfg);
9121 ins->type = STACK_R8;
9122 MONO_ADD_INS (bblock, ins);
9124 *sp++ = mono_decompose_opcode (cfg, ins);
9129 case CEE_REFANYVAL: {
9130 MonoInst *src_var, *src;
9132 int klass_reg = alloc_preg (cfg);
9133 int dreg = alloc_preg (cfg);
9136 MONO_INST_NEW (cfg, ins, *ip);
9139 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
9140 CHECK_TYPELOAD (klass);
9141 mono_class_init (klass);
9143 if (cfg->generic_sharing_context)
9144 context_used = mono_class_check_context_used (klass);
9147 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9149 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9150 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9151 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
9154 MonoInst *klass_ins;
9156 klass_ins = emit_get_rgctx_klass (cfg, context_used,
9157 klass, MONO_RGCTX_INFO_KLASS);
9160 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
9161 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
9163 mini_emit_class_check (cfg, klass_reg, klass);
9165 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
9166 ins->type = STACK_MP;
9171 case CEE_MKREFANY: {
9172 MonoInst *loc, *addr;
9175 MONO_INST_NEW (cfg, ins, *ip);
9178 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
9179 CHECK_TYPELOAD (klass);
9180 mono_class_init (klass);
9182 if (cfg->generic_sharing_context)
9183 context_used = mono_class_check_context_used (klass);
9185 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
9186 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
9189 MonoInst *const_ins;
9190 int type_reg = alloc_preg (cfg);
9192 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
9193 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
9194 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
9195 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
9196 } else if (cfg->compile_aot) {
9197 int const_reg = alloc_preg (cfg);
9198 int type_reg = alloc_preg (cfg);
9200 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
9201 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
9202 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
9203 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
9205 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
9206 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
9208 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
9210 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
9211 ins->type = STACK_VTYPE;
9212 ins->klass = mono_defaults.typed_reference_class;
9219 MonoClass *handle_class;
9221 CHECK_STACK_OVF (1);
9224 n = read32 (ip + 1);
9226 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
9227 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9228 handle = mono_method_get_wrapper_data (method, n);
9229 handle_class = mono_method_get_wrapper_data (method, n + 1);
9230 if (handle_class == mono_defaults.typehandle_class)
9231 handle = &((MonoClass*)handle)->byval_arg;
9234 handle = mono_ldtoken (image, n, &handle_class, generic_context);
9238 mono_class_init (handle_class);
9239 if (cfg->generic_sharing_context) {
9240 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
9241 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
9242 /* This case handles ldtoken
9243 of an open type, like for
9246 } else if (handle_class == mono_defaults.typehandle_class) {
9247 /* If we get a MONO_TYPE_CLASS
9248 then we need to provide the
9250 instantiation of it. */
9251 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
9254 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
9255 } else if (handle_class == mono_defaults.fieldhandle_class)
9256 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
9257 else if (handle_class == mono_defaults.methodhandle_class)
9258 context_used = mono_method_check_context_used (handle);
9260 g_assert_not_reached ();
9263 if ((cfg->opt & MONO_OPT_SHARED) &&
9264 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
9265 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
9266 MonoInst *addr, *vtvar, *iargs [3];
9267 int method_context_used;
9269 if (cfg->generic_sharing_context)
9270 method_context_used = mono_method_check_context_used (method);
9272 method_context_used = 0;
9274 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9276 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9277 EMIT_NEW_ICONST (cfg, iargs [1], n);
9278 if (method_context_used) {
9279 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
9280 method, MONO_RGCTX_INFO_METHOD);
9281 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
9283 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
9284 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
9286 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9288 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9290 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9292 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
9293 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
9294 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
9295 (cmethod->klass == mono_defaults.monotype_class->parent) &&
9296 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
9297 MonoClass *tclass = mono_class_from_mono_type (handle);
9299 mono_class_init (tclass);
9301 ins = emit_get_rgctx_klass (cfg, context_used,
9302 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
9303 } else if (cfg->compile_aot) {
9304 if (method->wrapper_type) {
9305 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
9306 /* Special case for static synchronized wrappers */
9307 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
9309 /* FIXME: n is not a normal token */
9310 cfg->disable_aot = TRUE;
9311 EMIT_NEW_PCONST (cfg, ins, NULL);
9314 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
9317 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
9319 ins->type = STACK_OBJ;
9320 ins->klass = cmethod->klass;
9323 MonoInst *addr, *vtvar;
9325 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9328 if (handle_class == mono_defaults.typehandle_class) {
9329 ins = emit_get_rgctx_klass (cfg, context_used,
9330 mono_class_from_mono_type (handle),
9331 MONO_RGCTX_INFO_TYPE);
9332 } else if (handle_class == mono_defaults.methodhandle_class) {
9333 ins = emit_get_rgctx_method (cfg, context_used,
9334 handle, MONO_RGCTX_INFO_METHOD);
9335 } else if (handle_class == mono_defaults.fieldhandle_class) {
9336 ins = emit_get_rgctx_field (cfg, context_used,
9337 handle, MONO_RGCTX_INFO_CLASS_FIELD);
9339 g_assert_not_reached ();
9341 } else if (cfg->compile_aot) {
9342 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
9344 EMIT_NEW_PCONST (cfg, ins, handle);
9346 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9347 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9348 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9358 MONO_INST_NEW (cfg, ins, OP_THROW);
9360 ins->sreg1 = sp [0]->dreg;
9362 bblock->out_of_line = TRUE;
9363 MONO_ADD_INS (bblock, ins);
9364 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
9365 MONO_ADD_INS (bblock, ins);
9368 link_bblock (cfg, bblock, end_bblock);
9369 start_new_bblock = 1;
9371 case CEE_ENDFINALLY:
9372 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
9373 MONO_ADD_INS (bblock, ins);
9375 start_new_bblock = 1;
9378 * Control will leave the method so empty the stack, otherwise
9379 * the next basic block will start with a nonempty stack.
9381 while (sp != stack_start) {
9389 if (*ip == CEE_LEAVE) {
9391 target = ip + 5 + (gint32)read32(ip + 1);
9394 target = ip + 2 + (signed char)(ip [1]);
9397 /* empty the stack */
9398 while (sp != stack_start) {
9403 * If this leave statement is in a catch block, check for a
9404 * pending exception, and rethrow it if necessary.
9405 * We avoid doing this in runtime invoke wrappers, since those are called
9406 * by native code which excepts the wrapper to catch all exceptions.
9408 for (i = 0; i < header->num_clauses; ++i) {
9409 MonoExceptionClause *clause = &header->clauses [i];
9412 * Use <= in the final comparison to handle clauses with multiple
9413 * leave statements, like in bug #78024.
9414 * The ordering of the exception clauses guarantees that we find the
9417 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
9419 MonoBasicBlock *dont_throw;
9424 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
9427 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
9429 NEW_BBLOCK (cfg, dont_throw);
9432 * Currently, we always rethrow the abort exception, despite the
9433 * fact that this is not correct. See thread6.cs for an example.
9434 * But propagating the abort exception is more important than
9435 * getting the sematics right.
9437 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
9438 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
9439 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
9441 MONO_START_BB (cfg, dont_throw);
9446 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
9448 MonoExceptionClause *clause;
9450 for (tmp = handlers; tmp; tmp = tmp->next) {
9452 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
9454 link_bblock (cfg, bblock, tblock);
9455 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
9456 ins->inst_target_bb = tblock;
9457 ins->inst_eh_block = clause;
9458 MONO_ADD_INS (bblock, ins);
9459 bblock->has_call_handler = 1;
9460 if (COMPILE_LLVM (cfg)) {
9461 MonoBasicBlock *target_bb;
9464 * Link the finally bblock with the target, since it will
9465 * conceptually branch there.
9466 * FIXME: Have to link the bblock containing the endfinally.
9468 GET_BBLOCK (cfg, target_bb, target);
9469 link_bblock (cfg, tblock, target_bb);
9472 g_list_free (handlers);
9475 MONO_INST_NEW (cfg, ins, OP_BR);
9476 MONO_ADD_INS (bblock, ins);
9477 GET_BBLOCK (cfg, tblock, target);
9478 link_bblock (cfg, bblock, tblock);
9479 ins->inst_target_bb = tblock;
9480 start_new_bblock = 1;
9482 if (*ip == CEE_LEAVE)
9491 * Mono specific opcodes
9493 case MONO_CUSTOM_PREFIX: {
9495 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
9499 case CEE_MONO_ICALL: {
9501 MonoJitICallInfo *info;
9503 token = read32 (ip + 2);
9504 func = mono_method_get_wrapper_data (method, token);
9505 info = mono_find_jit_icall_by_addr (func);
9508 CHECK_STACK (info->sig->param_count);
9509 sp -= info->sig->param_count;
9511 ins = mono_emit_jit_icall (cfg, info->func, sp);
9512 if (!MONO_TYPE_IS_VOID (info->sig->ret))
9516 inline_costs += 10 * num_calls++;
9520 case CEE_MONO_LDPTR: {
9523 CHECK_STACK_OVF (1);
9525 token = read32 (ip + 2);
9527 ptr = mono_method_get_wrapper_data (method, token);
9528 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
9529 MonoJitICallInfo *callinfo;
9530 const char *icall_name;
9532 icall_name = method->name + strlen ("__icall_wrapper_");
9533 g_assert (icall_name);
9534 callinfo = mono_find_jit_icall_by_name (icall_name);
9535 g_assert (callinfo);
9537 if (ptr == callinfo->func) {
9538 /* Will be transformed into an AOTCONST later */
9539 EMIT_NEW_PCONST (cfg, ins, ptr);
9545 /* FIXME: Generalize this */
9546 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
9547 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
9552 EMIT_NEW_PCONST (cfg, ins, ptr);
9555 inline_costs += 10 * num_calls++;
9556 /* Can't embed random pointers into AOT code */
9557 cfg->disable_aot = 1;
9560 case CEE_MONO_ICALL_ADDR: {
9561 MonoMethod *cmethod;
9564 CHECK_STACK_OVF (1);
9566 token = read32 (ip + 2);
9568 cmethod = mono_method_get_wrapper_data (method, token);
9570 if (cfg->compile_aot) {
9571 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
9573 ptr = mono_lookup_internal_call (cmethod);
9575 EMIT_NEW_PCONST (cfg, ins, ptr);
9581 case CEE_MONO_VTADDR: {
9582 MonoInst *src_var, *src;
9588 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9589 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
9594 case CEE_MONO_NEWOBJ: {
9595 MonoInst *iargs [2];
9597 CHECK_STACK_OVF (1);
9599 token = read32 (ip + 2);
9600 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9601 mono_class_init (klass);
9602 NEW_DOMAINCONST (cfg, iargs [0]);
9603 MONO_ADD_INS (cfg->cbb, iargs [0]);
9604 NEW_CLASSCONST (cfg, iargs [1], klass);
9605 MONO_ADD_INS (cfg->cbb, iargs [1]);
9606 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9608 inline_costs += 10 * num_calls++;
9611 case CEE_MONO_OBJADDR:
9614 MONO_INST_NEW (cfg, ins, OP_MOVE);
9615 ins->dreg = alloc_preg (cfg);
9616 ins->sreg1 = sp [0]->dreg;
9617 ins->type = STACK_MP;
9618 MONO_ADD_INS (cfg->cbb, ins);
9622 case CEE_MONO_LDNATIVEOBJ:
9624 * Similar to LDOBJ, but instead load the unmanaged
9625 * representation of the vtype to the stack.
9630 token = read32 (ip + 2);
9631 klass = mono_method_get_wrapper_data (method, token);
9632 g_assert (klass->valuetype);
9633 mono_class_init (klass);
9636 MonoInst *src, *dest, *temp;
9639 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9640 temp->backend.is_pinvoke = 1;
9641 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9642 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9644 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9645 dest->type = STACK_VTYPE;
9646 dest->klass = klass;
9652 case CEE_MONO_RETOBJ: {
9654 * Same as RET, but return the native representation of a vtype
9657 g_assert (cfg->ret);
9658 g_assert (mono_method_signature (method)->pinvoke);
9663 token = read32 (ip + 2);
9664 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9666 if (!cfg->vret_addr) {
9667 g_assert (cfg->ret_var_is_local);
9669 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9671 EMIT_NEW_RETLOADA (cfg, ins);
9673 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9675 if (sp != stack_start)
9678 MONO_INST_NEW (cfg, ins, OP_BR);
9679 ins->inst_target_bb = end_bblock;
9680 MONO_ADD_INS (bblock, ins);
9681 link_bblock (cfg, bblock, end_bblock);
9682 start_new_bblock = 1;
9686 case CEE_MONO_CISINST:
9687 case CEE_MONO_CCASTCLASS: {
9692 token = read32 (ip + 2);
9693 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9694 if (ip [1] == CEE_MONO_CISINST)
9695 ins = handle_cisinst (cfg, klass, sp [0]);
9697 ins = handle_ccastclass (cfg, klass, sp [0]);
9703 case CEE_MONO_SAVE_LMF:
9704 case CEE_MONO_RESTORE_LMF:
9705 #ifdef MONO_ARCH_HAVE_LMF_OPS
9706 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9707 MONO_ADD_INS (bblock, ins);
9708 cfg->need_lmf_area = TRUE;
9712 case CEE_MONO_CLASSCONST:
9713 CHECK_STACK_OVF (1);
9715 token = read32 (ip + 2);
9716 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9719 inline_costs += 10 * num_calls++;
9721 case CEE_MONO_NOT_TAKEN:
9722 bblock->out_of_line = TRUE;
9726 CHECK_STACK_OVF (1);
9728 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9729 ins->dreg = alloc_preg (cfg);
9730 ins->inst_offset = (gint32)read32 (ip + 2);
9731 ins->type = STACK_PTR;
9732 MONO_ADD_INS (bblock, ins);
9736 case CEE_MONO_DYN_CALL: {
9739 /* It would be easier to call a trampoline, but that would put an
9740 * extra frame on the stack, confusing exception handling. So
9741 * implement it inline using an opcode for now.
9744 if (!cfg->dyn_call_var) {
9745 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9746 /* prevent it from being register allocated */
9747 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
9750 /* Has to use a call inst since it local regalloc expects it */
9751 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
9752 ins = (MonoInst*)call;
9754 ins->sreg1 = sp [0]->dreg;
9755 ins->sreg2 = sp [1]->dreg;
9756 MONO_ADD_INS (bblock, ins);
9758 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
9759 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
9763 inline_costs += 10 * num_calls++;
9768 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9778 /* somewhat similar to LDTOKEN */
9779 MonoInst *addr, *vtvar;
9780 CHECK_STACK_OVF (1);
9781 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9783 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9784 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9786 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9787 ins->type = STACK_VTYPE;
9788 ins->klass = mono_defaults.argumenthandle_class;
9801 * The following transforms:
9802 * CEE_CEQ into OP_CEQ
9803 * CEE_CGT into OP_CGT
9804 * CEE_CGT_UN into OP_CGT_UN
9805 * CEE_CLT into OP_CLT
9806 * CEE_CLT_UN into OP_CLT_UN
9808 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9810 MONO_INST_NEW (cfg, ins, cmp->opcode);
9812 cmp->sreg1 = sp [0]->dreg;
9813 cmp->sreg2 = sp [1]->dreg;
9814 type_from_op (cmp, sp [0], sp [1]);
9816 if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9817 cmp->opcode = OP_LCOMPARE;
9818 else if (sp [0]->type == STACK_R8)
9819 cmp->opcode = OP_FCOMPARE;
9821 cmp->opcode = OP_ICOMPARE;
9822 MONO_ADD_INS (bblock, cmp);
9823 ins->type = STACK_I4;
9824 ins->dreg = alloc_dreg (cfg, ins->type);
9825 type_from_op (ins, sp [0], sp [1]);
9827 if (cmp->opcode == OP_FCOMPARE) {
9829 * The backends expect the fceq opcodes to do the
9832 cmp->opcode = OP_NOP;
9833 ins->sreg1 = cmp->sreg1;
9834 ins->sreg2 = cmp->sreg2;
9836 MONO_ADD_INS (bblock, ins);
9843 MonoMethod *cil_method;
9844 gboolean needs_static_rgctx_invoke;
9846 CHECK_STACK_OVF (1);
9848 n = read32 (ip + 2);
9849 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9850 if (!cmethod || mono_loader_get_last_error ())
9852 mono_class_init (cmethod->klass);
9854 mono_save_token_info (cfg, image, n, cmethod);
9856 if (cfg->generic_sharing_context)
9857 context_used = mono_method_check_context_used (cmethod);
9859 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9861 cil_method = cmethod;
9862 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9863 METHOD_ACCESS_FAILURE;
9865 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9866 if (check_linkdemand (cfg, method, cmethod))
9868 CHECK_CFG_EXCEPTION;
9869 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9870 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9874 * Optimize the common case of ldftn+delegate creation
9876 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9877 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9878 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9879 MonoInst *target_ins;
9881 int invoke_context_used = 0;
9883 invoke = mono_get_delegate_invoke (ctor_method->klass);
9884 if (!invoke || !mono_method_signature (invoke))
9887 if (cfg->generic_sharing_context)
9888 invoke_context_used = mono_method_check_context_used (invoke);
9890 target_ins = sp [-1];
9892 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
9893 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
9894 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
9895 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
9896 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
9900 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
9901 /* FIXME: SGEN support */
9902 if (invoke_context_used == 0) {
9904 if (cfg->verbose_level > 3)
9905 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9907 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
9908 CHECK_CFG_EXCEPTION;
9917 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9918 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9922 inline_costs += 10 * num_calls++;
9925 case CEE_LDVIRTFTN: {
9930 n = read32 (ip + 2);
9931 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9932 if (!cmethod || mono_loader_get_last_error ())
9934 mono_class_init (cmethod->klass);
9936 if (cfg->generic_sharing_context)
9937 context_used = mono_method_check_context_used (cmethod);
9939 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9940 if (check_linkdemand (cfg, method, cmethod))
9942 CHECK_CFG_EXCEPTION;
9943 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9944 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9950 args [1] = emit_get_rgctx_method (cfg, context_used,
9951 cmethod, MONO_RGCTX_INFO_METHOD);
9954 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9956 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9959 inline_costs += 10 * num_calls++;
9963 CHECK_STACK_OVF (1);
9965 n = read16 (ip + 2);
9967 EMIT_NEW_ARGLOAD (cfg, ins, n);
9972 CHECK_STACK_OVF (1);
9974 n = read16 (ip + 2);
9976 NEW_ARGLOADA (cfg, ins, n);
9977 MONO_ADD_INS (cfg->cbb, ins);
9985 n = read16 (ip + 2);
9987 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9989 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9993 CHECK_STACK_OVF (1);
9995 n = read16 (ip + 2);
9997 EMIT_NEW_LOCLOAD (cfg, ins, n);
10002 unsigned char *tmp_ip;
10003 CHECK_STACK_OVF (1);
10005 n = read16 (ip + 2);
10008 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
10014 EMIT_NEW_LOCLOADA (cfg, ins, n);
10023 n = read16 (ip + 2);
10025 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
10027 emit_stloc_ir (cfg, sp, header, n);
10034 if (sp != stack_start)
10036 if (cfg->method != method)
10038 * Inlining this into a loop in a parent could lead to
10039 * stack overflows which is different behavior than the
10040 * non-inlined case, thus disable inlining in this case.
10042 goto inline_failure;
10044 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
10045 ins->dreg = alloc_preg (cfg);
10046 ins->sreg1 = sp [0]->dreg;
10047 ins->type = STACK_PTR;
10048 MONO_ADD_INS (cfg->cbb, ins);
10050 cfg->flags |= MONO_CFG_HAS_ALLOCA;
10052 ins->flags |= MONO_INST_INIT;
10057 case CEE_ENDFILTER: {
10058 MonoExceptionClause *clause, *nearest;
10059 int cc, nearest_num;
10063 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
10065 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
10066 ins->sreg1 = (*sp)->dreg;
10067 MONO_ADD_INS (bblock, ins);
10068 start_new_bblock = 1;
10073 for (cc = 0; cc < header->num_clauses; ++cc) {
10074 clause = &header->clauses [cc];
10075 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
10076 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
10077 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
10082 g_assert (nearest);
10083 if ((ip - header->code) != nearest->handler_offset)
10088 case CEE_UNALIGNED_:
10089 ins_flag |= MONO_INST_UNALIGNED;
10090 /* FIXME: record alignment? we can assume 1 for now */
10094 case CEE_VOLATILE_:
10095 ins_flag |= MONO_INST_VOLATILE;
10099 ins_flag |= MONO_INST_TAILCALL;
10100 cfg->flags |= MONO_CFG_HAS_TAIL;
10101 /* Can't inline tail calls at this time */
10102 inline_costs += 100000;
10109 token = read32 (ip + 2);
10110 klass = mini_get_class (method, token, generic_context);
10111 CHECK_TYPELOAD (klass);
10112 if (generic_class_is_reference_type (cfg, klass))
10113 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
10115 mini_emit_initobj (cfg, *sp, NULL, klass);
10119 case CEE_CONSTRAINED_:
10121 token = read32 (ip + 2);
10122 if (method->wrapper_type != MONO_WRAPPER_NONE)
10123 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
10125 constrained_call = mono_class_get_full (image, token, generic_context);
10126 CHECK_TYPELOAD (constrained_call);
10130 case CEE_INITBLK: {
10131 MonoInst *iargs [3];
10135 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
10136 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
10137 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
10138 /* emit_memset only works when val == 0 */
10139 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
10141 iargs [0] = sp [0];
10142 iargs [1] = sp [1];
10143 iargs [2] = sp [2];
10144 if (ip [1] == CEE_CPBLK) {
10145 MonoMethod *memcpy_method = get_memcpy_method ();
10146 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
10148 MonoMethod *memset_method = get_memset_method ();
10149 mono_emit_method_call (cfg, memset_method, iargs, NULL);
10159 ins_flag |= MONO_INST_NOTYPECHECK;
10161 ins_flag |= MONO_INST_NORANGECHECK;
10162 /* we ignore the no-nullcheck for now since we
10163 * really do it explicitly only when doing callvirt->call
10167 case CEE_RETHROW: {
10169 int handler_offset = -1;
10171 for (i = 0; i < header->num_clauses; ++i) {
10172 MonoExceptionClause *clause = &header->clauses [i];
10173 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
10174 handler_offset = clause->handler_offset;
10179 bblock->flags |= BB_EXCEPTION_UNSAFE;
10181 g_assert (handler_offset != -1);
10183 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
10184 MONO_INST_NEW (cfg, ins, OP_RETHROW);
10185 ins->sreg1 = load->dreg;
10186 MONO_ADD_INS (bblock, ins);
10188 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
10189 MONO_ADD_INS (bblock, ins);
10192 link_bblock (cfg, bblock, end_bblock);
10193 start_new_bblock = 1;
10201 CHECK_STACK_OVF (1);
10203 token = read32 (ip + 2);
10204 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic) {
10205 MonoType *type = mono_type_create_from_typespec (image, token);
10206 token = mono_type_size (type, &ialign);
10208 MonoClass *klass = mono_class_get_full (image, token, generic_context);
10209 CHECK_TYPELOAD (klass);
10210 mono_class_init (klass);
10211 token = mono_class_value_size (klass, &align);
10213 EMIT_NEW_ICONST (cfg, ins, token);
10218 case CEE_REFANYTYPE: {
10219 MonoInst *src_var, *src;
10225 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10227 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
10228 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
10229 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
10234 case CEE_READONLY_:
10247 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
10257 g_warning ("opcode 0x%02x not handled", *ip);
10261 if (start_new_bblock != 1)
10264 bblock->cil_length = ip - bblock->cil_code;
10265 bblock->next_bb = end_bblock;
10267 if (cfg->method == method && cfg->domainvar) {
10269 MonoInst *get_domain;
10271 cfg->cbb = init_localsbb;
10273 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
10274 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
10277 get_domain->dreg = alloc_preg (cfg);
10278 MONO_ADD_INS (cfg->cbb, get_domain);
10280 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
10281 MONO_ADD_INS (cfg->cbb, store);
10284 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
10285 if (cfg->compile_aot)
10286 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
10287 mono_get_got_var (cfg);
10290 if (cfg->method == method && cfg->got_var)
10291 mono_emit_load_got_addr (cfg);
10296 cfg->cbb = init_localsbb;
10298 for (i = 0; i < header->num_locals; ++i) {
10299 MonoType *ptype = header->locals [i];
10300 int t = ptype->type;
10301 dreg = cfg->locals [i]->dreg;
10303 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
10304 t = mono_class_enum_basetype (ptype->data.klass)->type;
10305 if (ptype->byref) {
10306 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10307 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
10308 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
10309 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
10310 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
10311 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
10312 MONO_INST_NEW (cfg, ins, OP_R8CONST);
10313 ins->type = STACK_R8;
10314 ins->inst_p0 = (void*)&r8_0;
10315 ins->dreg = alloc_dreg (cfg, STACK_R8);
10316 MONO_ADD_INS (init_localsbb, ins);
10317 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
10318 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
10319 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
10320 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
10322 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10327 if (cfg->init_ref_vars && cfg->method == method) {
10328 /* Emit initialization for ref vars */
10329 // FIXME: Avoid duplication initialization for IL locals.
10330 for (i = 0; i < cfg->num_varinfo; ++i) {
10331 MonoInst *ins = cfg->varinfo [i];
10333 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
10334 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
10338 /* Add a sequence point for method entry/exit events */
10340 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
10341 MONO_ADD_INS (init_localsbb, ins);
10342 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
10343 MONO_ADD_INS (cfg->bb_exit, ins);
10348 if (cfg->method == method) {
10349 MonoBasicBlock *bb;
10350 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10351 bb->region = mono_find_block_region (cfg, bb->real_offset);
10353 mono_create_spvar_for_region (cfg, bb->region);
10354 if (cfg->verbose_level > 2)
10355 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
10359 g_slist_free (class_inits);
10360 dont_inline = g_list_remove (dont_inline, method);
10362 if (inline_costs < 0) {
10365 /* Method is too large */
10366 mname = mono_method_full_name (method, TRUE);
10367 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
10368 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
10370 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10371 mono_basic_block_free (original_bb);
10375 if ((cfg->verbose_level > 2) && (cfg->method == method))
10376 mono_print_code (cfg, "AFTER METHOD-TO-IR");
10378 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10379 mono_basic_block_free (original_bb);
10380 return inline_costs;
10383 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
10390 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
10394 set_exception_type_from_invalid_il (cfg, method, ip);
10398 g_slist_free (class_inits);
10399 mono_basic_block_free (original_bb);
10400 dont_inline = g_list_remove (dont_inline, method);
10401 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a register-source memory store opcode to its immediate-source
 * counterpart (OP_STORE*_MEMBASE_REG -> OP_STORE*_MEMBASE_IMM), so a
 * store whose source register turns out to hold a constant can encode
 * the constant directly.  Covers the pointer-sized store and the
 * 1/2/4/8 byte integer stores; anything else aborts.
 */
10406 store_membase_reg_to_store_membase_imm (int opcode)
10409 case OP_STORE_MEMBASE_REG:
10410 return OP_STORE_MEMBASE_IMM;
10411 case OP_STOREI1_MEMBASE_REG:
10412 return OP_STOREI1_MEMBASE_IMM;
10413 case OP_STOREI2_MEMBASE_REG:
10414 return OP_STOREI2_MEMBASE_IMM;
10415 case OP_STOREI4_MEMBASE_REG:
10416 return OP_STOREI4_MEMBASE_IMM;
10417 case OP_STOREI8_MEMBASE_REG:
10418 return OP_STOREI8_MEMBASE_IMM;
/* No immediate form exists for the remaining store opcodes. */
10420 g_assert_not_reached ();
10426 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 *
 *   Return the variant of OPCODE which takes an immediate as its second
 * operand instead of a register (e.g. OP_IADD -> OP_IADD_IMM), so a
 * constant source can be folded directly into the instruction.
 */
10429 mono_op_to_op_imm (int opcode)
/* 32 bit integer ALU ops */
10433 return OP_IADD_IMM;
10435 return OP_ISUB_IMM;
10437 return OP_IDIV_IMM;
10439 return OP_IDIV_UN_IMM;
10441 return OP_IREM_IMM;
10443 return OP_IREM_UN_IMM;
10445 return OP_IMUL_IMM;
10447 return OP_IAND_IMM;
10451 return OP_IXOR_IMM;
10453 return OP_ISHL_IMM;
10455 return OP_ISHR_IMM;
10457 return OP_ISHR_UN_IMM;
/* 64 bit integer ALU ops */
10460 return OP_LADD_IMM;
10462 return OP_LSUB_IMM;
10464 return OP_LAND_IMM;
10468 return OP_LXOR_IMM;
10470 return OP_LSHL_IMM;
10472 return OP_LSHR_IMM;
10474 return OP_LSHR_UN_IMM;
/* Compares */
10477 return OP_COMPARE_IMM;
10479 return OP_ICOMPARE_IMM;
10481 return OP_LCOMPARE_IMM;
/* Memory stores */
10483 case OP_STORE_MEMBASE_REG:
10484 return OP_STORE_MEMBASE_IMM;
10485 case OP_STOREI1_MEMBASE_REG:
10486 return OP_STOREI1_MEMBASE_IMM;
10487 case OP_STOREI2_MEMBASE_REG:
10488 return OP_STOREI2_MEMBASE_IMM;
10489 case OP_STOREI4_MEMBASE_REG:
10490 return OP_STOREI4_MEMBASE_IMM;
/* x86/amd64 specific opcodes */
10492 #if defined(TARGET_X86) || defined (TARGET_AMD64)
10494 return OP_X86_PUSH_IMM;
10495 case OP_X86_COMPARE_MEMBASE_REG:
10496 return OP_X86_COMPARE_MEMBASE_IMM;
10498 #if defined(TARGET_AMD64)
10499 case OP_AMD64_ICOMPARE_MEMBASE_REG:
10500 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10502 case OP_VOIDCALL_REG:
10503 return OP_VOIDCALL;
/* localloc with a constant size */
10511 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL CEE_LDIND_* indirect-load opcode to the corresponding IR
 * OP_LOAD*_MEMBASE opcode (base register + displacement load).  Both
 * the native-int and object-reference loads map to the pointer-sized
 * OP_LOAD_MEMBASE.  Unknown opcodes abort.
 */
10518 ldind_to_load_membase (int opcode)
10522 return OP_LOADI1_MEMBASE;
10524 return OP_LOADU1_MEMBASE;
10526 return OP_LOADI2_MEMBASE;
10528 return OP_LOADU2_MEMBASE;
10530 return OP_LOADI4_MEMBASE;
10532 return OP_LOADU4_MEMBASE;
10534 return OP_LOAD_MEMBASE;
10535 case CEE_LDIND_REF:
10536 return OP_LOAD_MEMBASE;
10538 return OP_LOADI8_MEMBASE;
10540 return OP_LOADR4_MEMBASE;
10542 return OP_LOADR8_MEMBASE;
10544 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL CEE_STIND_* indirect-store opcode to the corresponding IR
 * OP_STORE*_MEMBASE_REG opcode.  Object-reference stores use the
 * pointer-sized OP_STORE_MEMBASE_REG.  Unknown opcodes abort.
 */
10551 stind_to_store_membase (int opcode)
10555 return OP_STOREI1_MEMBASE_REG;
10557 return OP_STOREI2_MEMBASE_REG;
10559 return OP_STOREI4_MEMBASE_REG;
10561 case CEE_STIND_REF:
10562 return OP_STORE_MEMBASE_REG;
10564 return OP_STOREI8_MEMBASE_REG;
10566 return OP_STORER4_MEMBASE_REG;
10568 return OP_STORER8_MEMBASE_REG;
10570 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   On x86/amd64, map an OP_LOAD*_MEMBASE (base register + displacement)
 * load opcode to the OP_LOAD*_MEM variant, which takes the address as a
 * constant operand instead of a register.  The 8 byte load is only
 * available when registers are 64 bit wide.
 */
10577 mono_load_membase_to_load_mem (int opcode)
10579 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
10580 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10582 case OP_LOAD_MEMBASE:
10583 return OP_LOAD_MEM;
10584 case OP_LOADU1_MEMBASE:
10585 return OP_LOADU1_MEM;
10586 case OP_LOADU2_MEMBASE:
10587 return OP_LOADU2_MEM;
10588 case OP_LOADI4_MEMBASE:
10589 return OP_LOADI4_MEM;
10590 case OP_LOADU4_MEMBASE:
10591 return OP_LOADU4_MEM;
10592 #if SIZEOF_REGISTER == 8
10593 case OP_LOADI8_MEMBASE:
10594 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU opcode OPCODE whose result would be written back by
 * STORE_OPCODE, return the x86/amd64 read-modify-write form which
 * operates directly on the membase destination (e.g. add reg -> membase).
 * Only integer add/sub/and/or/xor in register and immediate form are
 * handled, and only when the store is pointer-sized or 4 byte (plus
 * 8 byte stores on amd64, which select the OP_AMD64_*_MEMBASE_* forms).
 */
10603 op_to_op_dest_membase (int store_opcode, int opcode)
10605 #if defined(TARGET_X86)
/* Only pointer-sized and 4 byte stores can be combined on x86. */
10606 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
10611 return OP_X86_ADD_MEMBASE_REG;
10613 return OP_X86_SUB_MEMBASE_REG;
10615 return OP_X86_AND_MEMBASE_REG;
10617 return OP_X86_OR_MEMBASE_REG;
10619 return OP_X86_XOR_MEMBASE_REG;
10622 return OP_X86_ADD_MEMBASE_IMM;
10625 return OP_X86_SUB_MEMBASE_IMM;
10628 return OP_X86_AND_MEMBASE_IMM;
10631 return OP_X86_OR_MEMBASE_IMM;
10634 return OP_X86_XOR_MEMBASE_IMM;
10640 #if defined(TARGET_AMD64)
/* amd64 also allows combining 8 byte stores. */
10641 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
10646 return OP_X86_ADD_MEMBASE_REG;
10648 return OP_X86_SUB_MEMBASE_REG;
10650 return OP_X86_AND_MEMBASE_REG;
10652 return OP_X86_OR_MEMBASE_REG;
10654 return OP_X86_XOR_MEMBASE_REG;
10656 return OP_X86_ADD_MEMBASE_IMM;
10658 return OP_X86_SUB_MEMBASE_IMM;
10660 return OP_X86_AND_MEMBASE_IMM;
10662 return OP_X86_OR_MEMBASE_IMM;
10664 return OP_X86_XOR_MEMBASE_IMM;
/* 64 bit variants */
10666 return OP_AMD64_ADD_MEMBASE_REG;
10668 return OP_AMD64_SUB_MEMBASE_REG;
10670 return OP_AMD64_AND_MEMBASE_REG;
10672 return OP_AMD64_OR_MEMBASE_REG;
10674 return OP_AMD64_XOR_MEMBASE_REG;
10677 return OP_AMD64_ADD_MEMBASE_IMM;
10680 return OP_AMD64_SUB_MEMBASE_IMM;
10683 return OP_AMD64_AND_MEMBASE_IMM;
10686 return OP_AMD64_OR_MEMBASE_IMM;
10689 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Given a compare-result opcode OPCODE whose boolean result would be
 * written by STORE_OPCODE, return the x86/amd64 SETcc form that writes
 * the flag straight into a byte-sized membase destination.  Only byte
 * stores (OP_STOREI1_MEMBASE_REG) can be combined this way.
 */
10699 op_to_op_store_membase (int store_opcode, int opcode)
10701 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10704 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10705 return OP_X86_SETEQ_MEMBASE;
10707 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10708 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Try to fold a preceding memory load (LOAD_OPCODE) into the first
 * source operand of OPCODE, returning an x86/amd64 opcode that reads
 * that operand from memory directly (push / compare membase forms).
 * The load width must match the width the consuming opcode expects.
 */
10716 op_to_op_src1_membase (int load_opcode, int opcode)
10719 /* FIXME: This has sign extension issues */
10721 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10722 return OP_X86_COMPARE_MEMBASE8_IMM;
/* On x86 only pointer-sized and 4 byte loads can be folded. */
10725 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10730 return OP_X86_PUSH_MEMBASE;
10731 case OP_COMPARE_IMM:
10732 case OP_ICOMPARE_IMM:
10733 return OP_X86_COMPARE_MEMBASE_IMM;
10736 return OP_X86_COMPARE_MEMBASE_REG;
10740 #ifdef TARGET_AMD64
10741 /* FIXME: This has sign extension issues */
10743 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10744 return OP_X86_COMPARE_MEMBASE8_IMM;
10749 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10750 return OP_X86_PUSH_MEMBASE;
10752 /* FIXME: This only works for 32 bit immediates
10753 case OP_COMPARE_IMM:
10754 case OP_LCOMPARE_IMM:
10755 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10756 return OP_AMD64_COMPARE_MEMBASE_IMM;
10758 case OP_ICOMPARE_IMM:
10759 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10760 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10764 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10765 return OP_AMD64_COMPARE_MEMBASE_REG;
10768 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10769 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Try to fold a preceding memory load (LOAD_OPCODE) into the second
 * source operand of OPCODE, returning an x86/amd64 reg,membase form of
 * the compare / add / sub / and / or / xor operation.  On amd64 the
 * 4 byte loads select the 32 bit forms and the pointer-sized/8 byte
 * loads select the OP_AMD64_* 64 bit forms.
 */
10778 op_to_op_src2_membase (int load_opcode, int opcode)
/* On x86 only pointer-sized and 4 byte loads can be folded. */
10781 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10787 return OP_X86_COMPARE_REG_MEMBASE;
10789 return OP_X86_ADD_REG_MEMBASE;
10791 return OP_X86_SUB_REG_MEMBASE;
10793 return OP_X86_AND_REG_MEMBASE;
10795 return OP_X86_OR_REG_MEMBASE;
10797 return OP_X86_XOR_REG_MEMBASE;
10801 #ifdef TARGET_AMD64
/* 32 bit operations fed by a 4 byte load */
10802 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
10805 return OP_AMD64_ICOMPARE_REG_MEMBASE;
10807 return OP_X86_ADD_REG_MEMBASE;
10809 return OP_X86_SUB_REG_MEMBASE;
10811 return OP_X86_AND_REG_MEMBASE;
10813 return OP_X86_OR_REG_MEMBASE;
10815 return OP_X86_XOR_REG_MEMBASE;
/* 64 bit operations fed by a pointer-sized/8 byte load */
10817 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
10821 return OP_AMD64_COMPARE_REG_MEMBASE;
10823 return OP_AMD64_ADD_REG_MEMBASE;
10825 return OP_AMD64_SUB_REG_MEMBASE;
10827 return OP_AMD64_AND_REG_MEMBASE;
10829 return OP_AMD64_OR_REG_MEMBASE;
10831 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but special-case opcodes whose immediate
 * forms would be software-emulated on this configuration: long shifts on
 * 32 bit registers, and mul/div when MONO_ARCH_EMULATE_MUL_DIV /
 * MONO_ARCH_EMULATE_DIV is defined.  All other opcodes defer to
 * mono_op_to_op_imm ().
 */
10840 mono_op_to_op_imm_noemul (int opcode)
10843 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
10849 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10857 return mono_op_to_op_imm (opcode);
10861 #ifndef DISABLE_JIT
10864 * mono_handle_global_vregs:
10866 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
10870 mono_handle_global_vregs (MonoCompile *cfg)
10872 gint32 *vreg_to_bb;
10873 MonoBasicBlock *bb;
10876 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
10878 #ifdef MONO_ARCH_SIMD_INTRINSICS
10879 if (cfg->uses_simd_intrinsics)
10880 mono_simd_simplify_indirection (cfg);
10883 /* Find local vregs used in more than one bb */
10884 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10885 MonoInst *ins = bb->code;
10886 int block_num = bb->block_num;
10888 if (cfg->verbose_level > 2)
10889 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
10892 for (; ins; ins = ins->next) {
10893 const char *spec = INS_INFO (ins->opcode);
10894 int regtype = 0, regindex;
10897 if (G_UNLIKELY (cfg->verbose_level > 2))
10898 mono_print_ins (ins);
10900 g_assert (ins->opcode >= MONO_CEE_LAST);
10902 for (regindex = 0; regindex < 4; regindex ++) {
10905 if (regindex == 0) {
10906 regtype = spec [MONO_INST_DEST];
10907 if (regtype == ' ')
10910 } else if (regindex == 1) {
10911 regtype = spec [MONO_INST_SRC1];
10912 if (regtype == ' ')
10915 } else if (regindex == 2) {
10916 regtype = spec [MONO_INST_SRC2];
10917 if (regtype == ' ')
10920 } else if (regindex == 3) {
10921 regtype = spec [MONO_INST_SRC3];
10922 if (regtype == ' ')
10927 #if SIZEOF_REGISTER == 4
10928 /* In the LLVM case, the long opcodes are not decomposed */
10929 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
10931 * Since some instructions reference the original long vreg,
10932 * and some reference the two component vregs, it is quite hard
10933 * to determine when it needs to be global. So be conservative.
10935 if (!get_vreg_to_inst (cfg, vreg)) {
10936 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10938 if (cfg->verbose_level > 2)
10939 printf ("LONG VREG R%d made global.\n", vreg);
10943 * Make the component vregs volatile since the optimizations can
10944 * get confused otherwise.
10946 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
10947 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
10951 g_assert (vreg != -1);
10953 prev_bb = vreg_to_bb [vreg];
10954 if (prev_bb == 0) {
10955 /* 0 is a valid block num */
10956 vreg_to_bb [vreg] = block_num + 1;
10957 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
10958 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
10961 if (!get_vreg_to_inst (cfg, vreg)) {
10962 if (G_UNLIKELY (cfg->verbose_level > 2))
10963 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
10967 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
10970 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10973 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
10976 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
10979 g_assert_not_reached ();
10983 /* Flag as having been used in more than one bb */
10984 vreg_to_bb [vreg] = -1;
10990 /* If a variable is used in only one bblock, convert it into a local vreg */
10991 for (i = 0; i < cfg->num_varinfo; i++) {
10992 MonoInst *var = cfg->varinfo [i];
10993 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
10995 switch (var->type) {
11001 #if SIZEOF_REGISTER == 8
11004 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
11005 /* Enabling this screws up the fp stack on x86 */
11008 /* Arguments are implicitly global */
11009 /* Putting R4 vars into registers doesn't work currently */
11010 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
11012 * Make that the variable's liveness interval doesn't contain a call, since
11013 * that would cause the lvreg to be spilled, making the whole optimization
11016 /* This is too slow for JIT compilation */
11018 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
11020 int def_index, call_index, ins_index;
11021 gboolean spilled = FALSE;
11026 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
11027 const char *spec = INS_INFO (ins->opcode);
11029 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
11030 def_index = ins_index;
11032 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
11033 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
11034 if (call_index > def_index) {
11040 if (MONO_IS_CALL (ins))
11041 call_index = ins_index;
11051 if (G_UNLIKELY (cfg->verbose_level > 2))
11052 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
11053 var->flags |= MONO_INST_IS_DEAD;
11054 cfg->vreg_to_inst [var->dreg] = NULL;
11061 * Compress the varinfo and vars tables so the liveness computation is faster and
11062 * takes up less space.
11065 for (i = 0; i < cfg->num_varinfo; ++i) {
11066 MonoInst *var = cfg->varinfo [i];
11067 if (pos < i && cfg->locals_start == i)
11068 cfg->locals_start = pos;
11069 if (!(var->flags & MONO_INST_IS_DEAD)) {
11071 cfg->varinfo [pos] = cfg->varinfo [i];
11072 cfg->varinfo [pos]->inst_c0 = pos;
11073 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
11074 cfg->vars [pos].idx = pos;
11075 #if SIZEOF_REGISTER == 4
11076 if (cfg->varinfo [pos]->type == STACK_I8) {
11077 /* Modify the two component vars too */
11080 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
11081 var1->inst_c0 = pos;
11082 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
11083 var1->inst_c0 = pos;
11090 cfg->num_varinfo = pos;
11091 if (cfg->locals_start > cfg->num_varinfo)
11092 cfg->locals_start = cfg->num_varinfo;
/*
 * mono_spill_global_vars:
 *
 *   Generate spill code for variables which are not allocated to registers,
 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
 * code is generated which could be optimized by the local optimization passes.
 * NOTE(review): this excerpt is elided — several declarations (ins, lvregs,
 * spec2, store_opcode, lvreg, tree, ...), closing braces and some statements
 * are missing from view; comments below describe only what is visible.
 */
mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
	MonoBasicBlock *bb;
	int orig_next_vreg;
	guint32 *vreg_to_lvreg;
	guint32 i, lvregs_len;
	gboolean dest_has_lvreg = FALSE;
	guint32 stacktypes [128];
	MonoInst **live_range_start, **live_range_end;
	MonoBasicBlock **live_range_start_bb, **live_range_end_bb;

	*need_local_opts = FALSE;

	memset (spec2, 0, sizeof (spec2));

	/* FIXME: Move this function to mini.c */

	/* Map the ins-spec register-type characters to stack types */
	stacktypes ['i'] = STACK_PTR;
	stacktypes ['l'] = STACK_I8;
	stacktypes ['f'] = STACK_R8;
#ifdef MONO_ARCH_SIMD_INTRINSICS
	stacktypes ['x'] = STACK_VTYPE;

#if SIZEOF_REGISTER == 4
	/* Create MonoInsts for longs */
	for (i = 0; i < cfg->num_varinfo; i++) {
		MonoInst *ins = cfg->varinfo [i];

		if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
			switch (ins->type) {
				if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))

				g_assert (ins->opcode == OP_REGOFFSET);

				/* Give the two 32 bit component vregs stack slots inside the
				 * 64 bit variable's slot (low word, then high word) */
				tree = get_vreg_to_inst (cfg, ins->dreg + 1);
				tree->opcode = OP_REGOFFSET;
				tree->inst_basereg = ins->inst_basereg;
				tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
				tree = get_vreg_to_inst (cfg, ins->dreg + 2);
				tree->opcode = OP_REGOFFSET;
				tree->inst_basereg = ins->inst_basereg;
				tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;

	/* FIXME: widening and truncation */

	/*
	 * As an optimization, when a variable allocated to the stack is first loaded into
	 * an lvreg, we will remember the lvreg and use it the next time instead of loading
	 * the variable again.
	 */
	orig_next_vreg = cfg->next_vreg;
	vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
	/* Fixed-size scratch list of vregs with a cached lvreg; bounded by the
	 * g_assert (lvregs_len < 1024) checks below */
	lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);

	/*
	 * These arrays contain the first and last instructions accessing a given
	 * variable.
	 * Since we emit bblocks in the same order we process them here, and we
	 * don't split live ranges, these will precisely describe the live range of
	 * the variable, i.e. the instruction range where a valid value can be found
	 * in the variables location.
	 * The live range is computed using the liveness info computed by the liveness pass.
	 * We can't use vmv->range, since that is an abstract live range, and we need
	 * one which is instruction precise.
	 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
	 */
	/* FIXME: Only do this if debugging info is requested */
	live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
	live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
	live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
	live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);

	/* Add spill loads/stores */
	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
		if (cfg->verbose_level > 2)
			printf ("\nSPILL BLOCK %d:\n", bb->block_num);

		/* Clear vreg_to_lvreg array: cached lvregs are only valid inside one bblock */
		for (i = 0; i < lvregs_len; i++)
			vreg_to_lvreg [lvregs [i]] = 0;

		MONO_BB_FOR_EACH_INS (bb, ins) {
			const char *spec = INS_INFO (ins->opcode);
			int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
			gboolean store, no_lvreg;
			int sregs [MONO_MAX_SRC_REGS];

			if (G_UNLIKELY (cfg->verbose_level > 2))
				mono_print_ins (ins);

			if (ins->opcode == OP_NOP)

			/*
			 * We handle LDADDR here as well, since it can only be decomposed
			 * when variable addresses are known.
			 */
			if (ins->opcode == OP_LDADDR) {
				MonoInst *var = ins->inst_p0;

				if (var->opcode == OP_VTARG_ADDR) {
					/* Happens on SPARC/S390 where vtypes are passed by reference */
					MonoInst *vtaddr = var->inst_left;
					if (vtaddr->opcode == OP_REGVAR) {
						ins->opcode = OP_MOVE;
						ins->sreg1 = vtaddr->dreg;
					/* NOTE(review): reads var->inst_left here, which is the same
					 * pointer as vtaddr above — presumably intentional */
					else if (var->inst_left->opcode == OP_REGOFFSET) {
						ins->opcode = OP_LOAD_MEMBASE;
						ins->inst_basereg = vtaddr->inst_basereg;
						ins->inst_offset = vtaddr->inst_offset;

					g_assert (var->opcode == OP_REGOFFSET);

					/* Compute the address as basereg + offset */
					ins->opcode = OP_ADD_IMM;
					ins->sreg1 = var->inst_basereg;
					ins->inst_imm = var->inst_offset;

				*need_local_opts = TRUE;
				spec = INS_INFO (ins->opcode);

			/* CEE-level opcodes must all have been lowered by now */
			if (ins->opcode < MONO_CEE_LAST) {
				mono_print_ins (ins);
				g_assert_not_reached ();

			/*
			 * Store opcodes have destbasereg in the dreg, but in reality, it is an
			 * input register: swap it into sreg2 and use a fixed-up spec (spec2) so
			 * the generic dreg/sreg handling below treats it as a source.
			 */
			if (MONO_IS_STORE_MEMBASE (ins)) {
				tmp_reg = ins->dreg;
				ins->dreg = ins->sreg2;
				ins->sreg2 = tmp_reg;

				spec2 [MONO_INST_DEST] = ' ';
				spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
				spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
				spec2 [MONO_INST_SRC3] = ' ';
			} else if (MONO_IS_STORE_MEMINDEX (ins))
				g_assert_not_reached ();

			if (G_UNLIKELY (cfg->verbose_level > 2)) {
				printf ("\t %.3s %d", spec, ins->dreg);
				num_sregs = mono_inst_get_src_registers (ins, sregs);
				for (srcindex = 0; srcindex < 3; ++srcindex)
					printf (" %d", sregs [srcindex]);

			/* --- DREG: handle the instruction's destination register --- */
			regtype = spec [MONO_INST_DEST];
			g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));

			if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
				MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
				MonoInst *store_ins;
				MonoInst *def_ins = ins;
				int dreg = ins->dreg; /* The original vreg */

				store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);

				if (var->opcode == OP_REGVAR) {
					/* Variable lives in a hard register: just rewrite the vreg */
					ins->dreg = var->dreg;
				} else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
					/*
					 * Instead of emitting a load+store, use a _membase opcode.
					 */
					g_assert (var->opcode == OP_REGOFFSET);
					if (ins->opcode == OP_MOVE) {

					ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
					ins->inst_basereg = var->inst_basereg;
					ins->inst_offset = var->inst_offset;
					spec = INS_INFO (ins->opcode);

					g_assert (var->opcode == OP_REGOFFSET);

					prev_dreg = ins->dreg;

					/* Invalidate any previous lvreg for this vreg */
					vreg_to_lvreg [ins->dreg] = 0;

					if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
						/* Under soft float, R8 values live in integer registers */
						store_opcode = OP_STOREI8_MEMBASE_REG;

					/* Redirect the def to a fresh local vreg, then store it back */
					ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);

					if (regtype == 'l') {
						/* Store the two 32 bit halves separately */
						NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
						mono_bblock_insert_after_ins (bb, ins, store_ins);
						NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
						mono_bblock_insert_after_ins (bb, ins, store_ins);
						def_ins = store_ins;

						g_assert (store_opcode != OP_STOREV_MEMBASE);

						/* Try to fuse the store into the instruction itself */
						/* FIXME: Add more instructions */
						if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
							/* Constant def: store the immediate directly */
							ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
							ins->inst_imm = ins->inst_c0;
							ins->inst_destbasereg = var->inst_basereg;
							ins->inst_offset = var->inst_offset;
							spec = INS_INFO (ins->opcode);
						} else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
							/* Move def: turn the move itself into the store */
							ins->opcode = store_opcode;
							ins->inst_destbasereg = var->inst_basereg;
							ins->inst_offset = var->inst_offset;

							/* Put the value register into sreg2 (see the
							 * MONO_IS_STORE_MEMBASE swap above) */
							tmp_reg = ins->dreg;
							ins->dreg = ins->sreg2;
							ins->sreg2 = tmp_reg;

							spec2 [MONO_INST_DEST] = ' ';
							spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
							spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
							spec2 [MONO_INST_SRC3] = ' ';
						} else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
							// FIXME: The backends expect the base reg to be in inst_basereg
							ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
							ins->inst_basereg = var->inst_basereg;
							ins->inst_offset = var->inst_offset;
							spec = INS_INFO (ins->opcode);

							/* printf ("INS: "); mono_print_ins (ins); */
							/* Create a store instruction */
							NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);

							/* Insert it after the instruction */
							mono_bblock_insert_after_ins (bb, ins, store_ins);

							def_ins = store_ins;

							/*
							 * We can't assign ins->dreg to var->dreg here, since the
							 * sregs could use it. So set a flag, and do it after
							 * the sregs have been processed.
							 */
							if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
								dest_has_lvreg = TRUE;

				/* Record the start of the variable's precise live range */
				if (def_ins && !live_range_start [dreg]) {
					live_range_start [dreg] = def_ins;
					live_range_start_bb [dreg] = bb;

			/* --- SREGS: handle the instruction's source registers --- */
			num_sregs = mono_inst_get_src_registers (ins, sregs);
			for (srcindex = 0; srcindex < 3; ++srcindex) {
				regtype = spec [MONO_INST_SRC1 + srcindex];
				sreg = sregs [srcindex];

				g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
				if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
					MonoInst *var = get_vreg_to_inst (cfg, sreg);
					MonoInst *use_ins = ins;
					MonoInst *load_ins;
					guint32 load_opcode;

					if (var->opcode == OP_REGVAR) {
						/* Variable lives in a hard register */
						sregs [srcindex] = var->dreg;
						//mono_inst_set_src_registers (ins, sregs);
						live_range_end [sreg] = use_ins;
						live_range_end_bb [sreg] = bb;

					g_assert (var->opcode == OP_REGOFFSET);

					load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);

					g_assert (load_opcode != OP_LOADV_MEMBASE);

					if (vreg_to_lvreg [sreg]) {
						g_assert (vreg_to_lvreg [sreg] != -1);

						/* The variable is already loaded to an lvreg */
						if (G_UNLIKELY (cfg->verbose_level > 2))
							printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
						sregs [srcindex] = vreg_to_lvreg [sreg];
						//mono_inst_set_src_registers (ins, sregs);

					/* Try to fuse the load into the instruction */
					if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
						ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
						sregs [0] = var->inst_basereg;
						//mono_inst_set_src_registers (ins, sregs);
						ins->inst_offset = var->inst_offset;
					} else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
						ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
						sregs [1] = var->inst_basereg;
						//mono_inst_set_src_registers (ins, sregs);
						ins->inst_offset = var->inst_offset;

						if (MONO_IS_REAL_MOVE (ins)) {
							ins->opcode = OP_NOP;

						//printf ("%d ", srcindex); mono_print_ins (ins);

						/* Load the variable into a fresh local vreg */
						sreg = alloc_dreg (cfg, stacktypes [regtype]);

						if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
							if (var->dreg == prev_dreg) {
								/*
								 * sreg refers to the value loaded by the load
								 * emitted below, but we need to use ins->dreg
								 * since it refers to the store emitted earlier.
								 */

							/* Cache the lvreg so later uses in this bblock reuse it */
							g_assert (sreg != -1);
							vreg_to_lvreg [var->dreg] = sreg;
							g_assert (lvregs_len < 1024);
							lvregs [lvregs_len ++] = var->dreg;

						sregs [srcindex] = sreg;
						//mono_inst_set_src_registers (ins, sregs);

						if (regtype == 'l') {
							/* Load the two 32 bit halves separately */
							NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
							mono_bblock_insert_before_ins (bb, ins, load_ins);
							NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
							mono_bblock_insert_before_ins (bb, ins, load_ins);
							use_ins = load_ins;

#if SIZEOF_REGISTER == 4
							g_assert (load_opcode != OP_LOADI8_MEMBASE);

							NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
							mono_bblock_insert_before_ins (bb, ins, load_ins);
							use_ins = load_ins;

					/* Record the end of the variable's precise live range */
					if (var->dreg < orig_next_vreg) {
						live_range_end [var->dreg] = use_ins;
						live_range_end_bb [var->dreg] = bb;

			mono_inst_set_src_registers (ins, sregs);

			if (dest_has_lvreg) {
				/* Now it is safe to cache the dreg's lvreg (see comment above) */
				g_assert (ins->dreg != -1);
				vreg_to_lvreg [prev_dreg] = ins->dreg;
				g_assert (lvregs_len < 1024);
				lvregs [lvregs_len ++] = prev_dreg;
				dest_has_lvreg = FALSE;

				/* Undo the dreg/sreg2 swap done for store opcodes above */
				tmp_reg = ins->dreg;
				ins->dreg = ins->sreg2;
				ins->sreg2 = tmp_reg;

			if (MONO_IS_CALL (ins)) {
				/* Clear vreg_to_lvreg array: calls invalidate the cached lvregs */
				for (i = 0; i < lvregs_len; i++)
					vreg_to_lvreg [lvregs [i]] = 0;
			} else if (ins->opcode == OP_NOP) {
				MONO_INST_NULLIFY_SREGS (ins);

			if (cfg->verbose_level > 2)
				mono_print_ins_index (1, ins);

		/* Extend the live range based on the liveness info */
		if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
			for (i = 0; i < cfg->num_varinfo; i ++) {
				MonoMethodVar *vi = MONO_VARINFO (cfg, i);

				if (vreg_is_volatile (cfg, vi->vreg))
					/* The liveness info is incomplete */

				if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
					/* Live from at least the first ins of this bb */
					live_range_start [vi->vreg] = bb->code;
					live_range_start_bb [vi->vreg] = bb;

				if (mono_bitset_test_fast (bb->live_out_set, i)) {
					/* Live at least until the last ins of this bb */
					live_range_end [vi->vreg] = bb->last_ins;
					live_range_end_bb [vi->vreg] = bb;

#ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
	/*
	 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
	 * by storing the current native offset into MonoMethodVar->live_range_start/end.
	 */
	if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
		for (i = 0; i < cfg->num_varinfo; ++i) {
			int vreg = MONO_VARINFO (cfg, i)->vreg;

			if (live_range_start [vreg]) {
				MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
				ins->inst_c1 = vreg;
				mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
			if (live_range_end [vreg]) {
				MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
				ins->inst_c1 = vreg;
				if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
					mono_add_ins_to_end (live_range_end_bb [vreg], ins);
					/* NOTE(review): an elided "else" presumably precedes this line */
					mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);

	g_free (live_range_start);
	g_free (live_range_end);
	g_free (live_range_start_bb);
	g_free (live_range_end_bb);
11600 * - use 'iadd' instead of 'int_add'
11601 * - handling ovf opcodes: decompose in method_to_ir.
11602 * - unify iregs/fregs
11603 * -> partly done, the missing parts are:
11604 * - a more complete unification would involve unifying the hregs as well, so
11605 * code wouldn't need if (fp) all over the place. but that would mean the hregs
11606 * would no longer map to the machine hregs, so the code generators would need to
11607 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
11608 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
11609 * fp/non-fp branches speeds it up by about 15%.
11610 * - use sext/zext opcodes instead of shifts
11612 * - get rid of TEMPLOADs if possible and use vregs instead
11613 * - clean up usage of OP_P/OP_ opcodes
11614 * - cleanup usage of DUMMY_USE
11615 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
11617 * - set the stack type and allocate a dreg in the EMIT_NEW macros
11618 * - get rid of all the <foo>2 stuff when the new JIT is ready.
11619 * - make sure handle_stack_args () is called before the branch is emitted
11620 * - when the new IR is done, get rid of all unused stuff
11621 * - COMPARE/BEQ as separate instructions or unify them ?
11622 * - keeping them separate allows specialized compare instructions like
11623 * compare_imm, compare_membase
11624 * - most back ends unify fp compare+branch, fp compare+ceq
11625 * - integrate mono_save_args into inline_method
 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
11627 * - handle long shift opts on 32 bit platforms somehow: they require
11628 * 3 sregs (2 for arg1 and 1 for arg2)
11629 * - make byref a 'normal' type.
11630 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
11631 * variable if needed.
11632 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11633 * like inline_method.
11634 * - remove inlining restrictions
11635 * - fix LNEG and enable cfold of INEG
11636 * - generalize x86 optimizations like ldelema as a peephole optimization
11637 * - add store_mem_imm for amd64
11638 * - optimize the loading of the interruption flag in the managed->native wrappers
11639 * - avoid special handling of OP_NOP in passes
11640 * - move code inserting instructions into one function/macro.
11641 * - try a coalescing phase after liveness analysis
11642 * - add float -> vreg conversion + local optimizations on !x86
11643 * - figure out how to handle decomposed branches during optimizations, ie.
11644 * compare+branch, op_jump_table+op_br etc.
11645 * - promote RuntimeXHandles to vregs
11646 * - vtype cleanups:
11647 * - add a NEW_VARLOADA_VREG macro
11648 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11649 * accessing vtype fields.
11650 * - get rid of I8CONST on 64 bit platforms
11651 * - dealing with the increase in code size due to branches created during opcode
11653 * - use extended basic blocks
11654 * - all parts of the JIT
11655 * - handle_global_vregs () && local regalloc
11656 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11657 * - sources of increase in code size:
11660 * - isinst and castclass
11661 * - lvregs not allocated to global registers even if used multiple times
11662 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11664 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11665 * - add all micro optimizations from the old JIT
11666 * - put tree optimizations into the deadce pass
11667 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11668 * specific function.
11669 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11670 * fcompare + branchCC.
11671 * - create a helper function for allocating a stack slot, taking into account
11672 * MONO_CFG_HAS_SPILLUP.
11674 * - merge the ia64 switch changes.
11675 * - optimize mono_regstate2_alloc_int/float.
11676 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11677 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11678 * parts of the tree could be separated by other instructions, killing the tree
11679 * arguments, or stores killing loads etc. Also, should we fold loads into other
11680 * instructions if the result of the load is used multiple times ?
11681 * - make the REM_IMM optimization in mini-x86.c arch-independent.
11682 * - LAST MERGE: 108395.
11683 * - when returning vtypes in registers, generate IR and append it to the end of the
11684 * last bb instead of doing it in the epilog.
11685 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
11693 - When to decompose opcodes:
11694 - earlier: this makes some optimizations hard to implement, since the low level IR
     no longer contains the necessary information. But it is easier to do.
11696 - later: harder to implement, enables more optimizations.
11697 - Branches inside bblocks:
11698 - created when decomposing complex opcodes.
11699 - branches to another bblock: harmless, but not tracked by the branch
11700 optimizations, so need to branch to a label at the start of the bblock.
11701 - branches to inside the same bblock: very problematic, trips up the local
     reg allocator. Can be fixed by splitting the current bblock, but that is a
11703 complex operation, since some local vregs can become global vregs etc.
11704 - Local/global vregs:
11705 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11706 local register allocator.
11707 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11708 structure, created by mono_create_var (). Assigned to hregs or the stack by
11709 the global register allocator.
11710 - When to do optimizations like alu->alu_imm:
11711 - earlier -> saves work later on since the IR will be smaller/simpler
11712 - later -> can work on more instructions
11713 - Handling of valuetypes:
11714 - When a vtype is pushed on the stack, a new temporary is created, an
11715 instruction computing its address (LDADDR) is emitted and pushed on
11716 the stack. Need to optimize cases when the vtype is used immediately as in
11717 argument passing, stloc etc.
11718 - Instead of the to_end stuff in the old JIT, simply call the function handling
11719 the values on the stack before emitting the last instruction of the bb.
11722 #endif /* DISABLE_JIT */