2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
30 #include <mono/utils/memcheck.h>
32 #include <mono/metadata/assembly.h>
33 #include <mono/metadata/attrdefs.h>
34 #include <mono/metadata/loader.h>
35 #include <mono/metadata/tabledefs.h>
36 #include <mono/metadata/class.h>
37 #include <mono/metadata/object.h>
38 #include <mono/metadata/exception.h>
39 #include <mono/metadata/opcodes.h>
40 #include <mono/metadata/mono-endian.h>
41 #include <mono/metadata/tokentype.h>
42 #include <mono/metadata/tabledefs.h>
43 #include <mono/metadata/marshal.h>
44 #include <mono/metadata/debug-helpers.h>
45 #include <mono/metadata/mono-debug.h>
46 #include <mono/metadata/gc-internal.h>
47 #include <mono/metadata/security-manager.h>
48 #include <mono/metadata/threads-types.h>
49 #include <mono/metadata/security-core-clr.h>
50 #include <mono/metadata/monitor.h>
51 #include <mono/metadata/profiler-private.h>
52 #include <mono/metadata/profiler.h>
53 #include <mono/utils/mono-compiler.h>
54 #include <mono/metadata/mono-basic-block.h>
61 #include "jit-icalls.h"
63 #include "debugger-agent.h"
/* Relative cost assigned to a branch by the inlining heuristics. */
65 #define BRANCH_COST 10
/* Upper bound on the size of a method body considered for inlining
 * (NOTE(review): units — IL bytes vs. instructions — not visible in this extract). */
66 #define INLINE_LENGTH_LIMIT 20
/*
 * Abort the current inlining attempt when we are compiling an inlined
 * method (cfg->method != method) that is not a wrapper.
 * NOTE(review): the macro body is truncated in this extract — the action
 * taken on failure is not visible here.
 */
67 #define INLINE_FAILURE do {\
68 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
/*
 * Bail out of the current code path if an exception has already been
 * recorded on the cfg (cfg->exception_type set by mono_cfg_set_exception).
 * NOTE(review): truncated in this extract — the jump target is not visible.
 */
71 #define CHECK_CFG_EXCEPTION do {\
72 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
/*
 * Record a MethodAccessException on the cfg with a message naming both the
 * inaccessible method ('cil_method') and the caller ('method'), free the
 * temporary name strings, then jump to the shared exception_exit label.
 * Expects 'cfg', 'method' and 'cil_method' to be in scope at the use site.
 */
75 #define METHOD_ACCESS_FAILURE do { \
76 char *method_fname = mono_method_full_name (method, TRUE); \
77 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
78 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS); \
79 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
80 g_free (method_fname); \
81 g_free (cil_method_fname); \
82 goto exception_exit; \
/*
 * Record a FieldAccessException on the cfg, mirroring METHOD_ACCESS_FAILURE:
 * builds a message naming the inaccessible 'field' and the accessing
 * 'method', frees the temporary strings, and jumps to exception_exit.
 */
84 #define FIELD_ACCESS_FAILURE do { \
85 char *method_fname = mono_method_full_name (method, TRUE); \
86 char *field_fname = mono_field_full_name (field); \
87 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS); \
88 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
89 g_free (method_fname); \
90 g_free (field_fname); \
91 goto exception_exit; \
/*
 * Give up on generic code sharing for the current method: when a
 * generic_sharing_context is active, optionally log the offending opcode
 * (verbose_level > 2), mark the cfg with GENERIC_SHARING_FAILED and jump
 * to exception_exit so the method is recompiled without sharing.
 */
93 #define GENERIC_SHARING_FAILURE(opcode) do { \
94 if (cfg->generic_sharing_context) { \
95 if (cfg->verbose_level > 2) \
96 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
97 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
98 goto exception_exit; \
/* Record an OutOfMemoryException on the cfg and jump to exception_exit. */
101 #define OUT_OF_MEMORY_FAILURE do { \
102 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
103 goto exception_exit; \
105 /* Determine whether 'ins' represents a load of the 'this' argument:
 * the current method must have a 'this' parameter and 'ins' must be an
 * OP_MOVE whose source register is the vreg of argument 0. */
106 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
108 static int ldind_to_load_membase (int opcode);
109 static int stind_to_store_membase (int opcode);
111 int mono_op_to_op_imm (int opcode);
112 int mono_op_to_op_imm_noemul (int opcode);
114 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
115 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
116 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
118 /* helper methods signatures */
/* Lazily-initialized MonoMethodSignature singletons describing the calling
 * conventions of the various runtime trampolines / helpers invoked from
 * JITted code. NOTE(review): the initialization site is not visible in
 * this extract. */
119 static MonoMethodSignature *helper_sig_class_init_trampoline = NULL;
120 static MonoMethodSignature *helper_sig_domain_get = NULL;
121 static MonoMethodSignature *helper_sig_generic_class_init_trampoline = NULL;
122 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm = NULL;
123 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline = NULL;
124 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline = NULL;
125 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm = NULL;
128 * Instruction metadata
136 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
137 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
143 #if SIZEOF_REGISTER == 8
148 /* keep in sync with the enum in mini.h */
151 #include "mini-ops.h"
156 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
157 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
159 * This should contain the index of the last sreg + 1. This is not the same
160 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
162 const gint8 ins_sreg_counts[] = {
163 #include "mini-ops.h"
168 #define MONO_INIT_VARINFO(vi,id) do { \
169 (vi)->range.first_use.pos.bid = 0xffff; \
/* Copy the three source registers regs[0..2] into ins->sreg1/2/3. */
175 mono_inst_set_src_registers (MonoInst *ins, int *regs)
177 ins->sreg1 = regs [0];
178 ins->sreg2 = regs [1];
179 ins->sreg3 = regs [2];
/* Public wrapper: allocate a fresh integer vreg for 'cfg'. */
183 mono_alloc_ireg (MonoCompile *cfg)
185 return alloc_ireg (cfg);
/* Public wrapper: allocate a fresh floating-point vreg for 'cfg'. */
189 mono_alloc_freg (MonoCompile *cfg)
191 return alloc_freg (cfg);
/* Public wrapper: allocate a fresh pointer-sized vreg for 'cfg'. */
195 mono_alloc_preg (MonoCompile *cfg)
197 return alloc_preg (cfg);
/* Public wrapper: allocate a destination vreg matching the given eval-stack type. */
201 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
203 return alloc_dreg (cfg, stack_type);
207 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
213 switch (type->type) {
216 case MONO_TYPE_BOOLEAN:
228 case MONO_TYPE_FNPTR:
230 case MONO_TYPE_CLASS:
231 case MONO_TYPE_STRING:
232 case MONO_TYPE_OBJECT:
233 case MONO_TYPE_SZARRAY:
234 case MONO_TYPE_ARRAY:
238 #if SIZEOF_REGISTER == 8
247 case MONO_TYPE_VALUETYPE:
248 if (type->data.klass->enumtype) {
249 type = mono_class_enum_basetype (type->data.klass);
252 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
255 case MONO_TYPE_TYPEDBYREF:
257 case MONO_TYPE_GENERICINST:
258 type = &type->data.generic_class->container_class->byval_arg;
262 g_assert (cfg->generic_sharing_context);
265 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * Debug dump of a basic block: prints 'msg', the block number, the list of
 * incoming edges, the list of outgoing edges (each as BB<num>(<dfn>)), and
 * then every instruction in the block via mono_print_ins_index ().
 */
271 mono_print_bb (MonoBasicBlock *bb, const char *msg)
276 printf ("\n%s %d: [IN: ", msg, bb->block_num);
277 for (i = 0; i < bb->in_count; ++i)
278 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
280 for (i = 0; i < bb->out_count; ++i)
281 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
283 for (tree = bb->code; tree; tree = tree->next)
284 mono_print_ins_index (-1, tree);
288 * Can't put this at the beginning, since other files reference stuff from this
/* Mark the method unverified: break into the debugger if the
 * break_on_unverified debug option is set, otherwise jump to the
 * 'unverified' handler label. */
293 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
/* Same pattern for metadata load failures: break or goto 'load_error'. */
295 #define LOAD_ERROR do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto load_error; } while (0)
/*
 * Fetch (or lazily create) the basic block starting at IL address 'ip'.
 * Looks up cil_offset_to_bb first; on a miss it bounds-checks 'ip' against
 * the method body (UNVERIFIED if outside), then allocates and registers a
 * new bblock whose cil_code points at 'ip'.
 */
297 #define GET_BBLOCK(cfg,tblock,ip) do { \
298 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
300 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
301 NEW_BBLOCK (cfg, (tblock)); \
302 (tblock)->cil_code = (ip); \
303 ADD_BBLOCK (cfg, (tblock)); \
307 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/*
 * x86/amd64 only: emit an OP_X86_LEA computing
 * sr1 + (sr2 << shift) + imm into a freshly allocated pointer vreg,
 * and append it to the current bblock.
 */
308 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
309 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
310 (dest)->dreg = alloc_preg ((cfg)); \
311 (dest)->sreg1 = (sr1); \
312 (dest)->sreg2 = (sr2); \
313 (dest)->inst_imm = (imm); \
314 (dest)->backend.shift_amount = (shift); \
315 MONO_ADD_INS ((cfg)->cbb, (dest)); \
319 #if SIZEOF_REGISTER == 8
/*
 * On 64-bit targets: when a binary op mixes a native pointer (STACK_PTR)
 * first operand with a 32-bit int (STACK_I4) second operand, emit an
 * OP_SEXT_I4 to sign-extend the int operand to register width and rewire
 * (ins)->sreg2 to the widened value. On 32-bit targets this is a no-op.
 */
320 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
321 /* FIXME: Need to add many more cases */ \
322 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
324 int dr = alloc_preg (cfg); \
325 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
326 (ins)->sreg2 = widen->dreg; \
330 #define ADD_WIDEN_OP(ins, arg1, arg2)
/*
 * Pop the two top-of-stack values (sp[0], sp[1]) and emit the binary op
 * 'op': wires the source vregs, derives the result stack type via
 * type_from_op (), widens the second operand if needed (ADD_WIDEN_OP),
 * allocates a dreg, appends the instruction, and pushes the (possibly
 * decomposed) result back on the eval stack.
 */
333 #define ADD_BINOP(op) do { \
334 MONO_INST_NEW (cfg, ins, (op)); \
336 ins->sreg1 = sp [0]->dreg; \
337 ins->sreg2 = sp [1]->dreg; \
338 type_from_op (ins, sp [0], sp [1]); \
340 /* Have to insert a widening op */ \
341 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
342 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
343 MONO_ADD_INS ((cfg)->cbb, (ins)); \
344 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
/*
 * Unary counterpart of ADD_BINOP: consumes sp[0], types the op via
 * type_from_op (), allocates the destination vreg, appends the
 * instruction and pushes the decomposed result.
 */
347 #define ADD_UNOP(op) do { \
348 MONO_INST_NEW (cfg, ins, (op)); \
350 ins->sreg1 = sp [0]->dreg; \
351 type_from_op (ins, sp [0], NULL); \
353 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
354 MONO_ADD_INS ((cfg)->cbb, (ins)); \
355 *sp++ = mono_decompose_opcode (cfg, ins); \
/*
 * Emit a two-way conditional branch: an OP_COMPARE of sp[0] vs sp[1]
 * followed by the (already created) branch instruction 'ins'.
 * The true edge goes to the bblock at 'target'; the false edge goes to
 * 'next_block' when supplied, otherwise to the bblock at the fall-through
 * 'ip'. Both edges are linked into the CFG and any values left on the
 * eval stack are spilled via handle_stack_args () before the branch.
 * NOTE(review): several interior lines are missing from this extract.
 */
358 #define ADD_BINCOND(next_block) do { \
361 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
362 cmp->sreg1 = sp [0]->dreg; \
363 cmp->sreg2 = sp [1]->dreg; \
364 type_from_op (cmp, sp [0], sp [1]); \
366 type_from_op (ins, sp [0], sp [1]); \
367 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
368 GET_BBLOCK (cfg, tblock, target); \
369 link_bblock (cfg, bblock, tblock); \
370 ins->inst_true_bb = tblock; \
371 if ((next_block)) { \
372 link_bblock (cfg, bblock, (next_block)); \
373 ins->inst_false_bb = (next_block); \
374 start_new_bblock = 1; \
376 GET_BBLOCK (cfg, tblock, ip); \
377 link_bblock (cfg, bblock, tblock); \
378 ins->inst_false_bb = tblock; \
379 start_new_bblock = 2; \
381 if (sp != stack_start) { \
382 handle_stack_args (cfg, stack_start, sp - stack_start); \
383 CHECK_UNVERIFIABLE (cfg); \
385 MONO_ADD_INS (bblock, cmp); \
386 MONO_ADD_INS (bblock, ins); \
390 * link_bblock: Links two basic blocks
392 * links two basic blocks in the control flow graph, the 'from'
393 * argument is the starting block and the 'to' argument is the block
394 * the control flow ends to after 'from'.
/*
 * Add a CFG edge from 'from' to 'to': optionally prints the edge for
 * debugging, then grows from->out_bb and to->in_bb (mempool-allocated
 * arrays copied with one extra slot) unless the edge already exists.
 * NOTE(review): the duplicate-check bookkeeping and array-swap lines are
 * partially missing from this extract.
 */
397 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
399 MonoBasicBlock **newa;
403 if (from->cil_code) {
405 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
407 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
410 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
412 printf ("edge from entry to exit\n");
/* Check whether the edge is already present in from's out-list. */
417 for (i = 0; i < from->out_count; ++i) {
418 if (to == from->out_bb [i]) {
/* Grow the out-edge array by one slot and copy the old entries. */
424 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
425 for (i = 0; i < from->out_count; ++i) {
426 newa [i] = from->out_bb [i];
/* Symmetric update of to's in-edge array. */
434 for (i = 0; i < to->in_count; ++i) {
435 if (from == to->in_bb [i]) {
441 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
442 for (i = 0; i < to->in_count; ++i) {
443 newa [i] = to->in_bb [i];
/* Public wrapper around the static link_bblock (). */
452 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
454 link_bblock (cfg, from, to);
458 * mono_find_block_region:
460 * We mark each basic block with a region ID. We use that to avoid BB
461 * optimizations when blocks are in different regions.
464 * A region token that encodes where this region is, and information
465 * about the clause owner for this block.
467 * The region encodes the try/catch/filter clause that owns this block
468 * as well as the type. -1 is a special value that represents a block
469 * that is in none of try/catch/filter.
/*
 * Map an IL 'offset' to a region token: scans the method's exception
 * clauses in order and returns ((clause_index + 1) << 8) OR'ed with the
 * region kind (FILTER / FINALLY / FAULT / CATCH) and the clause flags.
 * Filter regions are detected first (offset inside [filter_offset,
 * handler_offset)), then handler bodies, then protected (try) ranges.
 * NOTE(review): the fall-through return for "no region" is not visible
 * in this extract.
 */
472 mono_find_block_region (MonoCompile *cfg, int offset)
474 MonoMethodHeader *header = cfg->header;
475 MonoExceptionClause *clause;
478 for (i = 0; i < header->num_clauses; ++i) {
479 clause = &header->clauses [i];
480 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
481 (offset < (clause->handler_offset)))
482 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
484 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
485 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
486 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
487 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
488 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
490 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Not in any handler: check the protected (try) range itself. */
493 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
494 return ((i + 1) << 8) | clause->flags;
/*
 * Collect the exception clauses of kind 'type' whose protected range
 * contains 'ip' but not 'target' — i.e. the handlers a branch from ip to
 * target would leave. Matching clauses are appended to the GList 'res'.
 * NOTE(review): the declaration/return of 'res' is not visible in this
 * extract.
 */
501 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
503 MonoMethodHeader *header = cfg->header;
504 MonoExceptionClause *clause;
508 for (i = 0; i < header->num_clauses; ++i) {
509 clause = &header->clauses [i];
510 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
511 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
512 if (clause->flags == type)
513 res = g_list_append (res, clause);
/*
 * Get or create the stack-pointer variable associated with exception
 * 'region': cached in the cfg->spvars hash keyed by region id; created as
 * an int-typed OP_LOCAL flagged MONO_INST_INDIRECT so the register
 * allocator leaves it on the stack.
 */
520 mono_create_spvar_for_region (MonoCompile *cfg, int region)
524 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
528 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
529 /* prevent it from being register allocated */
530 var->flags |= MONO_INST_INDIRECT;
532 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up (without creating) the exception-object variable for the
 * handler starting at IL 'offset'; NULL if none was registered. */
536 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
538 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/*
 * Get or create the object-typed variable holding the in-flight exception
 * for the handler at IL 'offset'. Cached in cfg->exvars; created as an
 * OP_LOCAL flagged MONO_INST_INDIRECT to keep it stack-allocated.
 */
542 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
546 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
550 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
551 /* prevent it from being register allocated */
552 var->flags |= MONO_INST_INDIRECT;
554 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
560 * Returns the type used in the eval stack when @type is loaded.
561 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/*
 * Set inst->type (and inst->klass) to the eval-stack category (STACK_I4,
 * STACK_I8, STACK_R8, STACK_PTR, STACK_OBJ, STACK_VTYPE, ...) used when a
 * value of MonoType 'type' is loaded. Enums are reduced to their base
 * type and generic instances to the container class; unknown type codes
 * abort via g_error (). NOTE(review): many case labels and 'break's are
 * missing from this extract — groupings below are partial.
 */
564 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
568 inst->klass = klass = mono_class_from_mono_type (type);
/* Byref values are managed pointers on the eval stack. */
570 inst->type = STACK_MP;
575 switch (type->type) {
577 inst->type = STACK_INV;
581 case MONO_TYPE_BOOLEAN:
587 inst->type = STACK_I4;
592 case MONO_TYPE_FNPTR:
593 inst->type = STACK_PTR;
595 case MONO_TYPE_CLASS:
596 case MONO_TYPE_STRING:
597 case MONO_TYPE_OBJECT:
598 case MONO_TYPE_SZARRAY:
599 case MONO_TYPE_ARRAY:
600 inst->type = STACK_OBJ;
604 inst->type = STACK_I8;
608 inst->type = STACK_R8;
610 case MONO_TYPE_VALUETYPE:
611 if (type->data.klass->enumtype) {
/* Enums are handled as their underlying integral type. */
612 type = mono_class_enum_basetype (type->data.klass);
616 inst->type = STACK_VTYPE;
619 case MONO_TYPE_TYPEDBYREF:
620 inst->klass = mono_defaults.typed_reference_class;
621 inst->type = STACK_VTYPE;
623 case MONO_TYPE_GENERICINST:
/* Re-dispatch on the generic container's open type. */
624 type = &type->data.generic_class->container_class->byval_arg;
627 case MONO_TYPE_MVAR :
628 /* FIXME: all the arguments must be references for now,
629 * later look inside cfg and see if the arg num is
632 g_assert (cfg->generic_sharing_context);
633 inst->type = STACK_OBJ;
636 g_error ("unknown type 0x%02x in eval stack type", type->type);
641 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result stack-type of a numeric binary op, indexed by
 * [src1->type][src2->type]; STACK_INV marks invalid IL combinations.
 * Used by type_from_op () for ADD/SUB/MUL/DIV/REM. */
644 bin_num_table [STACK_MAX] [STACK_MAX] = {
645 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
646 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
647 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
648 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
649 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
650 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
651 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
652 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
657 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
660 /* reduce the size of this table */
/* Result stack-type of an integer-only binary op (AND/OR/XOR/...),
 * indexed by [src1->type][src2->type]; floats and refs are invalid. */
662 bin_int_table [STACK_MAX] [STACK_MAX] = {
663 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
664 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
665 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
666 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
667 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
668 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
669 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
670 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparability of two eval-stack types: 0 = invalid, nonzero = allowed.
 * Per the row comments, values 2/3/4 mark combinations that are only
 * conditionally verifiable (e.g. ptr vs managed pointer, ref equality). */
674 bin_comp_table [STACK_MAX] [STACK_MAX] = {
675 /* Inv i L p F & O vt */
677 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
678 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
679 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
680 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
681 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
682 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
683 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
686 /* reduce the size of this table */
/* Result stack-type of SHL/SHR/SHR_UN, indexed by [value][shift-amount];
 * the shift amount must be I4 or native int, the value keeps its width. */
688 shift_table [STACK_MAX] [STACK_MAX] = {
689 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
690 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
691 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
692 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
693 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
694 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
695 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
696 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
700 * Tables to map from the non-specific opcode to the matching
701 * type-specific opcode.
703 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
705 binops_op_map [STACK_MAX] = {
706 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
709 /* handles from CEE_NEG to CEE_CONV_U8 */
711 unops_op_map [STACK_MAX] = {
712 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
715 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
717 ovfops_op_map [STACK_MAX] = {
718 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
721 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
723 ovf2ops_op_map [STACK_MAX] = {
724 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
727 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
729 ovf3ops_op_map [STACK_MAX] = {
730 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
733 /* handles from CEE_BEQ to CEE_BLT_UN */
735 beqops_op_map [STACK_MAX] = {
736 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
739 /* handles from CEE_CEQ to CEE_CLT_UN */
741 ceqops_op_map [STACK_MAX] = {
742 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
746 * Sets ins->type (the type on the eval stack) according to the
747 * type of the opcode and the arguments to it.
748 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
750 * FIXME: this function sets ins->type unconditionally in some cases, but
751 * it should set it to invalid for some types (a conv.x on an object)
754 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
756 switch (ins->opcode) {
763 /* FIXME: check unverifiable args for STACK_MP */
764 ins->type = bin_num_table [src1->type] [src2->type];
765 ins->opcode += binops_op_map [ins->type];
772 ins->type = bin_int_table [src1->type] [src2->type];
773 ins->opcode += binops_op_map [ins->type];
778 ins->type = shift_table [src1->type] [src2->type];
779 ins->opcode += binops_op_map [ins->type];
784 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
785 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
786 ins->opcode = OP_LCOMPARE;
787 else if (src1->type == STACK_R8)
788 ins->opcode = OP_FCOMPARE;
790 ins->opcode = OP_ICOMPARE;
792 case OP_ICOMPARE_IMM:
793 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
794 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
795 ins->opcode = OP_LCOMPARE_IMM;
807 ins->opcode += beqops_op_map [src1->type];
810 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
811 ins->opcode += ceqops_op_map [src1->type];
817 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
818 ins->opcode += ceqops_op_map [src1->type];
822 ins->type = neg_table [src1->type];
823 ins->opcode += unops_op_map [ins->type];
826 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
827 ins->type = src1->type;
829 ins->type = STACK_INV;
830 ins->opcode += unops_op_map [ins->type];
836 ins->type = STACK_I4;
837 ins->opcode += unops_op_map [src1->type];
840 ins->type = STACK_R8;
841 switch (src1->type) {
844 ins->opcode = OP_ICONV_TO_R_UN;
847 ins->opcode = OP_LCONV_TO_R_UN;
851 case CEE_CONV_OVF_I1:
852 case CEE_CONV_OVF_U1:
853 case CEE_CONV_OVF_I2:
854 case CEE_CONV_OVF_U2:
855 case CEE_CONV_OVF_I4:
856 case CEE_CONV_OVF_U4:
857 ins->type = STACK_I4;
858 ins->opcode += ovf3ops_op_map [src1->type];
860 case CEE_CONV_OVF_I_UN:
861 case CEE_CONV_OVF_U_UN:
862 ins->type = STACK_PTR;
863 ins->opcode += ovf2ops_op_map [src1->type];
865 case CEE_CONV_OVF_I1_UN:
866 case CEE_CONV_OVF_I2_UN:
867 case CEE_CONV_OVF_I4_UN:
868 case CEE_CONV_OVF_U1_UN:
869 case CEE_CONV_OVF_U2_UN:
870 case CEE_CONV_OVF_U4_UN:
871 ins->type = STACK_I4;
872 ins->opcode += ovf2ops_op_map [src1->type];
875 ins->type = STACK_PTR;
876 switch (src1->type) {
878 ins->opcode = OP_ICONV_TO_U;
882 #if SIZEOF_REGISTER == 8
883 ins->opcode = OP_LCONV_TO_U;
885 ins->opcode = OP_MOVE;
889 ins->opcode = OP_LCONV_TO_U;
892 ins->opcode = OP_FCONV_TO_U;
898 ins->type = STACK_I8;
899 ins->opcode += unops_op_map [src1->type];
901 case CEE_CONV_OVF_I8:
902 case CEE_CONV_OVF_U8:
903 ins->type = STACK_I8;
904 ins->opcode += ovf3ops_op_map [src1->type];
906 case CEE_CONV_OVF_U8_UN:
907 case CEE_CONV_OVF_I8_UN:
908 ins->type = STACK_I8;
909 ins->opcode += ovf2ops_op_map [src1->type];
913 ins->type = STACK_R8;
914 ins->opcode += unops_op_map [src1->type];
917 ins->type = STACK_R8;
921 ins->type = STACK_I4;
922 ins->opcode += ovfops_op_map [src1->type];
927 ins->type = STACK_PTR;
928 ins->opcode += ovfops_op_map [src1->type];
936 ins->type = bin_num_table [src1->type] [src2->type];
937 ins->opcode += ovfops_op_map [src1->type];
938 if (ins->type == STACK_R8)
939 ins->type = STACK_INV;
941 case OP_LOAD_MEMBASE:
942 ins->type = STACK_PTR;
944 case OP_LOADI1_MEMBASE:
945 case OP_LOADU1_MEMBASE:
946 case OP_LOADI2_MEMBASE:
947 case OP_LOADU2_MEMBASE:
948 case OP_LOADI4_MEMBASE:
949 case OP_LOADU4_MEMBASE:
950 ins->type = STACK_PTR;
952 case OP_LOADI8_MEMBASE:
953 ins->type = STACK_I8;
955 case OP_LOADR4_MEMBASE:
956 case OP_LOADR8_MEMBASE:
957 ins->type = STACK_R8;
960 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
964 if (ins->type == STACK_MP)
965 ins->klass = mono_defaults.object_class;
970 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
976 param_table [STACK_MAX] [STACK_MAX] = {
981 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
985 switch (args->type) {
995 for (i = 0; i < sig->param_count; ++i) {
996 switch (args [i].type) {
1000 if (!sig->params [i]->byref)
1004 if (sig->params [i]->byref)
1006 switch (sig->params [i]->type) {
1007 case MONO_TYPE_CLASS:
1008 case MONO_TYPE_STRING:
1009 case MONO_TYPE_OBJECT:
1010 case MONO_TYPE_SZARRAY:
1011 case MONO_TYPE_ARRAY:
1018 if (sig->params [i]->byref)
1020 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1029 /*if (!param_table [args [i].type] [sig->params [i]->type])
1037 * When we need a pointer to the current domain many times in a method, we
1038 * call mono_domain_get() once and we store the result in a local variable.
1039 * This function returns the variable that represents the MonoDomain*.
/* Lazily create and return the local variable caching the MonoDomain*
 * (the result of a single mono_domain_get () call) for this method. */
1041 inline static MonoInst *
1042 mono_get_domainvar (MonoCompile *cfg)
1044 if (!cfg->domainvar)
1045 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1046 return cfg->domainvar;
1050 * The got_var contains the address of the Global Offset Table when AOT
/*
 * Return the variable holding the GOT address, lazily creating it.
 * Only meaningful on architectures defining MONO_ARCH_NEED_GOT_VAR and
 * when compiling AOT; NOTE(review): the non-AOT / non-GOT return paths
 * are not visible in this extract.
 */
1054 mono_get_got_var (MonoCompile *cfg)
1056 #ifdef MONO_ARCH_NEED_GOT_VAR
1057 if (!cfg->compile_aot)
1059 if (!cfg->got_var) {
1060 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1062 return cfg->got_var;
/*
 * Return the rgctx variable used by generic-shared code, lazily creating
 * it as a stack-allocated (MONO_INST_INDIRECT) int local. Requires an
 * active generic_sharing_context.
 */
1069 mono_get_vtable_var (MonoCompile *cfg)
1071 g_assert (cfg->generic_sharing_context);
1073 if (!cfg->rgctx_var) {
1074 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1075 /* force the var to be stack allocated */
1076 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1079 return cfg->rgctx_var;
/*
 * Inverse of type_to_eval_stack_type: map an instruction's eval-stack
 * category back to a MonoType* (e.g. STACK_I4 -> int32, STACK_OBJ ->
 * object). For managed pointers and value types the instruction's klass
 * supplies the concrete type; unhandled categories abort via g_error ().
 */
1083 type_from_stack_type (MonoInst *ins) {
1084 switch (ins->type) {
1085 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1086 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1087 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1088 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1090 return &ins->klass->this_arg;
1091 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1092 case STACK_VTYPE: return &ins->klass->byval_arg;
1094 g_error ("stack type %d to monotype not handled\n", ins->type);
1099 static G_GNUC_UNUSED int
1100 type_to_stack_type (MonoType *t)
1102 t = mono_type_get_underlying_type (t);
1106 case MONO_TYPE_BOOLEAN:
1109 case MONO_TYPE_CHAR:
1116 case MONO_TYPE_FNPTR:
1118 case MONO_TYPE_CLASS:
1119 case MONO_TYPE_STRING:
1120 case MONO_TYPE_OBJECT:
1121 case MONO_TYPE_SZARRAY:
1122 case MONO_TYPE_ARRAY:
1130 case MONO_TYPE_VALUETYPE:
1131 case MONO_TYPE_TYPEDBYREF:
1133 case MONO_TYPE_GENERICINST:
1134 if (mono_type_generic_inst_is_valuetype (t))
1140 g_assert_not_reached ();
1147 array_access_to_klass (int opcode)
1151 return mono_defaults.byte_class;
1153 return mono_defaults.uint16_class;
1156 return mono_defaults.int_class;
1159 return mono_defaults.sbyte_class;
1162 return mono_defaults.int16_class;
1165 return mono_defaults.int32_class;
1167 return mono_defaults.uint32_class;
1170 return mono_defaults.int64_class;
1173 return mono_defaults.single_class;
1176 return mono_defaults.double_class;
1177 case CEE_LDELEM_REF:
1178 case CEE_STELEM_REF:
1179 return mono_defaults.object_class;
1181 g_assert_not_reached ();
1187 * We try to share variables when possible
/*
 * Return a local variable to hold the value 'ins' at eval-stack 'slot'
 * across a basic-block boundary, sharing previously created variables
 * when possible: integer-like slots are cached in cfg->intvars keyed by
 * (stack type, slot); slots beyond max_stack (from inlining) and
 * non-cached types always get a fresh variable.
 * NOTE(review): the switch's case labels are missing from this extract.
 */
1190 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1195 /* inlining can result in deeper stacks */
1196 if (slot >= cfg->header->max_stack)
1197 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1199 pos = ins->type - 1 + slot * STACK_MAX;
1201 switch (ins->type) {
1208 if ((vnum = cfg->intvars [pos]))
1209 return cfg->varinfo [vnum];
1210 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1211 cfg->intvars [pos] = res->inst_c0;
1214 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * For AOT compilation, remember which (image, token) pair a runtime
 * object 'key' was loaded from, so the AOT compiler can re-resolve it.
 * Skipped when a generic_context is set or the token has no table
 * (wrapper references), per the inline comment below.
 */
1220 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1223 * Don't use this if a generic_context is set, since that means AOT can't
1224 * look up the method using just the image+token.
1225 * table == 0 means this is a reference made from a wrapper.
1227 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1228 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1229 jump_info_token->image = image;
1230 jump_info_token->token = token;
1231 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1236 * This function is called to handle items that are left on the evaluation stack
1237 * at basic block boundaries. What happens is that we save the values to local variables
1238 * and we reload them later when first entering the target basic block (with the
1239 * handle_loaded_temps () function).
1240 * A single joint point will use the same variables (stored in the array bb->out_stack or
1241 * bb->in_stack, if the basic block is before or after the joint point).
1243 * This function needs to be called _before_ emitting the last instruction of
1244 * the bb (i.e. before emitting a branch).
1245 * If the stack merge fails at a join point, cfg->unverifiable is set.
1248 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1251 MonoBasicBlock *bb = cfg->cbb;
1252 MonoBasicBlock *outb;
1253 MonoInst *inst, **locals;
1258 if (cfg->verbose_level > 3)
1259 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
1260 if (!bb->out_scount) {
1261 bb->out_scount = count;
1262 //printf ("bblock %d has out:", bb->block_num);
1264 for (i = 0; i < bb->out_count; ++i) {
1265 outb = bb->out_bb [i];
1266 /* exception handlers are linked, but they should not be considered for stack args */
1267 if (outb->flags & BB_EXCEPTION_HANDLER)
1269 //printf (" %d", outb->block_num);
1270 if (outb->in_stack) {
1272 bb->out_stack = outb->in_stack;
1278 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1279 for (i = 0; i < count; ++i) {
1281 * try to reuse temps already allocated for this purpouse, if they occupy the same
1282 * stack slot and if they are of the same type.
1283 * This won't cause conflicts since if 'local' is used to
1284 * store one of the values in the in_stack of a bblock, then
1285 * the same variable will be used for the same outgoing stack
1287 * This doesn't work when inlining methods, since the bblocks
1288 * in the inlined methods do not inherit their in_stack from
1289 * the bblock they are inlined to. See bug #58863 for an
1292 if (cfg->inlined_method)
1293 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1295 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
1300 for (i = 0; i < bb->out_count; ++i) {
1301 outb = bb->out_bb [i];
1302 /* exception handlers are linked, but they should not be considered for stack args */
1303 if (outb->flags & BB_EXCEPTION_HANDLER)
1305 if (outb->in_scount) {
1306 if (outb->in_scount != bb->out_scount) {
1307 cfg->unverifiable = TRUE;
1310 continue; /* check they are the same locals */
1312 outb->in_scount = count;
1313 outb->in_stack = bb->out_stack;
1316 locals = bb->out_stack;
1318 for (i = 0; i < count; ++i) {
1319 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1320 inst->cil_code = sp [i]->cil_code;
1321 sp [i] = locals [i];
1322 if (cfg->verbose_level > 3)
1323 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1327 * It is possible that the out bblocks already have in_stack assigned, and
1328 * the in_stacks differ. In this case, we will store to all the different
1335 /* Find a bblock which has a different in_stack */
1337 while (bindex < bb->out_count) {
1338 outb = bb->out_bb [bindex];
1339 /* exception handlers are linked, but they should not be considered for stack args */
1340 if (outb->flags & BB_EXCEPTION_HANDLER) {
1344 if (outb->in_stack != locals) {
1345 for (i = 0; i < count; ++i) {
1346 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1347 inst->cil_code = sp [i]->cil_code;
1348 sp [i] = locals [i];
1349 if (cfg->verbose_level > 3)
1350 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1352 locals = outb->in_stack;
/*
 * mini_emit_load_intf_reg_vtable:
 *   Load into intf_reg the interface offset for "klass", given the object's
 *   vtable in vtable_reg.  The interface_offsets array lives in memory
 *   immediately BEFORE the vtable, hence the negative membase offset in the
 *   non-AOT path.  In AOT mode the (adjusted) interface id is a patchable
 *   constant, so the address is formed at runtime with a pointer add.
 * NOTE(review): this chunk is a decimated extract — original line numbers are
 * embedded in the text and some lines (braces, return type) are missing.
 * Code left byte-identical.
 */
1361 /* Emit code which loads interface_offsets [klass->interface_id]
1362 * The array is stored in memory before vtable.
1365 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1367 if (cfg->compile_aot) {
1368 int ioffset_reg = alloc_preg (cfg);
1369 int iid_reg = alloc_preg (cfg);
/* iid_reg <- adjusted IID (AOT patch); ioffset = vtable + iid; load slot */
1371 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1372 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1373 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* non-AOT: offset is a compile-time constant before the vtable start */
1376 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_interface_bitmap_check:
 *   Set intf_bit_reg to a nonzero value iff the interface bitmap found at
 *   base_reg + offset has the bit for klass->interface_id set.
 *   Three strategies are visible here:
 *     - COMPRESSED_INTERFACE_BITMAP: call the mono_class_interface_match
 *       icall on the (compressed) bitmap pointer.
 *     - AOT: the IID is a patchable constant, so the byte index (iid >> 3)
 *       and bit mask (1 << (iid & 7)) are computed with emitted IR.
 *     - JIT: byte index and mask are folded at compile time.
 * NOTE(review): decimated extract; some lines (e.g. #else/#endif, args[]
 * declaration) are missing from this view.  Code left byte-identical.
 */
1381 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1383 int ibitmap_reg = alloc_preg (cfg);
1384 #ifdef COMPRESSED_INTERFACE_BITMAP
1386 MonoInst *res, *ins;
1387 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1388 MONO_ADD_INS (cfg->cbb, ins);
1390 if (cfg->compile_aot)
1391 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1393 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1394 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1395 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1397 int ibitmap_byte_reg = alloc_preg (cfg);
1399 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1401 if (cfg->compile_aot) {
1402 int iid_reg = alloc_preg (cfg);
1403 int shifted_iid_reg = alloc_preg (cfg);
1404 int ibitmap_byte_address_reg = alloc_preg (cfg);
1405 int masked_iid_reg = alloc_preg (cfg);
1406 int iid_one_bit_reg = alloc_preg (cfg);
1407 int iid_bit_reg = alloc_preg (cfg);
/* runtime computation: byte = bitmap[iid >> 3]; bit = 1 << (iid & 7) */
1408 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1409 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1410 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1411 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1412 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1413 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1414 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1415 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT path: both the byte offset and the mask are immediates */
1417 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1418 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
/*
 * Thin wrappers over mini_emit_interface_bitmap_check: the only difference is
 * where the interface_bitmap field lives — on MonoClass vs. on MonoVTable.
 */
1424 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1425 * stored in "klass_reg" implements the interface "klass".
1428 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1430 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1434 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1435 * stored in "vtable_reg" implements the interface "klass".
1438 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1440 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
/*
 * mini_emit_max_iid_check:
 *   Emit a guard that klass->interface_id <= max_iid_reg.  On failure it
 *   either branches to false_target (when given) or throws
 *   InvalidCastException via a conditional-exception opcode.  The AOT path
 *   materializes the IID through a patchable constant.
 * NOTE(review): the branch between the false_target and the COND_EXC forms
 * is partially elided in this decimated view — presumably gated on
 * false_target != NULL; confirm against the full source.
 */
1444 * Emit code which checks whenever the interface id of @klass is smaller than
1445 * than the value given by max_iid_reg.
1448 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1449 MonoBasicBlock *false_target)
1451 if (cfg->compile_aot) {
1452 int iid_reg = alloc_preg (cfg);
1453 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1454 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1457 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1459 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1461 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
/* Same check, but max_interface_id is fetched from a vtable */
1464 /* Same as above, but obtains max_iid from a vtable */
1466 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1467 MonoBasicBlock *false_target)
1469 int max_iid_reg = alloc_preg (cfg);
1471 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1472 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/* Same check, but max_interface_id is fetched from a MonoClass */
1475 /* Same as above, but obtains max_iid from a klass */
1477 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1478 MonoBasicBlock *false_target)
1480 int max_iid_reg = alloc_preg (cfg);
1482 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1483 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 *   Emit an "isinst"-style subtype test for a non-interface class: compare
 *   supertypes[klass->idepth - 1] of the candidate class (klass_reg) against
 *   the target class.  The target is either a runtime MonoInst (klass_ins,
 *   for generic sharing), an AOT class constant, or an immediate pointer.
 *   When the supertable may be chained (idepth > MONO_DEFAULT_SUPERTABLE_SIZE)
 *   a preliminary idepth bounds check branches to false_target.
 *   On match, control branches to true_target.
 * NOTE(review): decimated extract; some control-flow lines are missing from
 * this view.  Code left byte-identical.
 */
1487 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1489 int idepth_reg = alloc_preg (cfg);
1490 int stypes_reg = alloc_preg (cfg);
1491 int stype = alloc_preg (cfg);
1493 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1494 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1495 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1496 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1498 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1499 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1501 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1502 } else if (cfg->compile_aot) {
1503 int const_reg = alloc_preg (cfg);
1504 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1505 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1507 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1509 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: no runtime class instruction (non-shared case). */
1513 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1515 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 *   Interface cast check from a vtable: bounds-check max_interface_id, then
 *   test the interface bitmap bit; branch to true_target on success or raise
 *   InvalidCastException.
 */
1519 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1521 int intf_reg = alloc_preg (cfg);
1523 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1524 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1525 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1527 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1529 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1533 * Variant of the above that takes a register to the class, not the vtable.
1536 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1538 int intf_bit_reg = alloc_preg (cfg);
1540 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1541 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1542 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1544 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1546 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 *   Emit an exact class-equality check of klass_reg against the target class
 *   (runtime inst / AOT constant / immediate) and throw InvalidCastException
 *   on mismatch.
 * NOTE(review): the first if-condition line (presumably "if (klass_inst)") is
 * missing from this decimated view; code left byte-identical.
 */
1550 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1553 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1554 } else if (cfg->compile_aot) {
1555 int const_reg = alloc_preg (cfg);
1556 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1557 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1559 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1561 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Wrapper without a runtime class instruction. */
1565 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1567 return mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 *   Same equality test, but branch with branch_op to target instead of
 *   raising an exception.
 */
1571 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1573 if (cfg->compile_aot) {
1574 int const_reg = alloc_preg (cfg);
1575 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1576 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1578 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1580 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/* Forward declaration: castclass is mutually recursive with castclass_inst
 * (used below for the element class of arrays of arrays). */
1584 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 *   Emit a full "castclass" check of the class in klass_reg (object in
 *   obj_reg) against the compile-time class "klass", raising
 *   InvalidCastException on failure.  Visible cases:
 *     - arrays: check rank, then check the element (cast) class, with
 *       special-casing for enum element types; SZARRAY additionally verifies
 *       the object has no bounds (is a vector).  obj_reg == -1 skips the
 *       bounds check (used when recursing on arrays of arrays).
 *     - ordinary classes: idepth + supertypes[idepth-1] exact check via
 *       mini_emit_class_check_inst.
 * NOTE(review): decimated extract — several enclosing if/else lines are
 * missing from this view; code left byte-identical.
 */
1587 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1590 int rank_reg = alloc_preg (cfg);
1591 int eclass_reg = alloc_preg (cfg);
1593 g_assert (!klass_inst);
1594 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1595 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1596 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1597 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1598 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
1599 if (klass->cast_class == mono_defaults.object_class) {
1600 int parent_reg = alloc_preg (cfg);
1601 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1602 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1603 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1604 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1605 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1606 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1607 } else if (klass->cast_class == mono_defaults.enum_class) {
1608 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1609 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1610 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1612 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1613 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1616 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1617 /* Check that the object is a vector too */
1618 int bounds_reg = alloc_preg (cfg);
1619 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1620 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1621 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1624 int idepth_reg = alloc_preg (cfg);
1625 int stypes_reg = alloc_preg (cfg);
1626 int stype = alloc_preg (cfg);
1628 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1629 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1630 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1631 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1633 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1634 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1635 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Wrapper without a runtime class instruction. */
1640 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1642 return mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 *   Emit inline IR to set "size" bytes at destreg+offset to "val" (only
 *   val == 0 is supported, per the assert).  Small aligned sizes use
 *   store-immediate opcodes; the general path stores from a zeroed register
 *   in descending granularity (8/4/2/1 bytes), with 8-byte stores only on
 *   64-bit targets that allow the access.
 * NOTE(review): decimated extract — the switch/while scaffolding and offset
 * bookkeeping lines are missing from this view; code left byte-identical.
 */
1646 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1650 g_assert (val == 0);
1655 if ((size <= 4) && (size <= align)) {
1658 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1661 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1664 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1666 #if SIZEOF_REGISTER == 8
1668 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* general path: materialize the value in a register once */
1674 val_reg = alloc_preg (cfg);
1676 if (SIZEOF_REGISTER == 8)
1677 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1679 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* unaligned destination: fall back to byte stores */
1682 /* This could be optimized further if neccesary */
1684 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1691 #if !NO_UNALIGNED_ACCESS
1692 if (SIZEOF_REGISTER == 8) {
1694 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1699 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1707 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1712 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1717 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 *   Emit inline IR copying "size" bytes from srcreg+soffset to
 *   destreg+doffset.  Copies in descending granularity (8/4/2/1 bytes);
 *   misaligned buffers fall back to byte copies.  A hard assert bounds
 *   "size" to avoid unbounded inline code expansion.
 * NOTE(review): decimated extract — loop scaffolding and the
 *   soffset/doffset/size update lines are missing from this view; code left
 *   byte-identical.
 */
1724 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1731 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1732 g_assert (size < 10000);
/* unaligned: byte-at-a-time copy */
1735 /* This could be optimized further if neccesary */
1737 cur_reg = alloc_preg (cfg);
1738 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1739 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1746 #if !NO_UNALIGNED_ACCESS
1747 if (SIZEOF_REGISTER == 8) {
1749 cur_reg = alloc_preg (cfg);
1750 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1751 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1760 cur_reg = alloc_preg (cfg);
1761 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1762 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1768 cur_reg = alloc_preg (cfg);
1769 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1770 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1776 cur_reg = alloc_preg (cfg);
1777 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1778 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * ret_type_to_call_opcode:
 *   Map a method's return MonoType to the appropriate call opcode family
 *   (VOIDCALL / CALL / LCALL / FCALL / VCALL), further selected by "calli"
 *   (indirect, *_REG) and "virt" (*VIRT).  Enums recurse with their base
 *   type; generic instances recurse with the container class; the generic
 *   sharing context resolves VAR/MVAR before the switch.
 * NOTE(review): decimated extract — byref early-return and several case
 * labels are missing from this view; code left byte-identical.
 */
1786 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
1789 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1792 type = mini_get_basic_type_from_generic (gsctx, type);
1793 switch (type->type) {
1794 case MONO_TYPE_VOID:
1795 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1798 case MONO_TYPE_BOOLEAN:
1801 case MONO_TYPE_CHAR:
1804 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1808 case MONO_TYPE_FNPTR:
1809 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1810 case MONO_TYPE_CLASS:
1811 case MONO_TYPE_STRING:
1812 case MONO_TYPE_OBJECT:
1813 case MONO_TYPE_SZARRAY:
1814 case MONO_TYPE_ARRAY:
1815 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1818 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1821 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1822 case MONO_TYPE_VALUETYPE:
1823 if (type->data.klass->enumtype) {
/* enums call like their underlying integral type */
1824 type = mono_class_enum_basetype (type->data.klass);
1827 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1828 case MONO_TYPE_TYPEDBYREF:
1829 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1830 case MONO_TYPE_GENERICINST:
1831 type = &type->data.generic_class->container_class->byval_arg;
1834 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
/*
 * Verification helper: compare an evaluation-stack item's stack type
 * (STACK_I4/I8/R8/PTR/MP/OBJ/VTYPE) against a target MonoType; returns
 * nonzero when the store would be invalid.  Several FIXMEs note that full
 * type compatibility (beyond stack-kind matching) is not checked yet.
 * NOTE(review): decimated extract — many "return 1;"/"return 0;" lines and
 * case labels are missing from this view; code left byte-identical.
 */
1840 * target_type_is_incompatible:
1841 * @cfg: MonoCompile context
1843 * Check that the item @arg on the evaluation stack can be stored
1844 * in the target type (can be a local, or field, etc).
1845 * The cfg arg can be used to check if we need verification or just
1848 * Returns: non-0 value if arg can't be stored on a target.
1851 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1853 MonoType *simple_type;
1856 if (target->byref) {
1857 /* FIXME: check that the pointed to types match */
1858 if (arg->type == STACK_MP)
1859 return arg->klass != mono_class_from_mono_type (target);
1860 if (arg->type == STACK_PTR)
1865 simple_type = mono_type_get_underlying_type (target);
1866 switch (simple_type->type) {
1867 case MONO_TYPE_VOID:
1871 case MONO_TYPE_BOOLEAN:
1874 case MONO_TYPE_CHAR:
1877 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1881 /* STACK_MP is needed when setting pinned locals */
1882 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1887 case MONO_TYPE_FNPTR:
1888 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1891 case MONO_TYPE_CLASS:
1892 case MONO_TYPE_STRING:
1893 case MONO_TYPE_OBJECT:
1894 case MONO_TYPE_SZARRAY:
1895 case MONO_TYPE_ARRAY:
1896 if (arg->type != STACK_OBJ)
1898 /* FIXME: check type compatibility */
1902 if (arg->type != STACK_I8)
1907 if (arg->type != STACK_R8)
1910 case MONO_TYPE_VALUETYPE:
1911 if (arg->type != STACK_VTYPE)
/* value types additionally require an exact class match */
1913 klass = mono_class_from_mono_type (simple_type);
1914 if (klass != arg->klass)
1917 case MONO_TYPE_TYPEDBYREF:
1918 if (arg->type != STACK_VTYPE)
1920 klass = mono_class_from_mono_type (simple_type);
1921 if (klass != arg->klass)
1924 case MONO_TYPE_GENERICINST:
1925 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1926 if (arg->type != STACK_VTYPE)
1928 klass = mono_class_from_mono_type (simple_type);
1929 if (klass != arg->klass)
1933 if (arg->type != STACK_OBJ)
1935 /* FIXME: check type compatibility */
1939 case MONO_TYPE_MVAR:
1940 /* FIXME: all the arguments must be references for now,
1941 * later look inside cfg and see if the arg num is
1942 * really a reference
1944 g_assert (cfg->generic_sharing_context);
1945 if (arg->type != STACK_OBJ)
1949 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
/*
 * check_call_signature:
 *   Validate the evaluation-stack types of "args" against "sig"; nonzero
 *   means the call is not type-correct.  Checks the implicit "this"
 *   (args[0]) when present, then each parameter's stack kind.  Byref
 *   parameters accept STACK_MP/STACK_PTR.  Enums and generic instances
 *   recurse through their underlying/container type.
 * NOTE(review): decimated extract — return statements and several case
 * labels are missing from this view; code left byte-identical.
 */
1955 * Prepare arguments for passing to a function call.
1956 * Return a non-zero value if the arguments can't be passed to the given
1958 * The type checks are not yet complete and some conversions may need
1959 * casts on 32 or 64 bit architectures.
1961 * FIXME: implement this using target_type_is_incompatible ()
1964 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1966 MonoType *simple_type;
1970 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1974 for (i = 0; i < sig->param_count; ++i) {
1975 if (sig->params [i]->byref) {
1976 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1980 simple_type = sig->params [i];
1981 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1983 switch (simple_type->type) {
1984 case MONO_TYPE_VOID:
1989 case MONO_TYPE_BOOLEAN:
1992 case MONO_TYPE_CHAR:
1995 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2001 case MONO_TYPE_FNPTR:
2002 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2005 case MONO_TYPE_CLASS:
2006 case MONO_TYPE_STRING:
2007 case MONO_TYPE_OBJECT:
2008 case MONO_TYPE_SZARRAY:
2009 case MONO_TYPE_ARRAY:
2010 if (args [i]->type != STACK_OBJ)
2015 if (args [i]->type != STACK_I8)
2020 if (args [i]->type != STACK_R8)
2023 case MONO_TYPE_VALUETYPE:
2024 if (simple_type->data.klass->enumtype) {
2025 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2028 if (args [i]->type != STACK_VTYPE)
2031 case MONO_TYPE_TYPEDBYREF:
2032 if (args [i]->type != STACK_VTYPE)
2035 case MONO_TYPE_GENERICINST:
2036 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2040 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call / callvirt_to_call_membase:
 *   Opcode translators used when a virtual call can be devirtualized
 *   (direct call) or dispatched through a loaded slot (membase call).
 * NOTE(review): decimated extract — most case/return pairs are missing from
 * this view; code left byte-identical.
 */
2048 callvirt_to_call (int opcode)
2053 case OP_VOIDCALLVIRT:
2062 g_assert_not_reached ();
2069 callvirt_to_call_membase (int opcode)
2073 return OP_CALL_MEMBASE;
2074 case OP_VOIDCALLVIRT:
2075 return OP_VOIDCALL_MEMBASE;
2077 return OP_FCALL_MEMBASE;
2079 return OP_LCALL_MEMBASE;
2081 return OP_VCALL_MEMBASE;
2083 g_assert_not_reached ();
/*
 * emit_imt_argument:
 *   Pass the IMT (interface method table) "hidden" argument for an
 *   interface call: either the imt_arg supplied by the caller (generic
 *   sharing), an AOT method constant, or the method pointer itself.
 *   LLVM targets record the register in call->imt_arg_reg; otherwise the
 *   value is bound to MONO_ARCH_IMT_REG when the architecture defines one,
 *   or handed to mono_arch_emit_imt_argument.
 * NOTE(review): decimated extract — declarations and #else/#endif lines are
 * missing from this view; code left byte-identical.
 */
2089 #ifdef MONO_ARCH_HAVE_IMT
2091 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2095 if (COMPILE_LLVM (cfg)) {
2096 method_reg = alloc_preg (cfg);
2099 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2100 } else if (cfg->compile_aot) {
2101 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2104 MONO_INST_NEW (cfg, ins, OP_PCONST);
2105 ins->inst_p0 = call->method;
2106 ins->dreg = method_reg;
2107 MONO_ADD_INS (cfg->cbb, ins);
2111 call->imt_arg_reg = method_reg;
2113 #ifdef MONO_ARCH_IMT_REG
2114 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2116 /* Need this to keep the IMT arg alive */
2117 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
2122 #ifdef MONO_ARCH_IMT_REG
2123 method_reg = alloc_preg (cfg);
2126 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2127 } else if (cfg->compile_aot) {
2128 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2131 MONO_INST_NEW (cfg, ins, OP_PCONST);
2132 ins->inst_p0 = call->method;
2133 ins->dreg = method_reg;
2134 MONO_ADD_INS (cfg->cbb, ins);
2137 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2139 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/*
 * mono_patch_info_new:
 *   Allocate a MonoJumpInfo from the given mempool and fill in its target;
 *   the ip/type fields are set on lines elided from this view.
 */
2144 static MonoJumpInfo *
2145 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2147 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2151 ji->data.target = target;
/*
 * mono_emit_call_args:
 *   Build a MonoCallInst for "sig"/"args": pick the opcode (tailcall or via
 *   ret_type_to_call_opcode), set up the return value — for struct returns
 *   either reuse cfg->vret_addr or create a temp plus an OP_OUTARG_VTRETADDR
 *   whose address feeds the callee — and let the backend (LLVM or
 *   mono_arch_emit_call) lower the argument passing.  On soft-float targets,
 *   R4 arguments are pre-converted with an icall because the conversion
 *   cannot be done mid call-sequence.  Updates cfg->param_area/flags.
 * NOTE(review): decimated extract — several declarations and condition lines
 * (e.g. the tail/vret_addr guards) are missing from this view; code left
 * byte-identical.
 */
2156 inline static MonoCallInst *
2157 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2158 MonoInst **args, int calli, int virtual, int tail)
2161 #ifdef MONO_ARCH_SOFT_FLOAT
2166 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2168 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2171 call->signature = sig;
2173 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
2176 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2177 call->vret_var = cfg->vret_addr;
2178 //g_assert_not_reached ();
2180 } else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2181 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2184 temp->backend.is_pinvoke = sig->pinvoke;
2187 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2188 * address of return value to increase optimization opportunities.
2189 * Before vtype decomposition, the dreg of the call ins itself represents the
2190 * fact the call modifies the return value. After decomposition, the call will
2191 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2192 * will be transformed into an LDADDR.
2194 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2195 loada->dreg = alloc_preg (cfg);
2196 loada->inst_p0 = temp;
2197 /* We reference the call too since call->dreg could change during optimization */
2198 loada->inst_p1 = call;
2199 MONO_ADD_INS (cfg->cbb, loada);
2201 call->inst.dreg = temp->dreg;
2203 call->vret_var = loada;
2204 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2205 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2207 #ifdef MONO_ARCH_SOFT_FLOAT
2208 if (COMPILE_SOFT_FLOAT (cfg)) {
2210 * If the call has a float argument, we would need to do an r8->r4 conversion using
2211 * an icall, but that cannot be done during the call sequence since it would clobber
2212 * the call registers + the stack. So we do it before emitting the call.
2214 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2216 MonoInst *in = call->args [i];
2218 if (i >= sig->hasthis)
2219 t = sig->params [i - sig->hasthis];
2221 t = &mono_defaults.int_class->byval_arg;
2222 t = mono_type_get_underlying_type (t);
2224 if (!t->byref && t->type == MONO_TYPE_R4) {
2225 MonoInst *iargs [1];
2229 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2231 /* The result will be in an int vreg */
2232 call->args [i] = conv;
/* backend lowering of argument passing */
2239 if (COMPILE_LLVM (cfg))
2240 mono_llvm_emit_call (cfg, call);
2242 mono_arch_emit_call (cfg, call);
2244 mono_arch_emit_call (cfg, call);
2247 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2248 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * mono_emit_calli:
 *   Emit an indirect call through "addr" with the given signature/args.
 */
2253 inline static MonoInst*
2254 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2256 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE);
2258 call->inst.sreg1 = addr->dreg;
2260 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2262 return (MonoInst*)call;
/*
 * set_rgctx_arg:
 *   Bind the rgctx argument either to the architecture's dedicated RGCTX
 *   register or, lacking one, record it in call->rgctx_arg_reg.
 */
2266 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2268 #ifdef MONO_ARCH_RGCTX_REG
2269 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2270 cfg->uses_rgctx_reg = TRUE;
2271 call->rgctx_reg = TRUE;
2273 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_rgctx_calli:
 *   Indirect call that additionally carries an rgctx argument; the rgctx
 *   value is copied to a fresh vreg before the call is constructed.
 */
2280 inline static MonoInst*
2281 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2287 rgctx_reg = mono_alloc_preg (cfg);
2288 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2290 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2292 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2293 return (MonoInst*)call;
/* Forward declarations for rgctx fetch helpers defined later in the file. */
2297 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
2299 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, int rgctx_type);
/*
 * mono_emit_method_call_full:
 *   Central call-emission routine.  Handles, in order:
 *     - string ctors (signature rewritten to return string);
 *     - possibly-remote calls (wrapped in remoting-invoke-with-check, or
 *       dispatched via an rgctx-provided address under generic sharing);
 *     - delegate Invoke (direct membase call through invoke_impl);
 *     - devirtualization of non-virtual or final methods (direct call after
 *       a this-check);
 *     - true virtual dispatch: load the vtable (with fault/null check),
 *       then either an IMT slot (interfaces, negative offset from vtable),
 *       an interface-offset slot, or a regular vtable slot; generic virtual
 *       methods also pass an IMT argument.
 * NOTE(review): decimated extract — several guards, #ifdef/#endif pairs and
 * declarations are missing from this view; code left byte-identical.
 */
2302 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2303 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2305 gboolean might_be_remote;
2306 gboolean virtual = this != NULL;
2307 gboolean enable_for_aot = TRUE;
2311 if (method->string_ctor) {
2312 /* Create the real signature */
2313 /* FIXME: Cache these */
2314 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2315 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2320 might_be_remote = this && sig->hasthis &&
2321 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2322 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this);
2324 context_used = mono_method_check_context_used (method);
2325 if (might_be_remote && context_used) {
2328 g_assert (cfg->generic_sharing_context);
2330 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2332 return mono_emit_calli (cfg, sig, args, addr);
2335 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE);
2337 if (might_be_remote)
2338 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2340 call->method = method;
2341 call->inst.flags |= MONO_INST_HAS_METHOD;
2342 call->inst.inst_left = this;
2345 int vtable_reg, slot_reg, this_reg;
2347 this_reg = this->dreg;
2349 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2350 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2351 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2353 /* Make a call to delegate->invoke_impl */
2354 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2355 call->inst.inst_basereg = this_reg;
2356 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2357 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2359 return (MonoInst*)call;
/* devirtualization: non-virtual, or final and not a remoting wrapper */
2363 if ((!cfg->compile_aot || enable_for_aot) &&
2364 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2365 (MONO_METHOD_IS_FINAL (method) &&
2366 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2367 !(method->klass->marshalbyref && context_used)) {
2369 * the method is not virtual, we just need to ensure this is not null
2370 * and then we can call the method directly.
2372 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2374 * The check above ensures method is not gshared, this is needed since
2375 * gshared methods can't have wrappers.
2377 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2380 if (!method->string_ctor)
2381 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2383 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2385 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2387 return (MonoInst*)call;
2390 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2392 * the method is virtual, but we can statically dispatch since either
2393 * it's class or the method itself are sealed.
2394 * But first we need to ensure it's not a null reference.
2396 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2398 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2399 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2401 return (MonoInst*)call;
2404 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2406 vtable_reg = alloc_preg (cfg);
2407 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2408 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2410 #ifdef MONO_ARCH_HAVE_IMT
/* IMT slots sit at negative offsets before the vtable proper */
2412 guint32 imt_slot = mono_method_get_imt_slot (method);
2413 emit_imt_argument (cfg, call, imt_arg);
2414 slot_reg = vtable_reg;
2415 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2418 if (slot_reg == -1) {
2419 slot_reg = alloc_preg (cfg);
2420 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2421 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2424 slot_reg = vtable_reg;
2425 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2426 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2427 #ifdef MONO_ARCH_HAVE_IMT
2429 g_assert (mono_method_signature (method)->generic_param_count);
2430 emit_imt_argument (cfg, call, imt_arg);
2435 call->inst.sreg1 = slot_reg;
2436 call->virtual = TRUE;
2439 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2441 return (MonoInst*)call;
/*
 * mono_emit_rgctx_method_call_full:
 *   Like mono_emit_method_call_full, but also passes an rgctx/vtable
 *   argument (copied to a fresh vreg, then bound via set_rgctx_arg).
 */
2445 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2446 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
2453 rgctx_reg = mono_alloc_preg (cfg);
2454 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2456 ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2458 call = (MonoCallInst*)ins;
2460 set_rgctx_arg (cfg, call, rgctx_reg, vtable_arg);
/* Convenience wrapper: default signature, no IMT argument. */
2466 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2468 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
/*
 * mono_emit_native_call:
 *   Emit a direct call to a native function address "func".
 */
2472 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2479 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE);
2482 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2484 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *   Look up the registered JIT icall for "func" and call its wrapper.
 */
2488 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2490 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2494 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2498 * mono_emit_abs_call:
2500 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2502 inline static MonoInst*
2503 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2504 MonoMethodSignature *sig, MonoInst **args)
/* Allocate the patch info from the compile mempool; it lives as long as CFG. */
2506 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2510 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Lazily create the abs_patches table; keys compare by pointer identity. */
2513 if (cfg->abs_patches == NULL)
2514 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2515 g_hash_table_insert (cfg->abs_patches, ji, ji);
2516 ins = mono_emit_native_call (cfg, ji, sig, args);
/* Mark the fptr as a patch so later passes resolve it instead of calling it. */
2517 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *
 *   For pinvoke (or LLVM-compiled) calls returning a sub-word integer,
 * emit an explicit widening conversion of the call result, since native
 * code may leave the upper bits of the return register uninitialized.
 * Returns the (possibly replaced) result instruction.
 */
2522 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2524 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2525 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2529 * Native code might return non register sized integers
2530 * without initializing the upper bits.
/* Map the return type's load opcode to the matching widening conversion. */
2532 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2533 case OP_LOADI1_MEMBASE:
2534 widen_op = OP_ICONV_TO_I1;
2536 case OP_LOADU1_MEMBASE:
2537 widen_op = OP_ICONV_TO_U1;
2539 case OP_LOADI2_MEMBASE:
2540 widen_op = OP_ICONV_TO_I2;
2542 case OP_LOADU2_MEMBASE:
2543 widen_op = OP_ICONV_TO_U2;
/* -1 means the return type needs no widening (word-sized or larger). */
2549 if (widen_op != -1) {
2550 int dreg = alloc_preg (cfg);
2553 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
/* Preserve the stack type of the original call result. */
2554 widen->type = ins->type;
/*
 * get_memcpy_method:
 *
 *   Return (and cache in a static) the corlib String::memcpy(3) helper.
 * Aborts with g_error if the method is missing, i.e. the corlib is too old.
 */
2564 get_memcpy_method (void)
2566 static MonoMethod *memcpy_method = NULL;
2567 if (!memcpy_method) {
2568 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2570 g_error ("Old corlib found. Install a new one");
2572 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 *   Build a bitmap in *WB_BITMAP with one bit per pointer-sized slot of
 * KLASS (starting at OFFSET bytes) that holds a managed reference, so the
 * GC write barrier knows which slots to card-mark. Recurses into nested
 * value-type fields that contain references. Static fields are skipped.
 */
2576 create_write_barrier_bitmap (MonoClass *klass, unsigned *wb_bitmap, int offset)
2578 MonoClassField *field;
2579 gpointer iter = NULL;
2581 while ((field = mono_class_get_fields (klass, &iter))) {
2584 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* For value types, field offsets include the MonoObject header; strip it. */
2586 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2587 if (mono_type_is_reference (field->type)) {
/* Reference fields must be pointer-aligned for the per-slot bitmap to work. */
2588 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2589 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2591 /*FIXME support nested value types so this works for: struct X { Y y; int z;} struct Y { object a,b; }*/
2592 MonoClass *field_class = mono_class_from_mono_type (field->type);
2593 if (field_class->has_references)
2594 create_write_barrier_bitmap (field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *
 *   Emit a GC write barrier for a store of VALUE (or, if VALUE is NULL,
 * the register VALUE_REG) through PTR. Picks the cheapest available form:
 * an arch-specific OP_CARD_TABLE_WBARRIER, inline card-table marking, or
 * a call to the generic managed write-barrier method. No-op when the
 * compile does not generate write barriers.
 */
2600 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value, int value_reg)
2602 int card_table_shift_bits;
2603 gpointer card_table_mask;
2605 MonoInst *dummy_use;
2606 int nursery_shift_bits;
2607 size_t nursery_size;
2608 gboolean has_card_table_wb = FALSE;
2610 if (!cfg->gen_write_barriers)
2613 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
2615 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
2617 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
2618 has_card_table_wb = TRUE;
/* Fast path: a single arch-lowered card-table barrier instruction.
 * Not used under AOT since the card table address is not a compile-time
 * constant there. */
2621 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0) {
2624 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2625 wbarrier->sreg1 = ptr->dreg;
2627 wbarrier->sreg2 = value->dreg;
2629 wbarrier->sreg2 = value_reg;
2630 MONO_ADD_INS (cfg->cbb, wbarrier);
2631 } else if (card_table) {
/* Inline card marking: card = table[ptr >> shift (& mask)], *card = 1. */
2632 int offset_reg = alloc_preg (cfg);
2633 int card_reg = alloc_preg (cfg);
2636 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
2637 if (card_table_mask)
2638 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
2640 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
2641 * IMM's larger than 32bits.
2643 if (cfg->compile_aot) {
2644 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
2646 MONO_INST_NEW (cfg, ins, OP_PCONST);
2647 ins->inst_p0 = card_table;
2648 ins->dreg = card_reg;
2649 MONO_ADD_INS (cfg->cbb, ins);
2652 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
2653 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* Slow path: call the generic managed write barrier. */
2655 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2656 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep VALUE (or VALUE_REG) alive across the barrier for the register
 * allocator / GC maps. */
2660 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
2662 MONO_INST_NEW (cfg, dummy_use, OP_DUMMY_USE);
2663 dummy_use->sreg1 = value_reg;
2664 MONO_ADD_INS (cfg->cbb, dummy_use);
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Emit an inline, write-barrier-aware copy of a value of type KLASS of
 * SIZE bytes from iargs[1] to iargs[0] (both address registers). Emits a
 * per-slot write barrier for each reference slot, or falls back to the
 * mono_gc_wbarrier_value_copy_bitmap icall for large copies. Returns
 * FALSE (bailing out to the generic path) when ALIGN is too small or SIZE
 * too large for the bitmap scheme — return sites are elided in this view.
 */
2669 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
2671 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
2672 unsigned need_wb = 0;
2677 /*types with references can't have alignment smaller than sizeof(void*) */
2678 if (align < SIZEOF_VOID_P)
2681 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
2682 if (size > 32 * SIZEOF_VOID_P)
2685 create_write_barrier_bitmap (klass, &need_wb, 0);
2687 /* We don't unroll more than 5 stores to avoid code bloat. */
2688 if (size > 5 * SIZEOF_VOID_P) {
2689 /*This is harmless and simplifies mono_gc_wbarrier_value_copy_bitmap */
/* Round SIZE up to a whole number of pointer-sized slots. */
2690 size += (SIZEOF_VOID_P - 1);
2691 size &= ~(SIZEOF_VOID_P - 1);
2693 EMIT_NEW_ICONST (cfg, iargs [2], size);
2694 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
2695 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
2699 destreg = iargs [0]->dreg;
2700 srcreg = iargs [1]->dreg;
2703 dest_ptr_reg = alloc_preg (cfg);
2704 tmp_reg = alloc_preg (cfg);
/* Keep a running destination pointer so each slot store is at offset 0. */
2707 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
2709 while (size >= SIZEOF_VOID_P) {
2710 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, tmp_reg, srcreg, offset);
2711 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
/* Only slots flagged in the bitmap need a barrier — check elided here. */
2714 emit_write_barrier (cfg, iargs [0], NULL, tmp_reg);
2716 offset += SIZEOF_VOID_P;
2717 size -= SIZEOF_VOID_P;
2720 /*tmp += sizeof (void*)*/
2721 if (size >= SIZEOF_VOID_P) {
2722 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
2723 MONO_ADD_INS (cfg->cbb, iargs [0]);
2727 /* Those cannot be references since size < sizeof (void*) */
/* Copy the sub-pointer-sized tail with progressively narrower moves. */
2729 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
2730 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
2736 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
2737 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
2743 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
2744 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
2753 * Emit code to copy a valuetype of type @klass whose address is stored in
2754 * @src->dreg to memory whose address is stored at @dest->dreg.
/*
 * If NATIVE, use the class's native (marshalled) size; the struct is then
 * assumed to contain no managed references. Otherwise the managed size is
 * used and, under gen_write_barriers, reference-containing structs go
 * through a barrier-aware copy (inline or via the mono_value_copy icall).
 * Small no-reference copies are inlined; the rest call String::memcpy.
 */
2757 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2759 MonoInst *iargs [4];
2762 MonoMethod *memcpy_method;
2766 * This check breaks with spilled vars... need to handle it during verification anyway.
2767 * g_assert (klass && klass == src->klass && klass == dest->klass);
2771 n = mono_class_native_size (klass, &align);
2773 n = mono_class_value_size (klass, &align);
2775 /* if native is true there should be no references in the struct */
2776 if (cfg->gen_write_barriers && klass->has_references && !native) {
2777 /* Avoid barriers when storing to the stack */
2778 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2779 (dest->opcode == OP_LDADDR))) {
2780 int context_used = 0;
2785 if (cfg->generic_sharing_context)
2786 context_used = mono_class_check_context_used (klass);
2788 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
2789 if ((cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
2791 } else if (context_used) {
/* Shared generic code: fetch the klass argument from the rgctx. */
2792 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2794 if (cfg->compile_aot) {
2795 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2797 EMIT_NEW_PCONST (cfg, iargs [2], klass);
/* mono_value_copy needs the GC descriptor to be computed up front. */
2798 mono_class_compute_gc_descriptor (klass);
2802 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No references (or native copy): small sizes are inlined directly. */
2807 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2808 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2809 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2813 EMIT_NEW_ICONST (cfg, iargs [2], n);
2815 memcpy_method = get_memcpy_method ();
2816 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return (and cache in a static) the corlib String::memset(3) helper.
 * Aborts with g_error if the method is missing, i.e. the corlib is too old.
 */
2821 get_memset_method (void)
2823 static MonoMethod *memset_method = NULL;
2824 if (!memset_method) {
2825 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2827 g_error ("Old corlib found. Install a new one");
2829 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code to zero-initialize a value of type KLASS at the address in
 * DEST->dreg (CIL initobj). Small values are zeroed inline via
 * mini_emit_memset; larger ones call the corlib memset helper.
 */
2833 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2835 MonoInst *iargs [3];
2838 MonoMethod *memset_method;
2840 /* FIXME: Optimize this for the case when dest is an LDADDR */
2842 mono_class_init (klass);
2843 n = mono_class_value_size (klass, &align);
2845 if (n <= sizeof (gpointer) * 5) {
2846 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2849 memset_method = get_memset_method ();
2851 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2852 EMIT_NEW_ICONST (cfg, iargs [2], n);
2853 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR that loads the runtime generic context for METHOD. Depending
 * on how the context is shared, this is either the method runtime generic
 * context (mrgctx) variable, the vtable variable (possibly dereferencing
 * the mrgctx's class_vtable field), or — for instance methods — the
 * vtable loaded from `this`. Only valid under generic sharing.
 */
2858 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2860 MonoInst *this = NULL;
2862 g_assert (cfg->generic_sharing_context);
/* Non-static, non-valuetype methods without a method context get the
 * rgctx through the `this` argument. */
2864 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2865 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2866 !method->klass->valuetype)
2867 EMIT_NEW_ARGLOAD (cfg, this, 0);
2869 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2870 MonoInst *mrgctx_loc, *mrgctx_var;
2873 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2875 mrgctx_loc = mono_get_vtable_var (cfg);
2876 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2879 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2880 MonoInst *vtable_loc, *vtable_var;
2884 vtable_loc = mono_get_vtable_var (cfg);
2885 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
/* The variable actually holds an mrgctx here; load its class vtable. */
2887 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2888 MonoInst *mrgctx_var = vtable_var;
2891 vtable_reg = alloc_preg (cfg);
2892 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2893 vtable_var->type = STACK_PTR;
/* Default case: load the vtable from the object header of `this`. */
2899 int vtable_reg, res_reg;
2901 vtable_reg = alloc_preg (cfg);
2902 res_reg = alloc_preg (cfg);
2903 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (from MP) and fill an rgctx-fetch patch entry describing a
 * lazy lookup of INFO_TYPE for PATCH_DATA, relative to METHOD's context
 * (method rgctx if IN_MRGCTX, otherwise class vtable).
 */
2908 static MonoJumpInfoRgctxEntry *
2909 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2911 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2912 res->method = method;
2913 res->in_mrgctx = in_mrgctx;
2914 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2915 res->data->type = patch_type;
2916 res->data->data.target = patch_data;
2917 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *
 *   Emit a call to the rgctx lazy-fetch trampoline that resolves ENTRY
 * against the context in RGCTX.
 */
2922 static inline MonoInst*
2923 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2925 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR to load the RGCTX_TYPE property of KLASS via a lazy rgctx
 * fetch in the current method's generic context.
 */
2929 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2930 MonoClass *klass, int rgctx_type)
2932 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2933 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2935 return emit_rgctx_fetch (cfg, rgctx, entry);
2939 * emit_get_rgctx_method:
2941 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2942 * normal constants, else emit a load from the rgctx.
2945 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2946 MonoMethod *cmethod, int rgctx_type)
2948 if (!context_used) {
/* Not shared: the requested info is a compile-time constant. */
2951 switch (rgctx_type) {
2952 case MONO_RGCTX_INFO_METHOD:
2953 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
2955 case MONO_RGCTX_INFO_METHOD_RGCTX:
2956 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
2959 g_assert_not_reached ();
/* Shared generic code: resolve through a lazy rgctx fetch. */
2962 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2963 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2965 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR to load the RGCTX_TYPE property of FIELD via a lazy rgctx
 * fetch in the current method's generic context.
 */
2970 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2971 MonoClassField *field, int rgctx_type)
2973 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2974 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2976 return emit_rgctx_fetch (cfg, rgctx, entry);
2980 * On return the caller must check @klass for load errors.
/*
 * emit_generic_class_init:
 *
 *   Emit a call to the generic-class-init trampoline for KLASS, passing
 * the vtable either via the rgctx (shared code) or as a constant. On
 * arches with a dedicated vtable register, the argument is pinned to it.
 */
2983 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
2985 MonoInst *vtable_arg;
2987 int context_used = 0;
2989 if (cfg->generic_sharing_context)
2990 context_used = mono_class_check_context_used (klass);
2993 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
2994 klass, MONO_RGCTX_INFO_VTABLE);
2996 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3000 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* LLVM uses a separate trampoline signature (regular calling convention). */
3003 if (COMPILE_LLVM (cfg))
3004 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3006 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3007 #ifdef MONO_ARCH_VTABLE_REG
3008 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3009 cfg->uses_vtable_reg = TRUE;
/*
 * save_cast_details:
 *
 *   When --debug=casts is active, record the source class (read from the
 * object in OBJ_REG) and the target KLASS into the thread's MonoJitTlsData,
 * so a failing cast can produce a detailed error message.
 * NOTE(review): the error string below ends with "\n." — the trailing '.'
 * after the newline looks like a typo; confirm before changing (runtime
 * string, not altered here).
 */
3016 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
3018 if (mini_get_debug_options ()->better_cast_details) {
3019 int to_klass_reg = alloc_preg (cfg);
3020 int vtable_reg = alloc_preg (cfg);
3021 int klass_reg = alloc_preg (cfg);
3022 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3025 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3029 MONO_ADD_INS (cfg->cbb, tls_get);
3030 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3031 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3033 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3034 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3035 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * reset_cast_details:
 *
 *   Clear the per-thread cast-details record after a successful cast, so
 * stale data is not reported by a later failure.
 */
3040 reset_cast_details (MonoCompile *cfg)
3042 /* Reset the variables holding the cast details */
3043 if (mini_get_debug_options ()->better_cast_details) {
3044 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3046 MONO_ADD_INS (cfg->cbb, tls_get);
3047 /* It is enough to reset the from field */
3048 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3053 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit a runtime check (CIL stelem-style array covariance check) that
 * OBJ's dynamic type is exactly ARRAY_CLASS, throwing
 * ArrayTypeMismatchException otherwise. Compares either the MonoClass or
 * the MonoVTable pointer depending on sharing/AOT mode.
 */
3056 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3058 int vtable_reg = alloc_preg (cfg);
3059 int context_used = 0;
3061 if (cfg->generic_sharing_context)
3062 context_used = mono_class_check_context_used (array_class);
3064 save_cast_details (cfg, array_class, obj->dreg);
/* Faulting load: also acts as the null check on OBJ. */
3066 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3068 if (cfg->opt & MONO_OPT_SHARED) {
/* Shared code: vtables are per-domain, so compare MonoClass pointers. */
3069 int class_reg = alloc_preg (cfg);
3070 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3071 if (cfg->compile_aot) {
3072 int klass_reg = alloc_preg (cfg);
3073 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3074 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3076 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3078 } else if (context_used) {
/* Generic sharing: fetch the expected vtable from the rgctx. */
3079 MonoInst *vtable_ins;
3081 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3082 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3084 if (cfg->compile_aot) {
3088 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3090 vt_reg = alloc_preg (cfg);
3091 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3092 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3095 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3097 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3101 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3103 reset_cast_details (cfg);
3107 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3108 * generic code is generated.
3111 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
/* Nullable unbox is implemented by calling Nullable<T>.Unbox on VAL. */
3113 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3116 MonoInst *rgctx, *addr;
3118 /* FIXME: What if the class is shared? We might not
3119 have to get the address of the method from the
/* Shared code: fetch the concrete method address and rgctx, then calli. */
3121 addr = emit_get_rgctx_method (cfg, context_used, method,
3122 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3124 rgctx = emit_get_rgctx (cfg, method, context_used);
3126 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3128 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *
 *   Emit IR for CIL `unbox`: verify that the boxed object in sp[0] has the
 * expected element class (throwing InvalidCastException otherwise) and
 * return an instruction producing the address of the value payload, i.e.
 * obj + sizeof (MonoObject).
 */
3133 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3137 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3138 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3139 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3140 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3142 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check. */
3143 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3144 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3146 /* FIXME: generics */
3147 g_assert (klass->rank == 0);
/* Arrays can never be unboxed: reject any object with rank != 0. */
3150 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3151 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3153 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3154 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
/* Shared generic code: compare against the element class from the rgctx. */
3157 MonoInst *element_class;
3159 /* This assertion is from the unboxcast insn */
3160 g_assert (klass->rank == 0);
3162 element_class = emit_get_rgctx_klass (cfg, context_used,
3163 klass->element_class, MONO_RGCTX_INFO_KLASS);
3165 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3166 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3168 save_cast_details (cfg, klass->element_class, obj_reg);
3169 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3170 reset_cast_details (cfg);
/* Result: pointer past the object header, to the boxed value itself. */
3173 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
3174 MONO_ADD_INS (cfg->cbb, add);
3175 add->type = STACK_MP;
3182 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit IR that allocates an instance of KLASS (FOR_BOX distinguishes a
 * box allocation for GC/allocator selection). Picks among: domain-aware
 * mono_object_new (MONO_OPT_SHARED), an mscorlib-token helper for AOT
 * out-of-line paths, a GC managed allocator method, or the allocation
 * function chosen by mono_class_get_allocation_ftn.
 */
3185 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3187 MonoInst *iargs [2];
/* Shared-generic path: the class/vtable argument comes from the rgctx. */
3193 MonoInst *iargs [2];
3196 FIXME: we cannot get managed_alloc here because we can't get
3197 the class's vtable (because it's not a closed class)
3199 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3200 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3203 if (cfg->opt & MONO_OPT_SHARED)
3204 rgctx_info = MONO_RGCTX_INFO_KLASS;
3206 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3207 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3209 if (cfg->opt & MONO_OPT_SHARED) {
3210 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3212 alloc_ftn = mono_object_new;
3215 alloc_ftn = mono_object_new_specific;
3218 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-shared paths follow. */
3221 if (cfg->opt & MONO_OPT_SHARED) {
3222 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3223 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3225 alloc_ftn = mono_object_new;
3226 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3227 /* This happens often in argument checking code, eg. throw new FooException... */
3228 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3229 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3230 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3232 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3233 MonoMethod *managed_alloc = NULL;
/* Class failed to load: surface a TypeLoadException to the caller. */
3237 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3238 cfg->exception_ptr = klass;
3242 #ifndef MONO_CROSS_COMPILE
3243 managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3246 if (managed_alloc) {
3247 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3248 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3250 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* pass_lw allocators take the instance size in pointer-sized words first. */
3252 guint32 lw = vtable->klass->instance_size;
3253 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3254 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3255 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3258 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3262 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3266 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit IR for CIL `box` of VAL as KLASS. Nullable<T> boxes through the
 * corlib Nullable<T>.Box method (via rgctx calli under sharing); other
 * value types allocate with handle_alloc and store the value right after
 * the MonoObject header.
 */
3269 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
3271 MonoInst *alloc, *ins;
3273 if (mono_class_is_nullable (klass)) {
3274 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3277 /* FIXME: What if the class is shared? We might not
3278 have to get the method address from the RGCTX. */
3279 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3280 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3281 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3283 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3285 return mono_emit_method_call (cfg, method, &val, NULL);
3289 alloc = handle_alloc (cfg, klass, TRUE, context_used);
/* Copy the value payload into the fresh object, past the header. */
3293 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return whether KLASS is a generic instance (or, under sharing, an open
 * generic) with at least one variant (co- or contra-variant) type argument
 * that is a reference type — the cases where casts need the variance-aware
 * cast-with-cache helpers.
 */
3300 mini_class_has_reference_variant_generic_argument (MonoClass *klass, int context_used)
3303 MonoGenericContainer *container;
3304 MonoGenericInst *ginst;
3306 if (klass->generic_class) {
3307 container = klass->generic_class->container_class->generic_container;
3308 ginst = klass->generic_class->context.class_inst;
3309 } else if (klass->generic_container && context_used) {
3310 container = klass->generic_container;
3311 ginst = container->context.class_inst;
3316 for (i = 0; i < container->type_argc; ++i) {
/* Only variant parameters can make a cast depend on the argument type. */
3318 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
3320 type = ginst->type_argv [i];
3321 if (MONO_TYPE_IS_REFERENCE (type))
/* Under sharing, VAR/MVAR arguments may be instantiated with references. */
3324 if (context_used && (type->type == MONO_TYPE_VAR || type->type == MONO_TYPE_MVAR))
3330 // FIXME: This doesn't work yet (class libs tests fail?)
/* The leading TRUE short-circuits the whole test: every class is currently
 * treated as "complex" and routed to the icall path; the rest of the
 * expression documents the intended fast-path exclusions. */
3331 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
3334 * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 *
 *   Emit IR for CIL `castclass` of SRC to KLASS. Variant generic targets go
 * through the cached castclass helper; complex targets (see
 * is_complex_isinst) call the mono_object_castclass icall; the remaining
 * simple cases are checked inline (null passes; interface or class-chain
 * compare; InvalidCastException on mismatch).
 */
3337 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3339 MonoBasicBlock *is_null_bb;
3340 int obj_reg = src->dreg;
3341 int vtable_reg = alloc_preg (cfg);
3342 MonoInst *klass_inst = NULL;
3347 if(mini_class_has_reference_variant_generic_argument (klass, context_used)) {
3348 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
3349 MonoInst *cache_ins;
3351 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3356 /* klass - it's the second element of the cache entry*/
3357 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3360 args [2] = cache_ins;
3362 return mono_emit_method_call (cfg, mono_castclass, args, NULL);
3365 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3367 if (is_complex_isinst (klass)) {
3368 /* Complex case, handle by an icall */
3374 args [1] = klass_inst;
3376 return mono_emit_jit_icall (cfg, mono_object_castclass, args);
3378 /* Simple case, handled by the code below */
/* Null references always pass castclass. */
3382 NEW_BBLOCK (cfg, is_null_bb);
3384 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3385 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3387 save_cast_details (cfg, klass, obj_reg);
3389 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3390 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3391 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3393 int klass_reg = alloc_preg (cfg);
3395 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array classes admit an exact pointer compare. */
3397 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3398 /* the remoting code is broken, access the class for now */
3399 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3400 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3402 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3403 cfg->exception_ptr = klass;
3406 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3408 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3409 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3411 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3413 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3414 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3418 MONO_START_BB (cfg, is_null_bb);
3420 reset_cast_details (cfg);
3426 * Returns NULL and set the cfg exception on error.
/*
 * handle_isinst:
 *
 *   Emit IR for CIL `isinst` of SRC against KLASS: the result register
 * holds the object on success or NULL on failure (null input yields null,
 * which counts as "pass" per ECMA-335). Variant generic targets use the
 * cached isinst helper; complex targets call mono_object_isinst; the rest
 * are checked inline with branches to is_null_bb (success) / false_bb
 * (failure).
 */
3429 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3432 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3433 int obj_reg = src->dreg;
3434 int vtable_reg = alloc_preg (cfg);
3435 int res_reg = alloc_preg (cfg);
3436 MonoInst *klass_inst = NULL;
3441 if(mini_class_has_reference_variant_generic_argument (klass, context_used)) {
3442 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
3443 MonoInst *cache_ins;
3445 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3450 /* klass - it's the second element of the cache entry*/
3451 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3454 args [2] = cache_ins;
3456 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
3459 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3461 if (is_complex_isinst (klass)) {
3462 /* Complex case, handle by an icall */
3468 args [1] = klass_inst;
3470 return mono_emit_jit_icall (cfg, mono_object_isinst, args);
3472 /* Simple case, the code below can handle it */
3476 NEW_BBLOCK (cfg, is_null_bb);
3477 NEW_BBLOCK (cfg, false_bb);
3478 NEW_BBLOCK (cfg, end_bb);
3480 /* Do the assignment at the beginning, so the other assignment can be if converted */
3481 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3482 ins->type = STACK_OBJ;
3485 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3486 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3488 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3490 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3491 g_assert (!context_used);
3492 /* the is_null_bb target simply copies the input register to the output */
3493 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3495 int klass_reg = alloc_preg (cfg);
/* Array target: check rank first, then the element class. */
3498 int rank_reg = alloc_preg (cfg);
3499 int eclass_reg = alloc_preg (cfg);
3501 g_assert (!context_used);
3502 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3503 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3504 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3505 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3506 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Special-case element classes with known subtype structure (object,
 * ValueType, Enum) to avoid the generic subtype walk. */
3507 if (klass->cast_class == mono_defaults.object_class) {
3508 int parent_reg = alloc_preg (cfg);
3509 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3510 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3511 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3512 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3513 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3514 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3515 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3516 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3517 } else if (klass->cast_class == mono_defaults.enum_class) {
3518 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3519 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3520 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3521 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3523 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3524 /* Check that the object is a vector too */
3525 int bounds_reg = alloc_preg (cfg);
3526 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3527 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3528 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3531 /* the is_null_bb target simply copies the input register to the output */
3532 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3534 } else if (mono_class_is_nullable (klass)) {
3535 g_assert (!context_used);
3536 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3537 /* the is_null_bb target simply copies the input register to the output */
3538 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
3540 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3541 g_assert (!context_used);
3542 /* the remoting code is broken, access the class for now */
3543 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3544 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3546 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3547 cfg->exception_ptr = klass;
3550 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3552 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3553 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3555 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3556 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3558 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3559 /* the is_null_bb target simply copies the input register to the output */
3560 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* Failure path: result is NULL. */
3565 MONO_START_BB (cfg, false_bb);
3567 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3568 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3570 MONO_START_BB (cfg, is_null_bb);
3572 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the remoting-aware isinst check (CISINST). Builds a small CFG
 * of basic blocks that classifies OBJ (in SRC) against KLASS and leaves an
 * integer result in a fresh vreg. Transparent-proxy objects whose real type
 * cannot be decided locally take the "2" path.
 */
3578 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3580 /* This opcode takes as input an object reference and a class, and returns:
3581 0) if the object is an instance of the class,
3582 1) if the object is not instance of the class,
3583 2) if the object is a proxy whose type cannot be determined */
3586 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3587 int obj_reg = src->dreg;
3588 int dreg = alloc_ireg (cfg);
3590 int klass_reg = alloc_preg (cfg);
3592 NEW_BBLOCK (cfg, true_bb);
3593 NEW_BBLOCK (cfg, false_bb);
3594 NEW_BBLOCK (cfg, false2_bb);
3595 NEW_BBLOCK (cfg, end_bb);
3596 NEW_BBLOCK (cfg, no_proxy_bb);
/* A null reference is "not an instance": branch straight to the 1-result block. */
3598 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3599 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3601 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
/* Interface case: try the interface bitmap first; on failure, see whether
 * the object is a transparent proxy with custom type info (-> result 2). */
3602 NEW_BBLOCK (cfg, interface_fail_bb);
3604 tmp_reg = alloc_preg (cfg);
3605 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3606 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
3607 MONO_START_BB (cfg, interface_fail_bb);
3608 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Not a transparent proxy -> definitely not an instance. */
3610 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3612 tmp_reg = alloc_preg (cfg);
3613 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3614 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3615 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
/* Non-interface case: load the object's class from its vtable. */
3617 tmp_reg = alloc_preg (cfg);
3618 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3619 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* If the object is a transparent proxy, test against the proxied class. */
3621 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3622 tmp_reg = alloc_preg (cfg);
3623 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3624 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
/* Proxy without custom type info behaves like a plain object of proxy_class. */
3626 tmp_reg = alloc_preg (cfg);
3627 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3628 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3629 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
/* Proxy with custom type info: a failed cast means "undecidable" (2). */
3631 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3632 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3634 MONO_START_BB (cfg, no_proxy_bb);
3636 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Result blocks: 1 = not an instance, 2 = undecidable proxy, 0 = instance. */
3639 MONO_START_BB (cfg, false_bb);
3641 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3642 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3644 MONO_START_BB (cfg, false2_bb);
3646 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3647 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3649 MONO_START_BB (cfg, true_bb);
3651 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3653 MONO_START_BB (cfg, end_bb);
/* Materialize the result as a STACK_I4 value for the evaluation stack. */
3656 MONO_INST_NEW (cfg, ins, OP_ICONST);
3658 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the remoting-aware castclass check (CCASTCLASS). Like
 * handle_cisinst () but throws InvalidCastException instead of returning a
 * "false" result; proxies whose type cannot be decided locally yield 1 so the
 * caller can fall back to a runtime check.
 */
3664 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3666 /* This opcode takes as input an object reference and a class, and returns:
3667 0) if the object is an instance of the class,
3668 1) if the object is a proxy whose type cannot be determined
3669 an InvalidCastException exception is thrown otherwise*/
3672 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3673 int obj_reg = src->dreg;
3674 int dreg = alloc_ireg (cfg);
3675 int tmp_reg = alloc_preg (cfg);
3676 int klass_reg = alloc_preg (cfg);
3678 NEW_BBLOCK (cfg, end_bb);
3679 NEW_BBLOCK (cfg, ok_result_bb);
/* Casting a null reference always succeeds. */
3681 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3682 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record source/target class so a failing cast can produce a detailed message. */
3684 save_cast_details (cfg, klass, obj_reg);
3686 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
/* Interface case: bitmap check first; on failure the object must be a
 * transparent proxy with custom type info, otherwise throw. */
3687 NEW_BBLOCK (cfg, interface_fail_bb);
3689 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3690 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3691 MONO_START_BB (cfg, interface_fail_bb);
3692 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Throws InvalidCastException if the object is not a transparent proxy. */
3694 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3696 tmp_reg = alloc_preg (cfg);
3697 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3698 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3699 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Proxy with custom type info: defer the decision to the runtime (result 1). */
3701 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3702 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Non-interface case. */
3705 NEW_BBLOCK (cfg, no_proxy_bb);
3707 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3708 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3709 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Transparent proxy: check against the proxied class instead. */
3711 tmp_reg = alloc_preg (cfg);
3712 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3713 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
/* Proxy without custom type info is treated like a plain object. */
3715 tmp_reg = alloc_preg (cfg);
3716 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3717 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3718 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3720 NEW_BBLOCK (cfg, fail_1_bb);
3722 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3724 MONO_START_BB (cfg, fail_1_bb);
3726 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3727 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3729 MONO_START_BB (cfg, no_proxy_bb);
/* Ordinary object: emit the throwing castclass check. */
3731 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3734 MONO_START_BB (cfg, ok_result_bb);
3736 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3738 MONO_START_BB (cfg, end_bb);
/* Materialize the result as a STACK_I4 value for the evaluation stack. */
3741 MONO_INST_NEW (cfg, ins, OP_ICONST);
3743 ins->type = STACK_I4;
/*
 * handle_delegate_ctor:
 *
 *   Inline the work of mono_delegate_ctor (): allocate the delegate object,
 * store its target/method fields (with write barriers when the GC needs
 * them), optionally cache a per-domain code slot for the compiled target
 * method, and install the delegate invoke trampoline.
 *
 3749 * Returns NULL and set the cfg exception on error.
 */
3751 static G_GNUC_UNUSED MonoInst*
3752 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
3756 gpointer *trampoline;
3757 MonoInst *obj, *method_ins, *tramp_ins;
/* Allocate the delegate instance itself. */
3761 obj = handle_alloc (cfg, klass, FALSE, 0);
3765 /* Inline the contents of mono_delegate_ctor */
3767 /* Set target field */
3768 /* Optimize away setting of NULL target */
3769 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
3770 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* The stored reference must be tracked by the GC write barrier. */
3771 if (cfg->gen_write_barriers) {
3772 dreg = alloc_preg (cfg);
3773 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target));
3774 emit_write_barrier (cfg, ptr, target, 0);
3778 /* Set method field */
3779 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
3780 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3781 if (cfg->gen_write_barriers) {
3782 dreg = alloc_preg (cfg);
3783 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method));
3784 emit_write_barrier (cfg, ptr, method_ins, 0);
3787 * To avoid looking up the compiled code belonging to the target method
3788 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3789 * store it, and we fill it after the method has been compiled.
/* Not done for AOT (no fixed address) or dynamic methods (may be freed). */
3791 if (!cfg->compile_aot && !method->dynamic) {
3792 MonoInst *code_slot_ins;
3795 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Lazily create and populate the per-domain method -> code-slot hash
 * under the domain lock. */
3797 domain = mono_domain_get ();
3798 mono_domain_lock (domain);
3799 if (!domain_jit_info (domain)->method_code_hash)
3800 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3801 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3803 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3804 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3806 mono_domain_unlock (domain);
3808 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3810 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3813 /* Set invoke_impl field */
/* Under AOT the trampoline address is patched at load time. */
3814 if (cfg->compile_aot) {
3815 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3817 trampoline = mono_create_delegate_trampoline (klass);
3818 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3820 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3822 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call to the mono_array_new_va icall to allocate a multi-dimensional
 * array of rank RANK; the dimension arguments are on SP. Uses a vararg
 * calling convention, which also forces LLVM compilation off for this method.
 */
3828 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3830 MonoJitICallInfo *info;
3832 /* Need to register the icall so it gets an icall wrapper */
3833 info = mono_get_array_new_va_icall (rank);
3835 cfg->flags |= MONO_CFG_HAS_VARARGS;
3837 /* mono_array_new_va () needs a vararg calling convention */
3838 cfg->disable_llvm = TRUE;
3840 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3841 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Ensure the GOT address is loaded into cfg->got_var at the very start of
 * the method, exactly once, and keep the variable alive until method exit
 * with a dummy use. No-op when there is no got_var or it was already set up.
 */
3845 mono_emit_load_got_addr (MonoCompile *cfg)
3847 MonoInst *getaddr, *dummy_use;
3849 if (!cfg->got_var || cfg->got_var_allocated)
3852 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3853 getaddr->dreg = cfg->got_var->dreg;
3855 /* Add it to the start of the first bblock */
/* Prepend manually if the entry block already has code; otherwise append. */
3856 if (cfg->bb_entry->code) {
3857 getaddr->next = cfg->bb_entry->code;
3858 cfg->bb_entry->code = getaddr;
3861 MONO_ADD_INS (cfg->bb_entry, getaddr);
3863 cfg->got_var_allocated = TRUE;
3866 * Add a dummy use to keep the got_var alive, since real uses might
3867 * only be generated by the back ends.
3868 * Add it to end_bblock, so the variable's lifetime covers the whole
3870 * It would be better to make the usage of the got var explicit in all
3871 * cases when the backend needs it (i.e. calls, throw etc.), so this
3872 * wouldn't be needed.
3874 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3875 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Lazily-initialized IL-size cap for inlining (overridable via the
 * MONO_INLINELIMIT environment variable).
 * NOTE(review): the lazy init below is not obviously thread-safe — confirm
 * all callers run under the JIT lock. */
3878 static int inline_limit;
3879 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled in
 * CFG. Rejects generic sharing, deep inline nesting, oversized bodies,
 * noinline/synchronized/MarshalByRef methods, classes whose cctor would have
 * to run inside the inlined code, methods with declarative security, and
 * (under soft-float) methods with R4 parameters or return values.
 */
3882 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3884 MonoMethodHeaderSummary header;
3886 #ifdef MONO_ARCH_SOFT_FLOAT
3887 MonoMethodSignature *sig = mono_method_signature (method);
/* No inlining when sharing generic code, or when already nested too deep. */
3891 if (cfg->generic_sharing_context)
3894 if (cfg->inline_depth > 10)
3897 #ifdef MONO_ARCH_HAVE_LMF_OPS
/* With cheap LMF ops, small icall/pinvoke stubs are worth inlining. */
3898 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3899 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3900 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
/* Summary gives code size etc. without fully decoding the header. */
3905 if (!mono_method_get_header_summary (method, &header))
3908 /*runtime, icall and pinvoke are checked by summary call*/
3909 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3910 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3911 (method->klass->marshalbyref) ||
3915 /* also consider num_locals? */
3916 /* Do the size check early to avoid creating vtables */
/* NOTE(review): getenv () is called twice here; harmless but could be cached. */
3917 if (!inline_limit_inited) {
3918 if (getenv ("MONO_INLINELIMIT"))
3919 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3921 inline_limit = INLINE_LENGTH_LIMIT;
3922 inline_limit_inited = TRUE;
3924 if (header.code_size >= inline_limit)
3928 * if we can initialize the class of the method right away, we do,
3929 * otherwise we don't allow inlining if the class needs initialization,
3930 * since it would mean inserting a call to mono_runtime_class_init()
3931 * inside the inlined code
3933 if (!(cfg->opt & MONO_OPT_SHARED)) {
3934 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3935 if (cfg->run_cctors && method->klass->has_cctor) {
3936 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
3937 if (!method->klass->runtime_info)
3938 /* No vtable created yet */
3940 vtable = mono_class_vtable (cfg->domain, method->klass);
3943 /* This makes so that inline cannot trigger */
3944 /* .cctors: too many apps depend on them */
3945 /* running with a specific order... */
3946 if (! vtable->initialized)
3948 mono_runtime_class_init (vtable);
3950 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3951 if (!method->klass->runtime_info)
3952 /* No vtable created yet */
3954 vtable = mono_class_vtable (cfg->domain, method->klass);
3957 if (!vtable->initialized)
3962 * If we're compiling for shared code
3963 * the cctor will need to be run at aot method load time, for example,
3964 * or at the end of the compilation of the inlining method.
3966 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3971 * CAS - do not inline methods with declarative security
3972 * Note: this has to be before any possible return TRUE;
3974 if (mono_method_has_declsec (method))
3977 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft float cannot inline methods passing or returning R4 values. */
3979 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3981 for (i = 0; i < sig->param_count; ++i)
3982 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static field access in METHOD requires a class-init
 * check for VTABLE's class. Already-initialized classes (non-AOT),
 * beforefieldinit classes, and accesses from the class's own non-static
 * methods do not need one.
 */
3990 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
3992 if (vtable->initialized && !cfg->compile_aot)
3995 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3998 if (!mono_class_needs_cctor_run (vtable->klass, method))
4001 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
4002 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit the address computation for a one-dimensional array element:
 * ARR + offsetof (MonoArray, vector) + INDEX * element_size. Emits a bounds
 * check when BCHECK is set, and uses an x86/amd64 LEA fast path for
 * power-of-two element sizes.
 */
4009 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4013 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
4015 mono_class_init (klass);
4016 size = mono_class_array_element_size (klass);
4018 mult_reg = alloc_preg (cfg);
4019 array_reg = arr->dreg;
4020 index_reg = index->dreg;
4022 #if SIZEOF_REGISTER == 8
4023 /* The array reg is 64 bits but the index reg is only 32 */
/* LLVM handles the widening itself; otherwise sign-extend to 64 bits. */
4024 if (COMPILE_LLVM (cfg)) {
4026 index2_reg = index_reg;
4028 index2_reg = alloc_preg (cfg);
4029 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32-bit target: narrow an I8 index down to 32 bits. */
4032 if (index->type == STACK_I8) {
4033 index2_reg = alloc_preg (cfg);
4034 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4036 index2_reg = index_reg;
4041 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
4043 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Fold shift+add+displacement into a single LEA for 1/2/4/8-byte elements. */
4044 if (size == 1 || size == 2 || size == 4 || size == 8) {
4045 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4047 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
4048 ins->type = STACK_PTR;
/* Generic path: explicit multiply, add and displacement. */
4054 add_reg = alloc_preg (cfg);
4056 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
4057 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4058 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4059 ins->type = STACK_PTR;
4060 MONO_ADD_INS (cfg->cbb, ins);
4065 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit the address computation for an element of a two-dimensional array,
 * including per-dimension lower-bound adjustment and range checks against the
 * MonoArrayBounds records. Only compiled on targets with native mul/div.
 */
4067 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4069 int bounds_reg = alloc_preg (cfg);
4070 int add_reg = alloc_preg (cfg);
4071 int mult_reg = alloc_preg (cfg);
4072 int mult2_reg = alloc_preg (cfg);
4073 int low1_reg = alloc_preg (cfg);
4074 int low2_reg = alloc_preg (cfg);
4075 int high1_reg = alloc_preg (cfg);
4076 int high2_reg = alloc_preg (cfg);
4077 int realidx1_reg = alloc_preg (cfg);
4078 int realidx2_reg = alloc_preg (cfg);
4079 int sum_reg = alloc_preg (cfg);
4084 mono_class_init (klass);
4085 size = mono_class_array_element_size (klass);
4087 index1 = index_ins1->dreg;
4088 index2 = index_ins2->dreg;
4090 /* range checking */
4091 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4092 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 0: realidx1 = index1 - lower_bound; unsigned compare against
 * length so negative adjusted indexes also fail. */
4094 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4095 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4096 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4097 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4098 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4099 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4100 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 1: same check against the second MonoArrayBounds record. */
4102 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4103 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4104 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4105 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4106 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
4107 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4108 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Address = arr + vector_offset + (realidx1 * dim2_length + realidx2) * size. */
4110 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4111 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4112 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4113 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4114 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4116 ins->type = STACK_MP;
4118 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Dispatch element-address computation for the array Address/Get/Set
 * helpers: rank 1 and (when supported) rank 2 are inlined, higher ranks go
 * through a marshalling wrapper call. IS_SET indicates a setter, whose last
 * parameter is the value rather than an index.
 */
4125 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4129 MonoMethod *addr_method;
/* Rank = index-parameter count (the setter's value parameter is excluded). */
4132 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
4135 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
4137 #ifndef MONO_ARCH_EMULATE_MUL_DIV
4138 /* emit_ldelema_2 depends on OP_LMUL */
4139 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
4140 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* General case: call the generated Address wrapper for this rank/size. */
4144 element_size = mono_class_array_element_size (cmethod->klass->element_class);
4145 addr_method = mono_marshal_get_array_address (rank, element_size);
4146 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every break instruction / Debugger.Break (). */
4151 static MonoBreakPolicy
4152 always_insert_breakpoint (MonoMethod *method)
4154 return MONO_BREAK_POLICY_ALWAYS;
/* Current policy callback; replaced via mono_set_break_policy (). */
4157 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4160 * mono_set_break_policy:
4161 * policy_callback: the new callback function
4163 * Allow embedders to decide whether to actually obey breakpoint instructions
4164 * (both break IL instructions and Debugger.Break () method calls), for example
4165 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4166 * untrusted or semi-trusted code.
4168 * @policy_callback will be called every time a break point instruction needs to
4169 * be inserted with the method argument being the method that calls Debugger.Break()
4170 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4171 * if it wants the breakpoint to not be effective in the given method.
4172 * #MONO_BREAK_POLICY_ALWAYS is the default.
4175 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
/* A NULL callback restores the default always-break policy. */
4177 if (policy_callback)
4178 break_policy_func = policy_callback;
4180 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint: (sic — name typo kept; callers use this spelling)
 *
 *   Consult the installed break policy to decide whether a breakpoint for
 * METHOD should actually be emitted. Warns on out-of-range policy values.
 */
4184 should_insert_brekpoint (MonoMethod *method) {
4185 switch (break_policy_func (method)) {
4186 case MONO_BREAK_POLICY_ALWAYS:
4188 case MONO_BREAK_POLICY_NEVER:
4190 case MONO_BREAK_POLICY_ON_DBG:
/* Only effective while running under the Mono debugger. */
4191 return mono_debug_using_mono_debugger ();
4193 g_warning ("Incorrect value returned from break policy callback");
4198 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Inline Array.GetGenericValueImpl/SetGenericValueImpl as a typed
 * load+store through the element address. IS_SET selects store-into-array
 * (value at args [2] -> element) vs load-from-array (element -> args [2]).
 */
4200 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4202 MonoInst *addr, *store, *load;
4203 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4205 /* the bounds check is already done by the callers */
4206 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
4208 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4209 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
4211 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4212 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to replace a constructor call with an intrinsic instruction.
 * Currently only SIMD constructors are handled, and only when MONO_OPT_SIMD
 * is enabled; returns NULL when no intrinsic applies.
 */
4218 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4220 MonoInst *ins = NULL;
4221 #ifdef MONO_ARCH_SIMD_INTRINSICS
4222 if (cfg->opt & MONO_OPT_SIMD) {
4223 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
4233 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4235 MonoInst *ins = NULL;
4237 static MonoClass *runtime_helpers_class = NULL;
4238 if (! runtime_helpers_class)
4239 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
4240 "System.Runtime.CompilerServices", "RuntimeHelpers");
4242 if (cmethod->klass == mono_defaults.string_class) {
4243 if (strcmp (cmethod->name, "get_Chars") == 0) {
4244 int dreg = alloc_ireg (cfg);
4245 int index_reg = alloc_preg (cfg);
4246 int mult_reg = alloc_preg (cfg);
4247 int add_reg = alloc_preg (cfg);
4249 #if SIZEOF_REGISTER == 8
4250 /* The array reg is 64 bits but the index reg is only 32 */
4251 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
4253 index_reg = args [1]->dreg;
4255 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
4257 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4258 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
4259 add_reg = ins->dreg;
4260 /* Avoid a warning */
4262 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4265 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
4266 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4267 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4268 add_reg, G_STRUCT_OFFSET (MonoString, chars));
4270 type_from_op (ins, NULL, NULL);
4272 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4273 int dreg = alloc_ireg (cfg);
4274 /* Decompose later to allow more optimizations */
4275 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
4276 ins->type = STACK_I4;
4277 ins->flags |= MONO_INST_FAULT;
4278 cfg->cbb->has_array_access = TRUE;
4279 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
4282 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
4283 int mult_reg = alloc_preg (cfg);
4284 int add_reg = alloc_preg (cfg);
4286 /* The corlib functions check for oob already. */
4287 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
4288 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4289 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
4290 return cfg->cbb->last_ins;
4293 } else if (cmethod->klass == mono_defaults.object_class) {
4295 if (strcmp (cmethod->name, "GetType") == 0) {
4296 int dreg = alloc_preg (cfg);
4297 int vt_reg = alloc_preg (cfg);
4298 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4299 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
4300 type_from_op (ins, NULL, NULL);
4303 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
4304 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
4305 int dreg = alloc_ireg (cfg);
4306 int t1 = alloc_ireg (cfg);
4308 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
4309 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
4310 ins->type = STACK_I4;
4314 } else if (strcmp (cmethod->name, ".ctor") == 0) {
4315 MONO_INST_NEW (cfg, ins, OP_NOP);
4316 MONO_ADD_INS (cfg->cbb, ins);
4320 } else if (cmethod->klass == mono_defaults.array_class) {
4321 if (strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
4322 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
4324 #ifndef MONO_BIG_ARRAYS
4326 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
4329 if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
4330 int dreg = alloc_ireg (cfg);
4331 int bounds_reg = alloc_ireg (cfg);
4332 MonoBasicBlock *end_bb, *szarray_bb;
4333 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
4335 NEW_BBLOCK (cfg, end_bb);
4336 NEW_BBLOCK (cfg, szarray_bb);
4338 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
4339 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
4340 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4341 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
4342 /* Non-szarray case */
4344 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4345 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4347 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4348 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4349 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4350 MONO_START_BB (cfg, szarray_bb);
4353 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4354 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4356 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4357 MONO_START_BB (cfg, end_bb);
4359 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
4360 ins->type = STACK_I4;
4366 if (cmethod->name [0] != 'g')
4369 if (strcmp (cmethod->name, "get_Rank") == 0) {
4370 int dreg = alloc_ireg (cfg);
4371 int vtable_reg = alloc_preg (cfg);
4372 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
4373 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4374 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
4375 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4376 type_from_op (ins, NULL, NULL);
4379 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4380 int dreg = alloc_ireg (cfg);
4382 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4383 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4384 type_from_op (ins, NULL, NULL);
4389 } else if (cmethod->klass == runtime_helpers_class) {
4391 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
4392 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
4396 } else if (cmethod->klass == mono_defaults.thread_class) {
4397 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
4398 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
4399 MONO_ADD_INS (cfg->cbb, ins);
4401 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
4402 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4403 MONO_ADD_INS (cfg->cbb, ins);
4406 } else if (cmethod->klass == mono_defaults.monitor_class) {
4407 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
4408 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
4411 if (COMPILE_LLVM (cfg)) {
4413 * Pass the argument normally, the LLVM backend will handle the
4414 * calling convention problems.
4416 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4418 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
4419 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4420 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4421 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4424 return (MonoInst*)call;
4425 } else if (strcmp (cmethod->name, "Exit") == 0) {
4428 if (COMPILE_LLVM (cfg)) {
4429 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4431 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
4432 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4433 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4434 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4437 return (MonoInst*)call;
4439 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4440 MonoMethod *fast_method = NULL;
4442 /* Avoid infinite recursion */
4443 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
4444 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
4445 strcmp (cfg->method->name, "FastMonitorExit") == 0))
4448 if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) ||
4449 strcmp (cmethod->name, "Exit") == 0)
4450 fast_method = mono_monitor_get_fast_path (cmethod);
4454 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
4456 } else if (cmethod->klass->image == mono_defaults.corlib &&
4457 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
4458 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
4461 #if SIZEOF_REGISTER == 8
4462 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
4463 /* 64 bit reads are already atomic */
4464 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
4465 ins->dreg = mono_alloc_preg (cfg);
4466 ins->inst_basereg = args [0]->dreg;
4467 ins->inst_offset = 0;
4468 MONO_ADD_INS (cfg->cbb, ins);
4472 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
4473 if (strcmp (cmethod->name, "Increment") == 0) {
4474 MonoInst *ins_iconst;
4477 if (fsig->params [0]->type == MONO_TYPE_I4)
4478 opcode = OP_ATOMIC_ADD_NEW_I4;
4479 #if SIZEOF_REGISTER == 8
4480 else if (fsig->params [0]->type == MONO_TYPE_I8)
4481 opcode = OP_ATOMIC_ADD_NEW_I8;
4484 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4485 ins_iconst->inst_c0 = 1;
4486 ins_iconst->dreg = mono_alloc_ireg (cfg);
4487 MONO_ADD_INS (cfg->cbb, ins_iconst);
4489 MONO_INST_NEW (cfg, ins, opcode);
4490 ins->dreg = mono_alloc_ireg (cfg);
4491 ins->inst_basereg = args [0]->dreg;
4492 ins->inst_offset = 0;
4493 ins->sreg2 = ins_iconst->dreg;
4494 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4495 MONO_ADD_INS (cfg->cbb, ins);
4497 } else if (strcmp (cmethod->name, "Decrement") == 0) {
4498 MonoInst *ins_iconst;
4501 if (fsig->params [0]->type == MONO_TYPE_I4)
4502 opcode = OP_ATOMIC_ADD_NEW_I4;
4503 #if SIZEOF_REGISTER == 8
4504 else if (fsig->params [0]->type == MONO_TYPE_I8)
4505 opcode = OP_ATOMIC_ADD_NEW_I8;
4508 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4509 ins_iconst->inst_c0 = -1;
4510 ins_iconst->dreg = mono_alloc_ireg (cfg);
4511 MONO_ADD_INS (cfg->cbb, ins_iconst);
4513 MONO_INST_NEW (cfg, ins, opcode);
4514 ins->dreg = mono_alloc_ireg (cfg);
4515 ins->inst_basereg = args [0]->dreg;
4516 ins->inst_offset = 0;
4517 ins->sreg2 = ins_iconst->dreg;
4518 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4519 MONO_ADD_INS (cfg->cbb, ins);
4521 } else if (strcmp (cmethod->name, "Add") == 0) {
4524 if (fsig->params [0]->type == MONO_TYPE_I4)
4525 opcode = OP_ATOMIC_ADD_NEW_I4;
4526 #if SIZEOF_REGISTER == 8
4527 else if (fsig->params [0]->type == MONO_TYPE_I8)
4528 opcode = OP_ATOMIC_ADD_NEW_I8;
4532 MONO_INST_NEW (cfg, ins, opcode);
4533 ins->dreg = mono_alloc_ireg (cfg);
4534 ins->inst_basereg = args [0]->dreg;
4535 ins->inst_offset = 0;
4536 ins->sreg2 = args [1]->dreg;
4537 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4538 MONO_ADD_INS (cfg->cbb, ins);
4541 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
4543 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
4544 if (strcmp (cmethod->name, "Exchange") == 0) {
4546 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
4548 if (fsig->params [0]->type == MONO_TYPE_I4)
4549 opcode = OP_ATOMIC_EXCHANGE_I4;
4550 #if SIZEOF_REGISTER == 8
4551 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
4552 (fsig->params [0]->type == MONO_TYPE_I))
4553 opcode = OP_ATOMIC_EXCHANGE_I8;
4555 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
4556 opcode = OP_ATOMIC_EXCHANGE_I4;
4561 MONO_INST_NEW (cfg, ins, opcode);
4562 ins->dreg = mono_alloc_ireg (cfg);
4563 ins->inst_basereg = args [0]->dreg;
4564 ins->inst_offset = 0;
4565 ins->sreg2 = args [1]->dreg;
4566 MONO_ADD_INS (cfg->cbb, ins);
4568 switch (fsig->params [0]->type) {
4570 ins->type = STACK_I4;
4574 ins->type = STACK_I8;
4576 case MONO_TYPE_OBJECT:
4577 ins->type = STACK_OBJ;
4580 g_assert_not_reached ();
4583 if (cfg->gen_write_barriers && is_ref)
4584 emit_write_barrier (cfg, args [0], args [1], -1);
4586 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4588 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
4589 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
4591 gboolean is_ref = MONO_TYPE_IS_REFERENCE (fsig->params [1]);
4592 if (fsig->params [1]->type == MONO_TYPE_I4)
4594 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
4595 size = sizeof (gpointer);
4596 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
4599 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
4600 ins->dreg = alloc_ireg (cfg);
4601 ins->sreg1 = args [0]->dreg;
4602 ins->sreg2 = args [1]->dreg;
4603 ins->sreg3 = args [2]->dreg;
4604 ins->type = STACK_I4;
4605 MONO_ADD_INS (cfg->cbb, ins);
4606 } else if (size == 8) {
4607 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
4608 ins->dreg = alloc_ireg (cfg);
4609 ins->sreg1 = args [0]->dreg;
4610 ins->sreg2 = args [1]->dreg;
4611 ins->sreg3 = args [2]->dreg;
4612 ins->type = STACK_I8;
4613 MONO_ADD_INS (cfg->cbb, ins);
4615 /* g_assert_not_reached (); */
4617 if (cfg->gen_write_barriers && is_ref)
4618 emit_write_barrier (cfg, args [0], args [1], -1);
4620 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
4624 } else if (cmethod->klass->image == mono_defaults.corlib) {
4625 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
4626 && strcmp (cmethod->klass->name, "Debugger") == 0) {
4627 if (should_insert_brekpoint (cfg->method))
4628 MONO_INST_NEW (cfg, ins, OP_BREAK);
4630 MONO_INST_NEW (cfg, ins, OP_NOP);
4631 MONO_ADD_INS (cfg->cbb, ins);
4634 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
4635 && strcmp (cmethod->klass->name, "Environment") == 0) {
4637 EMIT_NEW_ICONST (cfg, ins, 1);
4639 EMIT_NEW_ICONST (cfg, ins, 0);
4643 } else if (cmethod->klass == mono_defaults.math_class) {
4645 * There is general branches code for Min/Max, but it does not work for
4647 * http://everything2.com/?node_id=1051618
4651 #ifdef MONO_ARCH_SIMD_INTRINSICS
4652 if (cfg->opt & MONO_OPT_SIMD) {
4653 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
4659 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
4663 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *   Redirect calls to selected runtime-internal methods to specialized
 *   implementations.  The visible case handles String.InternalAllocateStr:
 *   when allocation profiling is off and MONO_OPT_SHARED is not set, the
 *   call is replaced with a call to the GC's managed string allocator.
 *   NOTE(review): the function tail is not visible in this extract —
 *   presumably returns NULL when no redirection applies; confirm.
 */
4666 inline static MonoInst*
4667 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4668 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4670 if (method->klass == mono_defaults.string_class) {
4671 /* managed string allocation support */
4672 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
4673 MonoInst *iargs [2];
4674 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4675 MonoMethod *managed_alloc = NULL;
4677 g_assert (vtable); /* Should not fail since it is System.String */
4678 #ifndef MONO_CROSS_COMPILE
4679 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
/* Call the managed allocator with (vtable, length) instead of the icall */
4683 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4684 iargs [1] = args [0];
4685 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *   Spill the inline-call arguments on the evaluation stack SP into newly
 *   created local variables and record them in cfg->args, so the inlined
 *   body can address its arguments like a normal method would.
 */
4692 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4694 MonoInst *store, *temp;
4697 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* For the implicit 'this' slot the static signature has no entry, so
 * derive the type from the value actually on the stack. */
4698 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4701 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4702 * would be different than the MonoInst's used to represent arguments, and
4703 * the ldelema implementation can't deal with that.
4704 * Solution: When ldelema is used on an inline argument, create a var for
4705 * it, emit ldelema on that var, and emit the saving code below in
4706 * inline_method () if needed.
4708 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4709 cfg->args [i] = temp;
4710 /* This uses cfg->args [i] which is set by the preceding line */
4711 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4712 store->cil_code = sp [0]->cil_code;
4717 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4718 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4720 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *   Debug helper: only allow inlining of callees whose full name starts
 *   with the prefix given by the MONO_INLINE_CALLED_METHOD_NAME_LIMIT
 *   environment variable.  With no/empty limit, everything is allowed.
 *   The env lookup is cached in a function-local static on first use.
 */
4722 check_inline_called_method_name_limit (MonoMethod *called_method)
4725 static char *limit = NULL;
4727 if (limit == NULL) {
4728 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4730 if (limit_string != NULL)
4731 limit = limit_string;
/* Empty string marks "no limit configured" so getenv runs only once */
4733 limit = (char *) "";
4736 if (limit [0] != '\0') {
4737 char *called_method_name = mono_method_full_name (called_method, TRUE);
4739 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4740 g_free (called_method_name);
4742 //return (strncmp_result <= 0);
4743 return (strncmp_result == 0);
4750 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *   Debug helper: only allow inlining into callers whose full name starts
 *   with the prefix given by the MONO_INLINE_CALLER_METHOD_NAME_LIMIT
 *   environment variable.  Mirrors check_inline_called_method_name_limit.
 */
4752 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4755 static char *limit = NULL;
4757 if (limit == NULL) {
4758 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4759 if (limit_string != NULL) {
4760 limit = limit_string;
/* Empty string marks "no limit configured" so getenv runs only once */
4762 limit = (char *) "";
4766 if (limit [0] != '\0') {
4767 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
4769 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4770 g_free (caller_method_name);
4772 //return (strncmp_result <= 0);
4773 return (strncmp_result == 0);
/*
 * inline_method:
 *   Try to inline CMETHOD at the current emission point.  Saves all the
 *   per-method compilation state in cfg (locals, args, cil offsets, cbb,
 *   generic context, ...), recursively runs mono_method_to_ir () on the
 *   callee between a fresh start/end bblock pair, then restores the state.
 *   The inline is committed only if the callee's cost is below the budget
 *   (or INLINE_ALWAYS is set); otherwise the newly created bblocks are
 *   discarded.  RVAR, if created, receives the callee's return value.
 */
4781 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4782 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_always)
4784 MonoInst *ins, *rvar = NULL;
4785 MonoMethodHeader *cheader;
4786 MonoBasicBlock *ebblock, *sbblock;
4788 MonoMethod *prev_inlined_method;
4789 MonoInst **prev_locals, **prev_args;
4790 MonoType **prev_arg_types;
4791 guint prev_real_offset;
4792 GHashTable *prev_cbb_hash;
4793 MonoBasicBlock **prev_cil_offset_to_bb;
4794 MonoBasicBlock *prev_cbb;
4795 unsigned char* prev_cil_start;
4796 guint32 prev_cil_offset_to_bb_len;
4797 MonoMethod *prev_current_method;
4798 MonoGenericContext *prev_generic_context;
4799 gboolean ret_var_set, prev_ret_var_set;
4801 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional debug filters on which methods may be inlined (see the
 * check_inline_*_name_limit helpers above). */
4803 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4804 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
4807 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4808 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
4812 if (cfg->verbose_level > 2)
4813 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4815 if (!cmethod->inline_info) {
4816 mono_jit_stats.inlineable_methods++;
4817 cmethod->inline_info = 1;
4820 /* allocate local variables */
4821 cheader = mono_method_get_header (cmethod);
4823 if (cheader == NULL || mono_loader_get_last_error ()) {
4824 MonoLoaderError *error = mono_loader_get_last_error ();
4827 mono_metadata_free_mh (cheader);
/* If this inline is mandatory, surface the loader error on cfg */
4828 if (inline_always && error)
4829 mono_cfg_set_exception (cfg, error->exception_type);
4831 mono_loader_clear_error ();
4835 /*Must verify before creating locals as it can cause the JIT to assert.*/
4836 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
4837 mono_metadata_free_mh (cheader);
4841 /* allocate space to store the return value */
4842 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4843 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
4847 prev_locals = cfg->locals;
4848 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4849 for (i = 0; i < cheader->num_locals; ++i)
4850 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4852 /* allocate start and end blocks */
4853 /* This is needed so if the inline is aborted, we can clean up */
4854 NEW_BBLOCK (cfg, sbblock);
4855 sbblock->real_offset = real_offset;
4857 NEW_BBLOCK (cfg, ebblock);
4858 ebblock->block_num = cfg->num_bblocks++;
4859 ebblock->real_offset = real_offset;
/* Save the caller's compilation state before recursing */
4861 prev_args = cfg->args;
4862 prev_arg_types = cfg->arg_types;
4863 prev_inlined_method = cfg->inlined_method;
4864 cfg->inlined_method = cmethod;
4865 cfg->ret_var_set = FALSE;
4866 cfg->inline_depth ++;
4867 prev_real_offset = cfg->real_offset;
4868 prev_cbb_hash = cfg->cbb_hash;
4869 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4870 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4871 prev_cil_start = cfg->cil_start;
4872 prev_cbb = cfg->cbb;
4873 prev_current_method = cfg->current_method;
4874 prev_generic_context = cfg->generic_context;
4875 prev_ret_var_set = cfg->ret_var_set;
/* Recursively translate the callee; negative cost means "do not inline" */
4877 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4879 ret_var_set = cfg->ret_var_set;
/* Restore the caller's compilation state */
4881 cfg->inlined_method = prev_inlined_method;
4882 cfg->real_offset = prev_real_offset;
4883 cfg->cbb_hash = prev_cbb_hash;
4884 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4885 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4886 cfg->cil_start = prev_cil_start;
4887 cfg->locals = prev_locals;
4888 cfg->args = prev_args;
4889 cfg->arg_types = prev_arg_types;
4890 cfg->current_method = prev_current_method;
4891 cfg->generic_context = prev_generic_context;
4892 cfg->ret_var_set = prev_ret_var_set;
4893 cfg->inline_depth --;
/* Commit the inline when cheap enough, or when forced */
4895 if ((costs >= 0 && costs < 60) || inline_always) {
4896 if (cfg->verbose_level > 2)
4897 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4899 mono_jit_stats.inlined_methods++;
4901 /* always add some code to avoid block split failures */
4902 MONO_INST_NEW (cfg, ins, OP_NOP);
4903 MONO_ADD_INS (prev_cbb, ins);
4905 prev_cbb->next_bb = sbblock;
4906 link_bblock (cfg, prev_cbb, sbblock);
4909 * Get rid of the begin and end bblocks if possible to aid local
4912 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4914 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4915 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4917 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4918 MonoBasicBlock *prev = ebblock->in_bb [0];
4919 mono_merge_basic_blocks (cfg, prev, ebblock);
4921 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4922 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4923 cfg->cbb = prev_cbb;
4931 * If the inlined method contains only a throw, then the ret var is not
4932 * set, so set it to a dummy value.
4935 static double r8_0 = 0.0;
4937 switch (rvar->type) {
4939 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4942 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4947 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4950 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4951 ins->type = STACK_R8;
4952 ins->inst_p0 = (void*)&r8_0;
4953 ins->dreg = rvar->dreg;
4954 MONO_ADD_INS (cfg->cbb, ins);
4957 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4960 g_assert_not_reached ();
/* Push the (possibly dummy) return value back on the caller's stack */
4964 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
4967 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Abort path: inlining was too costly or failed */
4970 if (cfg->verbose_level > 2)
4971 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4972 cfg->exception_type = MONO_EXCEPTION_NONE;
4973 mono_loader_clear_error ();
4975 /* This gets rid of the newly added bblocks */
4976 cfg->cbb = prev_cbb;
4978 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
4983 * Some of these comments may well be out-of-date.
4984 * Design decisions: we do a single pass over the IL code (and we do bblock
4985 * splitting/merging in the few cases when it's required: a back jump to an IL
4986 * address that was not already seen as bblock starting point).
4987 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4988 * Complex operations are decomposed in simpler ones right away. We need to let the
4989 * arch-specific code peek and poke inside this process somehow (except when the
4990 * optimizations can take advantage of the full semantic info of coarse opcodes).
4991 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4992 * MonoInst->opcode initially is the IL opcode or some simplification of that
4993 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4994 * opcode with value bigger than OP_LAST.
4995 * At this point the IR can be handed over to an interpreter, a dumb code generator
4996 * or to the optimizing code generator that will translate it to SSA form.
4998 * Profiling directed optimizations.
4999 * We may compile by default with few or no optimizations and instrument the code
5000 * or the user may indicate what methods to optimize the most either in a config file
5001 * or through repeated runs where the compiler applies offline the optimizations to
5002 * each method and then decides if it was worth it.
5005 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
5006 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
5007 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
5008 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
5009 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
5010 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
5011 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
5012 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; LOAD_ERROR;}
5014 /* offset from br.s -> br like opcodes */
5015 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *   Return TRUE if the CIL address IP belongs to basic block BB, i.e. no
 *   *other* bblock starts at that offset in cfg->cil_offset_to_bb.
 */
5018 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
5020 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
5022 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *   First pass over the CIL stream [START, END): decode each opcode just
 *   far enough to find branch targets and create a basic block (via
 *   GET_BBLOCK) at every target and fall-through point.  Also marks the
 *   bblock containing a CEE_THROW as out-of-line so it can be moved to
 *   cold code.  On a bad opcode, *POS presumably reports the error
 *   position to the caller (error path not visible in this extract).
 */
5026 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
5028 unsigned char *ip = start;
5029 unsigned char *target;
5032 MonoBasicBlock *bblock;
5033 const MonoOpcode *opcode;
5036 cli_addr = ip - start;
5037 i = mono_opcode_value ((const guint8 **)&ip, end);
5040 opcode = &mono_opcodes [i];
/* Advance ip by the operand size; only branch operands create bblocks */
5041 switch (opcode->argument) {
5042 case MonoInlineNone:
5045 case MonoInlineString:
5046 case MonoInlineType:
5047 case MonoInlineField:
5048 case MonoInlineMethod:
5051 case MonoShortInlineR:
5058 case MonoShortInlineVar:
5059 case MonoShortInlineI:
5062 case MonoShortInlineBrTarget:
/* 1-byte signed displacement, relative to the end of the instruction */
5063 target = start + cli_addr + 2 + (signed char)ip [1];
5064 GET_BBLOCK (cfg, bblock, target);
5067 GET_BBLOCK (cfg, bblock, ip);
5069 case MonoInlineBrTarget:
/* 4-byte signed displacement, relative to the end of the instruction */
5070 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
5071 GET_BBLOCK (cfg, bblock, target);
5074 GET_BBLOCK (cfg, bblock, ip);
5076 case MonoInlineSwitch: {
5077 guint32 n = read32 (ip + 1);
/* Switch targets are relative to the end of the whole jump table */
5080 cli_addr += 5 + 4 * n;
5081 target = start + cli_addr;
5082 GET_BBLOCK (cfg, bblock, target);
5084 for (j = 0; j < n; ++j) {
5085 target = start + cli_addr + (gint32)read32 (ip);
5086 GET_BBLOCK (cfg, bblock, target);
5096 g_assert_not_reached ();
5099 if (i == CEE_THROW) {
5100 unsigned char *bb_start = ip - 1;
5102 /* Find the start of the bblock containing the throw */
5104 while ((bb_start >= start) && !bblock) {
5105 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* Throwing blocks are cold: move them out of the hot path */
5109 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *   Resolve TOKEN to a MonoMethod in the context of M.  For wrapper
 *   methods the token indexes the wrapper's own data table; otherwise it
 *   is looked up in the method's image.  "Open" constructed generic
 *   methods are allowed (contrast with mini_get_method below).
 */
5118 static inline MonoMethod *
5119 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5123 if (m->wrapper_type != MONO_WRAPPER_NONE)
5124 return mono_method_get_wrapper_data (m, token);
5126 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *   Like mini_get_method_allow_open, but when not compiling with generic
 *   sharing, reject methods on open constructed types (the handling after
 *   the check is not visible in this extract).
 */
5131 static inline MonoMethod *
5132 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5134 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
5136 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *   Resolve TOKEN to a MonoClass in the context of METHOD.  Wrapper
 *   methods resolve through their wrapper data table; otherwise the
 *   class is loaded from the method's image and initialized.
 */
5142 static inline MonoClass*
5143 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
5147 if (method->wrapper_type != MONO_WRAPPER_NONE)
5148 klass = mono_method_get_wrapper_data (method, token);
5150 klass = mono_class_get_full (method->klass->image, token, context);
5152 mono_class_init (klass);
5157 * Returns TRUE if the JIT should abort inlining because "callee"
5158 * is influenced by security attributes.
/*
 * check_linkdemand:
 *   Evaluate CAS link demands when CALLER invokes CALLEE.  An ECMA link
 *   demand emits code that throws a SecurityException before the call;
 *   any other failure records MONO_EXCEPTION_SECURITY_LINKDEMAND on cfg
 *   (without clobbering a previously recorded exception).
 */
5161 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
/* Inlined callees with declarative security need the declsec check too */
5165 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
5169 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
5170 if (result == MONO_JIT_SECURITY_OK)
5173 if (result == MONO_JIT_LINKDEMAND_ECMA) {
5174 /* Generate code to throw a SecurityException before the actual call/link */
5175 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5178 NEW_ICONST (cfg, args [0], 4);
5179 NEW_METHODCONST (cfg, args [1], caller);
5180 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
5181 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
5182 /* don't hide previous results */
5183 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
5184 cfg->exception_data = result;
/*
 * throw_exception:
 *   Return (lazily resolving on first call) the managed
 *   SecurityManager.ThrowException(exception) method.
 *   NOTE(review): the static cache is initialized without locking —
 *   racy if first reached concurrently; harmless if lookups are
 *   idempotent, but worth confirming.
 */
5192 throw_exception (void)
5194 static MonoMethod *method = NULL;
5197 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5198 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *   Emit a call to SecurityManager.ThrowException(EX) at the current
 *   emission point, so EX is raised at runtime when this code executes.
 */
5205 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
5207 MonoMethod *thrower = throw_exception ();
5210 EMIT_NEW_PCONST (cfg, args [0], ex);
5211 mono_emit_method_call (cfg, thrower, args, NULL);
5215 * Return the original method is a wrapper is specified. We can only access
5216 * the custom attributes from the original method.
/* Used by the CoreCLR security checks below: attribute-based security
 * levels live on the wrapped method, not on the wrapper itself. */
5219 get_original_method (MonoMethod *method)
5221 if (method->wrapper_type == MONO_WRAPPER_NONE)
5224 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
5225 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
5228 /* in other cases we need to find the original method */
5229 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *   CoreCLR security: if CALLER may not access FIELD, emit code that
 *   throws the resulting security exception at this point.
 */
5233 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
5234 MonoBasicBlock *bblock, unsigned char *ip)
5236 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5237 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
5239 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *   CoreCLR security: if CALLER may not call CALLEE, emit code that
 *   throws the resulting security exception at this point.
 */
5243 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
5244 MonoBasicBlock *bblock, unsigned char *ip)
5246 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5247 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
5249 emit_throw_exception (cfg, ex);
5253 * Check that the IL instructions at ip are the array initialization
5254 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *   Pattern-match the dup/ldtoken/call RuntimeHelpers.InitializeArray
 *   sequence emitted by compilers for array initializers.  On a match,
 *   return a pointer to the static field's raw data (or, for AOT, the
 *   RVA wrapped via GUINT_TO_POINTER) and report the data size and field
 *   token through the out parameters; return NULL when the pattern or
 *   element type doesn't qualify.
 */
5257 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
5260 * newarr[System.Int32]
5262 * ldtoken field valuetype ...
5263 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip[5] == 0x4 checks the ldtoken token's table byte (a Field token) */
5265 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
5266 guint32 token = read32 (ip + 7);
5267 guint32 field_token = read32 (ip + 2);
5268 guint32 field_index = field_token & 0xffffff;
5270 const char *data_ptr;
5272 MonoMethod *cmethod;
5273 MonoClass *dummy_class;
5274 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
5280 *out_field_token = field_token;
5282 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* The callee must really be RuntimeHelpers.InitializeArray from corlib */
5285 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
5287 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
5288 case MONO_TYPE_BOOLEAN:
5292 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
5293 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
5294 case MONO_TYPE_CHAR:
5304 return NULL; /* stupid ARM FP swapped format */
/* The field's blob must be large enough for the computed data size */
5314 if (size > mono_type_size (field->type, &dummy_align))
5317 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
5318 if (!method->klass->image->dynamic) {
5319 field_index = read32 (ip + 2) & 0xffffff;
5320 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
5321 data_ptr = mono_image_rva_map (method->klass->image, rva);
5322 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
5323 /* for aot code we do the lookup on load */
5324 if (aot && data_ptr)
5325 return GUINT_TO_POINTER (rva);
5327 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
5329 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *   Record an InvalidProgramException on cfg with a message naming the
 *   method and disassembling the offending instruction at IP (or noting
 *   an empty method body).  The header is queued for later freeing.
 */
5337 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
5339 char *method_fname = mono_method_full_name (method, TRUE);
5341 MonoMethodHeader *header = mono_method_get_header (method);
5343 if (header->code_size == 0)
5344 method_code = g_strdup ("method body is empty.");
5346 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
5347 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
5348 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
5349 g_free (method_fname);
5350 g_free (method_code);
5351 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *   Record a pre-built managed exception object on cfg, registering
 *   cfg->exception_ptr as a GC root so the object stays alive until the
 *   compilation failure is reported.
 */
5355 set_exception_object (MonoCompile *cfg, MonoException *exception)
5357 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
5358 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
5359 cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 *   Return TRUE if KLASS is a reference type, resolving generic type
 *   parameters through the generic sharing context when one is active.
 */
5363 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5367 if (cfg->generic_sharing_context)
5368 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
5370 type = &klass->byval_arg;
5371 return MONO_TYPE_IS_REFERENCE (type);
/*
 * emit_stloc_ir:
 *   Emit a store of the top-of-stack value *SP into local N.  When the
 *   store is a plain register move and the source is the constant just
 *   emitted as the last instruction, retarget that constant's dreg to
 *   the local's register instead of emitting a separate move.
 */
5375 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5378 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5379 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5380 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5381 /* Optimize reg-reg moves away */
5383 * Can't optimize other opcodes, since sp[0] might point to
5384 * the last ins of a decomposed opcode.
5386 sp [0]->dreg = (cfg)->locals [n]->dreg;
5388 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5393 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *   Peephole for "ldloca N; initobj T": instead of taking the local's
 *   address, directly zero the local (NULL for reference types, VZERO
 *   for value types), which keeps the local addressable-free.  Returns
 *   the new ip past the consumed sequence, or (presumably) NULL when the
 *   pattern does not apply — the tail is not visible in this extract.
 */
5396 static inline unsigned char *
5397 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
5406 local = read16 (ip + 2);
/* Only fold when the following initobj is in the same basic block */
5410 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5411 gboolean skip = FALSE;
5413 /* From the INITOBJ case */
5414 token = read32 (ip + 2);
5415 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5416 CHECK_TYPELOAD (klass);
5417 if (generic_class_is_reference_type (cfg, klass)) {
5418 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5419 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5420 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5421 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5422 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/*
 * is_exception_class:
 *   Walk the inheritance chain of CLASS and return TRUE if it derives
 *   from (or is) System.Exception.
 */
5435 is_exception_class (MonoClass *class)
5438 if (class == mono_defaults.exception_class)
5440 class = class->parent;
5446 * is_jit_optimizer_disabled:
5448 * Determine whenever M's assembly has a DebuggableAttribute with the
5449 * IsJITOptimizerDisabled flag set.
5452 is_jit_optimizer_disabled (MonoMethod *m)
5454 MonoAssembly *ass = m->klass->image->assembly;
5455 MonoCustomAttrInfo* attrs;
/* NOTE(review): a static cache shared across assemblies; only holds the
 * DebuggableAttribute class, which is the same for all — OK, but racy
 * on first use without a barrier; confirm callers serialize this. */
5456 static MonoClass *klass;
5458 gboolean val = FALSE;
/* Per-assembly result is cached on the MonoAssembly itself */
5461 if (ass->jit_optimizer_disabled_inited)
5462 return ass->jit_optimizer_disabled;
5464 klass = mono_class_from_name_cached (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
5466 attrs = mono_custom_attrs_from_assembly (ass);
5468 for (i = 0; i < attrs->num_attrs; ++i) {
5469 MonoCustomAttrEntry *attr = &attrs->attrs [i];
5472 MonoMethodSignature *sig;
5474 if (!attr->ctor || attr->ctor->klass != klass)
5476 /* Decode the attribute. See reflection.c */
5477 len = attr->data_size;
5478 p = (const char*)attr->data;
/* Custom attribute blobs start with the 0x0001 prolog */
5479 g_assert (read16 (p) == 0x0001);
5482 // FIXME: Support named parameters
5483 sig = mono_method_signature (attr->ctor);
/* Only the DebuggableAttribute(bool, bool) constructor is decoded */
5484 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
5486 /* Two boolean arguments */
/* Publish the value before the inited flag so racing readers never see
 * inited==TRUE with a stale value */
5492 ass->jit_optimizer_disabled = val;
5493 mono_memory_barrier ();
5494 ass->jit_optimizer_disabled_inited = TRUE;
5500 * mono_method_to_ir:
5502 * Translate the .net IL into linear IR.
5505 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5506 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5507 guint inline_offset, gboolean is_virtual_call)
5510 MonoInst *ins, **sp, **stack_start;
5511 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5512 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
5513 MonoMethod *cmethod, *method_definition;
5514 MonoInst **arg_array;
5515 MonoMethodHeader *header;
5517 guint32 token, ins_flag;
5519 MonoClass *constrained_call = NULL;
5520 unsigned char *ip, *end, *target, *err_pos;
5521 static double r8_0 = 0.0;
5522 MonoMethodSignature *sig;
5523 MonoGenericContext *generic_context = NULL;
5524 MonoGenericContainer *generic_container = NULL;
5525 MonoType **param_types;
5526 int i, n, start_new_bblock, dreg;
5527 int num_calls = 0, inline_costs = 0;
5528 int breakpoint_id = 0;
5530 MonoBoolean security, pinvoke;
5531 MonoSecurityManager* secman = NULL;
5532 MonoDeclSecurityActions actions;
5533 GSList *class_inits = NULL;
5534 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5536 gboolean init_locals, seq_points, skip_dead_blocks;
5537 gboolean disable_inline;
5539 disable_inline = is_jit_optimizer_disabled (method);
5541 /* serialization and xdomain stuff may need access to private fields and methods */
5542 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5543 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5544 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5545 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5546 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5547 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5549 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5551 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5552 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5553 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5554 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5556 image = method->klass->image;
5557 header = mono_method_get_header (method);
5559 MonoLoaderError *error;
5561 if ((error = mono_loader_get_last_error ())) {
5562 mono_cfg_set_exception (cfg, error->exception_type);
5564 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
5565 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
5567 goto exception_exit;
5569 generic_container = mono_method_get_generic_container (method);
5570 sig = mono_method_signature (method);
5571 num_args = sig->hasthis + sig->param_count;
5572 ip = (unsigned char*)header->code;
5573 cfg->cil_start = ip;
5574 end = ip + header->code_size;
5575 mono_jit_stats.cil_code_size += header->code_size;
5576 init_locals = header->init_locals;
5578 seq_points = cfg->gen_seq_points && cfg->method == method;
5581 * Methods without init_locals set could cause asserts in various passes
5586 method_definition = method;
5587 while (method_definition->is_inflated) {
5588 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5589 method_definition = imethod->declaring;
5592 /* SkipVerification is not allowed if core-clr is enabled */
5593 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5595 dont_verify_stloc = TRUE;
5598 if (mono_debug_using_mono_debugger ())
5599 cfg->keep_cil_nops = TRUE;
5601 if (sig->is_inflated)
5602 generic_context = mono_method_get_context (method);
5603 else if (generic_container)
5604 generic_context = &generic_container->context;
5605 cfg->generic_context = generic_context;
5607 if (!cfg->generic_sharing_context)
5608 g_assert (!sig->has_type_parameters);
5610 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5611 g_assert (method->is_inflated);
5612 g_assert (mono_method_get_context (method)->method_inst);
5614 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5615 g_assert (sig->generic_param_count);
5617 if (cfg->method == method) {
5618 cfg->real_offset = 0;
5620 cfg->real_offset = inline_offset;
5623 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5624 cfg->cil_offset_to_bb_len = header->code_size;
5626 cfg->current_method = method;
5628 if (cfg->verbose_level > 2)
5629 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5631 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5633 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5634 for (n = 0; n < sig->param_count; ++n)
5635 param_types [n + sig->hasthis] = sig->params [n];
5636 cfg->arg_types = param_types;
5638 dont_inline = g_list_prepend (dont_inline, method);
5639 if (cfg->method == method) {
5641 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5642 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5645 NEW_BBLOCK (cfg, start_bblock);
5646 cfg->bb_entry = start_bblock;
5647 start_bblock->cil_code = NULL;
5648 start_bblock->cil_length = 0;
5651 NEW_BBLOCK (cfg, end_bblock);
5652 cfg->bb_exit = end_bblock;
5653 end_bblock->cil_code = NULL;
5654 end_bblock->cil_length = 0;
5655 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
5656 g_assert (cfg->num_bblocks == 2);
5658 arg_array = cfg->args;
5660 if (header->num_clauses) {
5661 cfg->spvars = g_hash_table_new (NULL, NULL);
5662 cfg->exvars = g_hash_table_new (NULL, NULL);
5664 /* handle exception clauses */
5665 for (i = 0; i < header->num_clauses; ++i) {
5666 MonoBasicBlock *try_bb;
5667 MonoExceptionClause *clause = &header->clauses [i];
5668 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5669 try_bb->real_offset = clause->try_offset;
5670 try_bb->try_start = TRUE;
5671 try_bb->region = ((i + 1) << 8) | clause->flags;
5672 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5673 tblock->real_offset = clause->handler_offset;
5674 tblock->flags |= BB_EXCEPTION_HANDLER;
5676 link_bblock (cfg, try_bb, tblock);
5678 if (*(ip + clause->handler_offset) == CEE_POP)
5679 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5681 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5682 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5683 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5685 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
5686 MONO_ADD_INS (tblock, ins);
5688 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5689 MONO_ADD_INS (tblock, ins);
5691 /* todo: is a fault block unsafe to optimize? */
5692 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5693 tblock->flags |= BB_EXCEPTION_UNSAFE;
5697 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5699 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5701 /* catch and filter blocks get the exception object on the stack */
5702 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5703 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5704 MonoInst *dummy_use;
5706 /* mostly like handle_stack_args (), but just sets the input args */
5707 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5708 tblock->in_scount = 1;
5709 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5710 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5713 * Add a dummy use for the exvar so its liveness info will be
5717 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5719 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5720 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5721 tblock->flags |= BB_EXCEPTION_HANDLER;
5722 tblock->real_offset = clause->data.filter_offset;
5723 tblock->in_scount = 1;
5724 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5725 /* The filter block shares the exvar with the handler block */
5726 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5727 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5728 MONO_ADD_INS (tblock, ins);
5732 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5733 clause->data.catch_class &&
5734 cfg->generic_sharing_context &&
5735 mono_class_check_context_used (clause->data.catch_class)) {
5737 * In shared generic code with catch
5738 * clauses containing type variables
5739 * the exception handling code has to
5740 * be able to get to the rgctx.
5741 * Therefore we have to make sure that
5742 * the vtable/mrgctx argument (for
5743 * static or generic methods) or the
5744 * "this" argument (for non-static
5745 * methods) are live.
5747 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5748 mini_method_get_context (method)->method_inst ||
5749 method->klass->valuetype) {
5750 mono_get_vtable_var (cfg);
5752 MonoInst *dummy_use;
5754 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5759 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5760 cfg->cbb = start_bblock;
5761 cfg->args = arg_array;
5762 mono_save_args (cfg, sig, inline_args);
5765 /* FIRST CODE BLOCK */
5766 NEW_BBLOCK (cfg, bblock);
5767 bblock->cil_code = ip;
5771 ADD_BBLOCK (cfg, bblock);
5773 if (cfg->method == method) {
5774 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5775 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5776 MONO_INST_NEW (cfg, ins, OP_BREAK);
5777 MONO_ADD_INS (bblock, ins);
5781 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5782 secman = mono_security_manager_get_methods ();
5784 security = (secman && mono_method_has_declsec (method));
5785 /* at this point having security doesn't mean we have any code to generate */
5786 if (security && (cfg->method == method)) {
5787 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5788 * And we do not want to enter the next section (with allocation) if we
5789 * have nothing to generate */
5790 security = mono_declsec_get_demands (method, &actions);
5793 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5794 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5796 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5797 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5798 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5800 /* unless the method or its class has the [SuppressUnmanagedCodeSecurity] attribute */
5801 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5805 mono_custom_attrs_free (custom);
5808 custom = mono_custom_attrs_from_class (wrapped->klass);
5809 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5813 mono_custom_attrs_free (custom);
5816 /* not a P/Invoke after all */
5821 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5822 /* we use a separate basic block for the initialization code */
5823 NEW_BBLOCK (cfg, init_localsbb);
5824 cfg->bb_init = init_localsbb;
5825 init_localsbb->real_offset = cfg->real_offset;
5826 start_bblock->next_bb = init_localsbb;
5827 init_localsbb->next_bb = bblock;
5828 link_bblock (cfg, start_bblock, init_localsbb);
5829 link_bblock (cfg, init_localsbb, bblock);
5831 cfg->cbb = init_localsbb;
5833 start_bblock->next_bb = bblock;
5834 link_bblock (cfg, start_bblock, bblock);
5837 /* at this point we know, if security is TRUE, that some code needs to be generated */
5838 if (security && (cfg->method == method)) {
5841 mono_jit_stats.cas_demand_generation++;
5843 if (actions.demand.blob) {
5844 /* Add code for SecurityAction.Demand */
5845 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5846 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5847 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5848 mono_emit_method_call (cfg, secman->demand, args, NULL);
5850 if (actions.noncasdemand.blob) {
5851 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5852 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5853 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5854 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5855 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5856 mono_emit_method_call (cfg, secman->demand, args, NULL);
5858 if (actions.demandchoice.blob) {
5859 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5860 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5861 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5862 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5863 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5867 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5869 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5872 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5873 /* check if this is native code, e.g. an icall or a p/invoke */
5874 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5875 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5877 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
5878 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
5880 /* if this is a native call then it can only be JITted from platform code */
5881 if ((icall || pinvk) && method->klass && method->klass->image) {
5882 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
5883 MonoException *ex = icall ? mono_get_exception_security () :
5884 mono_get_exception_method_access ();
5885 emit_throw_exception (cfg, ex);
5892 if (header->code_size == 0)
5895 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5900 if (cfg->method == method)
5901 mono_debug_init_method (cfg, bblock, breakpoint_id);
5903 for (n = 0; n < header->num_locals; ++n) {
5904 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5909 /* We force the vtable variable here for all shared methods
5910 for the possibility that they might show up in a stack
5911 trace where their exact instantiation is needed. */
5912 if (cfg->generic_sharing_context && method == cfg->method) {
5913 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5914 mini_method_get_context (method)->method_inst ||
5915 method->klass->valuetype) {
5916 mono_get_vtable_var (cfg);
5918 /* FIXME: Is there a better way to do this?
5919 We need the variable live for the duration
5920 of the whole method. */
5921 cfg->args [0]->flags |= MONO_INST_INDIRECT;
5925 /* add a check for this != NULL to inlined methods */
5926 if (is_virtual_call) {
5929 NEW_ARGLOAD (cfg, arg_ins, 0);
5930 MONO_ADD_INS (cfg->cbb, arg_ins);
5931 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
5934 skip_dead_blocks = !dont_verify;
5935 if (skip_dead_blocks) {
5936 original_bb = bb = mono_basic_block_split (method, &error);
5937 if (!mono_error_ok (&error)) {
5938 mono_error_cleanup (&error);
5944 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5945 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5948 start_new_bblock = 0;
5951 if (cfg->method == method)
5952 cfg->real_offset = ip - header->code;
5954 cfg->real_offset = inline_offset;
5959 if (start_new_bblock) {
5960 bblock->cil_length = ip - bblock->cil_code;
5961 if (start_new_bblock == 2) {
5962 g_assert (ip == tblock->cil_code);
5964 GET_BBLOCK (cfg, tblock, ip);
5966 bblock->next_bb = tblock;
5969 start_new_bblock = 0;
5970 for (i = 0; i < bblock->in_scount; ++i) {
5971 if (cfg->verbose_level > 3)
5972 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5973 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5977 g_slist_free (class_inits);
5980 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5981 link_bblock (cfg, bblock, tblock);
5982 if (sp != stack_start) {
5983 handle_stack_args (cfg, stack_start, sp - stack_start);
5985 CHECK_UNVERIFIABLE (cfg);
5987 bblock->next_bb = tblock;
5990 for (i = 0; i < bblock->in_scount; ++i) {
5991 if (cfg->verbose_level > 3)
5992 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5993 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5996 g_slist_free (class_inits);
6001 if (skip_dead_blocks) {
6002 int ip_offset = ip - header->code;
6004 if (ip_offset == bb->end)
6008 int op_size = mono_opcode_size (ip, end);
6009 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
6011 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
6013 if (ip_offset + op_size == bb->end) {
6014 MONO_INST_NEW (cfg, ins, OP_NOP);
6015 MONO_ADD_INS (bblock, ins);
6016 start_new_bblock = 1;
6024 * Sequence points are points where the debugger can place a breakpoint.
6025 * Currently, we generate these automatically at points where the IL
6028 if (seq_points && sp == stack_start) {
6029 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
6030 MONO_ADD_INS (cfg->cbb, ins);
6033 bblock->real_offset = cfg->real_offset;
6035 if ((cfg->method == method) && cfg->coverage_info) {
6036 guint32 cil_offset = ip - header->code;
6037 cfg->coverage_info->data [cil_offset].cil_code = ip;
6039 /* TODO: Use an increment here */
6040 #if defined(TARGET_X86)
6041 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
6042 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
6044 MONO_ADD_INS (cfg->cbb, ins);
6046 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
6047 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
6051 if (cfg->verbose_level > 3)
6052 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
6056 if (cfg->keep_cil_nops)
6057 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
6059 MONO_INST_NEW (cfg, ins, OP_NOP);
6061 MONO_ADD_INS (bblock, ins);
6064 if (should_insert_brekpoint (cfg->method))
6065 MONO_INST_NEW (cfg, ins, OP_BREAK);
6067 MONO_INST_NEW (cfg, ins, OP_NOP);
6069 MONO_ADD_INS (bblock, ins);
6075 CHECK_STACK_OVF (1);
6076 n = (*ip)-CEE_LDARG_0;
6078 EMIT_NEW_ARGLOAD (cfg, ins, n);
6086 CHECK_STACK_OVF (1);
6087 n = (*ip)-CEE_LDLOC_0;
6089 EMIT_NEW_LOCLOAD (cfg, ins, n);
6098 n = (*ip)-CEE_STLOC_0;
6101 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
6103 emit_stloc_ir (cfg, sp, header, n);
6110 CHECK_STACK_OVF (1);
6113 EMIT_NEW_ARGLOAD (cfg, ins, n);
6119 CHECK_STACK_OVF (1);
6122 NEW_ARGLOADA (cfg, ins, n);
6123 MONO_ADD_INS (cfg->cbb, ins);
6133 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
6135 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
6140 CHECK_STACK_OVF (1);
6143 EMIT_NEW_LOCLOAD (cfg, ins, n);
6147 case CEE_LDLOCA_S: {
6148 unsigned char *tmp_ip;
6150 CHECK_STACK_OVF (1);
6151 CHECK_LOCAL (ip [1]);
6153 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
6159 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
6168 CHECK_LOCAL (ip [1]);
6169 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
6171 emit_stloc_ir (cfg, sp, header, ip [1]);
6176 CHECK_STACK_OVF (1);
6177 EMIT_NEW_PCONST (cfg, ins, NULL);
6178 ins->type = STACK_OBJ;
6183 CHECK_STACK_OVF (1);
6184 EMIT_NEW_ICONST (cfg, ins, -1);
6197 CHECK_STACK_OVF (1);
6198 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
6204 CHECK_STACK_OVF (1);
6206 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
6212 CHECK_STACK_OVF (1);
6213 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
6219 CHECK_STACK_OVF (1);
6220 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6221 ins->type = STACK_I8;
6222 ins->dreg = alloc_dreg (cfg, STACK_I8);
6224 ins->inst_l = (gint64)read64 (ip);
6225 MONO_ADD_INS (bblock, ins);
6231 gboolean use_aotconst = FALSE;
6233 #ifdef TARGET_POWERPC
6234 /* FIXME: Clean this up */
6235 if (cfg->compile_aot)
6236 use_aotconst = TRUE;
6239 /* FIXME: we should really allocate this only late in the compilation process */
6240 f = mono_domain_alloc (cfg->domain, sizeof (float));
6242 CHECK_STACK_OVF (1);
6248 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
6250 dreg = alloc_freg (cfg);
6251 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
6252 ins->type = STACK_R8;
6254 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6255 ins->type = STACK_R8;
6256 ins->dreg = alloc_dreg (cfg, STACK_R8);
6258 MONO_ADD_INS (bblock, ins);
6268 gboolean use_aotconst = FALSE;
6270 #ifdef TARGET_POWERPC
6271 /* FIXME: Clean this up */
6272 if (cfg->compile_aot)
6273 use_aotconst = TRUE;
6276 /* FIXME: we should really allocate this only late in the compilation process */
6277 d = mono_domain_alloc (cfg->domain, sizeof (double));
6279 CHECK_STACK_OVF (1);
6285 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
6287 dreg = alloc_freg (cfg);
6288 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
6289 ins->type = STACK_R8;
6291 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6292 ins->type = STACK_R8;
6293 ins->dreg = alloc_dreg (cfg, STACK_R8);
6295 MONO_ADD_INS (bblock, ins);
6304 MonoInst *temp, *store;
6306 CHECK_STACK_OVF (1);
6310 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
6311 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
6313 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6316 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6329 if (sp [0]->type == STACK_R8)
6330 /* we need to pop the value from the x86 FP stack */
6331 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
6340 if (stack_start != sp)
6342 token = read32 (ip + 1);
6343 /* FIXME: check the signature matches */
6344 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6346 if (!cmethod || mono_loader_get_last_error ())
6349 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
6350 GENERIC_SHARING_FAILURE (CEE_JMP);
6352 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
6353 CHECK_CFG_EXCEPTION;
6355 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6357 MonoMethodSignature *fsig = mono_method_signature (cmethod);
6360 /* Handle tail calls similarly to calls */
6361 n = fsig->param_count + fsig->hasthis;
6363 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
6364 call->method = cmethod;
6365 call->tail_call = TRUE;
6366 call->signature = mono_method_signature (cmethod);
6367 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6368 call->inst.inst_p0 = cmethod;
6369 for (i = 0; i < n; ++i)
6370 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
6372 mono_arch_emit_call (cfg, call);
6373 MONO_ADD_INS (bblock, (MonoInst*)call);
6376 for (i = 0; i < num_args; ++i)
6377 /* Prevent arguments from being optimized away */
6378 arg_array [i]->flags |= MONO_INST_VOLATILE;
6380 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6381 ins = (MonoInst*)call;
6382 ins->inst_p0 = cmethod;
6383 MONO_ADD_INS (bblock, ins);
6387 start_new_bblock = 1;
6392 case CEE_CALLVIRT: {
6393 MonoInst *addr = NULL;
6394 MonoMethodSignature *fsig = NULL;
6396 int virtual = *ip == CEE_CALLVIRT;
6397 int calli = *ip == CEE_CALLI;
6398 gboolean pass_imt_from_rgctx = FALSE;
6399 MonoInst *imt_arg = NULL;
6400 gboolean pass_vtable = FALSE;
6401 gboolean pass_mrgctx = FALSE;
6402 MonoInst *vtable_arg = NULL;
6403 gboolean check_this = FALSE;
6404 gboolean supported_tail_call = FALSE;
6407 token = read32 (ip + 1);
6414 if (method->wrapper_type != MONO_WRAPPER_NONE)
6415 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6417 fsig = mono_metadata_parse_signature (image, token);
6419 n = fsig->param_count + fsig->hasthis;
6421 if (method->dynamic && fsig->pinvoke) {
6425 * This is a call through a function pointer using a pinvoke
6426 * signature. Have to create a wrapper and call that instead.
6427 * FIXME: This is very slow, need to create a wrapper at JIT time
6428 * instead based on the signature.
6430 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
6431 EMIT_NEW_PCONST (cfg, args [1], fsig);
6433 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6436 MonoMethod *cil_method;
6438 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6439 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6440 cil_method = cmethod;
6441 } else if (constrained_call) {
6442 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6444 * This is needed since get_method_constrained can't find
6445 * the method in klass representing a type var.
6446 * The type var is guaranteed to be a reference type in this
6449 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6450 cil_method = cmethod;
6451 g_assert (!cmethod->klass->valuetype);
6453 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6456 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6457 cil_method = cmethod;
6460 if (!cmethod || mono_loader_get_last_error ())
6462 if (!dont_verify && !cfg->skip_visibility) {
6463 MonoMethod *target_method = cil_method;
6464 if (method->is_inflated) {
6465 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6467 if (!mono_method_can_access_method (method_definition, target_method) &&
6468 !mono_method_can_access_method (method, cil_method))
6469 METHOD_ACCESS_FAILURE;
6472 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6473 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6475 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6476 /* MS.NET seems to silently convert this to a callvirt */
6481 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
6482 * converts to a callvirt.
6484 * tests/bug-515884.il is an example of this behavior
6486 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
6487 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
6488 if (!virtual && cmethod->klass->marshalbyref && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
6492 if (!cmethod->klass->inited)
6493 if (!mono_class_init (cmethod->klass))
6496 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6497 mini_class_is_system_array (cmethod->klass)) {
6498 array_rank = cmethod->klass->rank;
6499 fsig = mono_method_signature (cmethod);
6501 fsig = mono_method_signature (cmethod);
6506 if (fsig->pinvoke) {
6507 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6508 check_for_pending_exc, FALSE);
6509 fsig = mono_method_signature (wrapper);
6510 } else if (constrained_call) {
6511 fsig = mono_method_signature (cmethod);
6513 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6517 mono_save_token_info (cfg, image, token, cil_method);
6519 n = fsig->param_count + fsig->hasthis;
6521 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6522 if (check_linkdemand (cfg, method, cmethod))
6524 CHECK_CFG_EXCEPTION;
6527 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6528 g_assert_not_reached ();
6531 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6534 if (!cfg->generic_sharing_context && cmethod)
6535 g_assert (!mono_method_check_context_used (cmethod));
6539 //g_assert (!virtual || fsig->hasthis);
6543 if (constrained_call) {
6545 * We have the `constrained.' prefix opcode.
6547 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6549 * The type parameter is instantiated as a valuetype,
6550 * but that type doesn't override the method we're
6551 * calling, so we need to box `this'.
6553 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6554 ins->klass = constrained_call;
6555 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
6556 CHECK_CFG_EXCEPTION;
6557 } else if (!constrained_call->valuetype) {
6558 int dreg = alloc_preg (cfg);
6561 * The type parameter is instantiated as a reference
6562 * type. We have a managed pointer on the stack, so
6563 * we need to dereference it here.
6565 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6566 ins->type = STACK_OBJ;
6568 } else if (cmethod->klass->valuetype)
6570 constrained_call = NULL;
6573 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6577 * If the callee is a shared method, then its static cctor
6578 * might not get called after the call was patched.
6580 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6581 emit_generic_class_init (cfg, cmethod->klass);
6582 CHECK_TYPELOAD (cmethod->klass);
6585 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6586 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6587 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6588 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6589 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6592 * Pass vtable iff target method might
6593 * be shared, which means that sharing
6594 * is enabled for its class and its
6595 * context is sharable (and it's not a
6598 if (sharing_enabled && context_sharable &&
6599 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6603 if (cmethod && mini_method_get_context (cmethod) &&
6604 mini_method_get_context (cmethod)->method_inst) {
6605 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6606 MonoGenericContext *context = mini_method_get_context (cmethod);
6607 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6609 g_assert (!pass_vtable);
6611 if (sharing_enabled && context_sharable)
6615 if (cfg->generic_sharing_context && cmethod) {
6616 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6618 context_used = mono_method_check_context_used (cmethod);
6620 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6621 /* Generic method interface
6622 calls are resolved via a
6623 helper function and don't
6625 if (!cmethod_context || !cmethod_context->method_inst)
6626 pass_imt_from_rgctx = TRUE;
6630 * If a shared method calls another
6631 * shared method then the caller must
6632 * have a generic sharing context
6633 * because the magic trampoline
6634 * requires it. FIXME: We shouldn't
6635 * have to force the vtable/mrgctx
6636 * variable here. Instead there
6637 * should be a flag in the cfg to
6638 * request a generic sharing context.
6641 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6642 mono_get_vtable_var (cfg);
6647 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6649 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6651 CHECK_TYPELOAD (cmethod->klass);
6652 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6657 g_assert (!vtable_arg);
6659 if (!cfg->compile_aot) {
6661 * emit_get_rgctx_method () calls mono_class_vtable () so check
6662 * for type load errors before.
6664 mono_class_setup_vtable (cmethod->klass);
6665 CHECK_TYPELOAD (cmethod->klass);
6668 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6670 /* !marshalbyref is needed to properly handle generic methods + remoting */
6671 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6672 MONO_METHOD_IS_FINAL (cmethod)) &&
6673 !cmethod->klass->marshalbyref) {
6680 if (pass_imt_from_rgctx) {
6681 g_assert (!pass_vtable);
6684 imt_arg = emit_get_rgctx_method (cfg, context_used,
6685 cmethod, MONO_RGCTX_INFO_METHOD);
6689 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
6691 /* Calling virtual generic methods */
6692 if (cmethod && virtual &&
6693 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6694 !(MONO_METHOD_IS_FINAL (cmethod) &&
6695 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6696 mono_method_signature (cmethod)->generic_param_count) {
6697 MonoInst *this_temp, *this_arg_temp, *store;
6698 MonoInst *iargs [4];
6700 g_assert (mono_method_signature (cmethod)->is_inflated);
6702 /* Prevent inlining of methods that contain indirect calls */
6705 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
6706 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt) {
6707 g_assert (!imt_arg);
6709 g_assert (cmethod->is_inflated);
6710 imt_arg = emit_get_rgctx_method (cfg, context_used,
6711 cmethod, MONO_RGCTX_INFO_METHOD);
6712 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6716 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6717 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6718 MONO_ADD_INS (bblock, store);
6720 /* FIXME: This should be a managed pointer */
6721 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6723 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6724 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6725 cmethod, MONO_RGCTX_INFO_METHOD);
6726 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6727 addr = mono_emit_jit_icall (cfg,
6728 mono_helper_compile_generic_method, iargs);
6730 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6732 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6735 if (!MONO_TYPE_IS_VOID (fsig->ret))
6736 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6738 CHECK_CFG_EXCEPTION;
6745 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6746 supported_tail_call = cmethod && MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
6748 supported_tail_call = cmethod && mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6752 /* FIXME: runtime generic context pointer for jumps? */
6753 /* FIXME: handle this for generic sharing eventually */
6754 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) && supported_tail_call) {
6757 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6760 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6761 /* Handle tail calls similarly to calls */
6762 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE);
6764 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6765 call->tail_call = TRUE;
6766 call->method = cmethod;
6767 call->signature = mono_method_signature (cmethod);
6770 * We implement tail calls by storing the actual arguments into the
6771 * argument variables, then emitting a CEE_JMP.
6773 for (i = 0; i < n; ++i) {
6774 /* Prevent argument from being register allocated */
6775 arg_array [i]->flags |= MONO_INST_VOLATILE;
6776 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6780 ins = (MonoInst*)call;
6781 ins->inst_p0 = cmethod;
6782 ins->inst_p1 = arg_array [0];
6783 MONO_ADD_INS (bblock, ins);
6784 link_bblock (cfg, bblock, end_bblock);
6785 start_new_bblock = 1;
6787 CHECK_CFG_EXCEPTION;
6789 /* skip CEE_RET as well */
6796 * Implement a workaround for the inherent races involved in locking:
6802 * If a thread abort happens between the call to Monitor.Enter () and the start of the
6803 * try block, the Exit () won't be executed, see:
6804 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
6805 * To work around this, we extend such try blocks to include the last x bytes
6806 * of the Monitor.Enter () call.
6808 if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
6809 MonoBasicBlock *tbb;
6811 GET_BBLOCK (cfg, tbb, ip + 5);
6813 * Only extend try blocks with a finally, to avoid catching exceptions thrown
6814 * from Monitor.Enter like ArgumentNullException.
6816 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
6817 /* Mark this bblock as needing to be extended */
6818 tbb->extend_try_block = TRUE;
6822 /* Conversion to a JIT intrinsic */
6823 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6825 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6826 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6831 CHECK_CFG_EXCEPTION;
6839 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6840 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6841 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
6842 !g_list_find (dont_inline, cmethod)) {
6844 gboolean always = FALSE;
6846 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6847 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6848 /* Prevent inlining of methods that call wrappers */
6850 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6854 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, always))) {
6856 cfg->real_offset += 5;
6859 if (!MONO_TYPE_IS_VOID (fsig->ret))
6860 /* *sp is already set by inline_method */
6863 inline_costs += costs;
6869 inline_costs += 10 * num_calls++;
6871 /* Tail recursion elimination */
6872 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6873 gboolean has_vtargs = FALSE;
6876 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6879 /* keep it simple */
6880 for (i = fsig->param_count - 1; i >= 0; i--) {
6881 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6886 for (i = 0; i < n; ++i)
6887 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6888 MONO_INST_NEW (cfg, ins, OP_BR);
6889 MONO_ADD_INS (bblock, ins);
6890 tblock = start_bblock->out_bb [0];
6891 link_bblock (cfg, bblock, tblock);
6892 ins->inst_target_bb = tblock;
6893 start_new_bblock = 1;
6895 /* skip the CEE_RET, too */
6896 if (ip_in_bb (cfg, bblock, ip + 5))
6906 /* Generic sharing */
6907 /* FIXME: only do this for generic methods if
6908 they are not shared! */
6909 if (context_used && !imt_arg && !array_rank &&
6910 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6911 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6912 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6913 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6916 g_assert (cfg->generic_sharing_context && cmethod);
6920 * We are compiling a call to a
6921 * generic method from shared code,
6922 * which means that we have to look up
6923 * the method in the rgctx and do an
6926 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6929 /* Indirect calls */
6931 g_assert (!imt_arg);
6933 if (*ip == CEE_CALL)
6934 g_assert (context_used);
6935 else if (*ip == CEE_CALLI)
6936 g_assert (!vtable_arg);
6938 /* FIXME: what the hell is this??? */
6939 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6940 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6942 /* Prevent inlining of methods with indirect calls */
6947 int rgctx_reg = mono_alloc_preg (cfg);
6949 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6950 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6951 call = (MonoCallInst*)ins;
6952 set_rgctx_arg (cfg, call, rgctx_reg, vtable_arg);
6954 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6956 * Instead of emitting an indirect call, emit a direct call
6957 * with the contents of the aotconst as the patch info.
6959 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6961 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6962 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
6965 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6968 if (!MONO_TYPE_IS_VOID (fsig->ret))
6969 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6971 CHECK_CFG_EXCEPTION;
6982 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6983 if (sp [fsig->param_count]->type == STACK_OBJ) {
6984 MonoInst *iargs [2];
6987 iargs [1] = sp [fsig->param_count];
6989 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6992 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6993 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6994 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6995 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6997 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
7000 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
7001 if (!cmethod->klass->element_class->valuetype && !readonly)
7002 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
7003 CHECK_TYPELOAD (cmethod->klass);
7006 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
7009 g_assert_not_reached ();
7012 CHECK_CFG_EXCEPTION;
7019 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
7021 if (!MONO_TYPE_IS_VOID (fsig->ret))
7022 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7024 CHECK_CFG_EXCEPTION;
7034 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
7036 } else if (imt_arg) {
7037 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
7039 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
7042 if (!MONO_TYPE_IS_VOID (fsig->ret))
7043 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7045 CHECK_CFG_EXCEPTION;
7052 if (cfg->method != method) {
7053 /* return from inlined method */
7055 * If in_count == 0, that means the ret is unreachable due to
7056 * being preceded by a throw. In that case, inline_method () will
7057 * handle setting the return value
7058 * (test case: test_0_inline_throw ()).
7060 if (return_var && cfg->cbb->in_count) {
7064 //g_assert (returnvar != -1);
7065 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
7066 cfg->ret_var_set = TRUE;
7070 MonoType *ret_type = mono_method_signature (method)->ret;
7074 * Place a seq point here too even though the IL stack is not
7075 * empty, so a step over on
7078 * will work correctly.
7080 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
7081 MONO_ADD_INS (cfg->cbb, ins);
7084 g_assert (!return_var);
7087 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
7090 if (!cfg->vret_addr) {
7093 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
7095 EMIT_NEW_RETLOADA (cfg, ret_addr);
7097 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
7098 ins->klass = mono_class_from_mono_type (ret_type);
7101 #ifdef MONO_ARCH_SOFT_FLOAT
7102 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
7103 MonoInst *iargs [1];
7107 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
7108 mono_arch_emit_setret (cfg, method, conv);
7110 mono_arch_emit_setret (cfg, method, *sp);
7113 mono_arch_emit_setret (cfg, method, *sp);
7118 if (sp != stack_start)
7120 MONO_INST_NEW (cfg, ins, OP_BR);
7122 ins->inst_target_bb = end_bblock;
7123 MONO_ADD_INS (bblock, ins);
7124 link_bblock (cfg, bblock, end_bblock);
7125 start_new_bblock = 1;
7129 MONO_INST_NEW (cfg, ins, OP_BR);
7131 target = ip + 1 + (signed char)(*ip);
7133 GET_BBLOCK (cfg, tblock, target);
7134 link_bblock (cfg, bblock, tblock);
7135 ins->inst_target_bb = tblock;
7136 if (sp != stack_start) {
7137 handle_stack_args (cfg, stack_start, sp - stack_start);
7139 CHECK_UNVERIFIABLE (cfg);
7141 MONO_ADD_INS (bblock, ins);
7142 start_new_bblock = 1;
7143 inline_costs += BRANCH_COST;
7157 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
7159 target = ip + 1 + *(signed char*)ip;
7165 inline_costs += BRANCH_COST;
7169 MONO_INST_NEW (cfg, ins, OP_BR);
7172 target = ip + 4 + (gint32)read32(ip);
7174 GET_BBLOCK (cfg, tblock, target);
7175 link_bblock (cfg, bblock, tblock);
7176 ins->inst_target_bb = tblock;
7177 if (sp != stack_start) {
7178 handle_stack_args (cfg, stack_start, sp - stack_start);
7180 CHECK_UNVERIFIABLE (cfg);
7183 MONO_ADD_INS (bblock, ins);
7185 start_new_bblock = 1;
7186 inline_costs += BRANCH_COST;
7193 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
7194 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
7195 guint32 opsize = is_short ? 1 : 4;
7197 CHECK_OPSIZE (opsize);
7199 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
7202 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
7207 GET_BBLOCK (cfg, tblock, target);
7208 link_bblock (cfg, bblock, tblock);
7209 GET_BBLOCK (cfg, tblock, ip);
7210 link_bblock (cfg, bblock, tblock);
7212 if (sp != stack_start) {
7213 handle_stack_args (cfg, stack_start, sp - stack_start);
7214 CHECK_UNVERIFIABLE (cfg);
7217 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
7218 cmp->sreg1 = sp [0]->dreg;
7219 type_from_op (cmp, sp [0], NULL);
7222 #if SIZEOF_REGISTER == 4
7223 if (cmp->opcode == OP_LCOMPARE_IMM) {
7224 /* Convert it to OP_LCOMPARE */
7225 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7226 ins->type = STACK_I8;
7227 ins->dreg = alloc_dreg (cfg, STACK_I8);
7229 MONO_ADD_INS (bblock, ins);
7230 cmp->opcode = OP_LCOMPARE;
7231 cmp->sreg2 = ins->dreg;
7234 MONO_ADD_INS (bblock, cmp);
7236 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
7237 type_from_op (ins, sp [0], NULL);
7238 MONO_ADD_INS (bblock, ins);
7239 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
7240 GET_BBLOCK (cfg, tblock, target);
7241 ins->inst_true_bb = tblock;
7242 GET_BBLOCK (cfg, tblock, ip);
7243 ins->inst_false_bb = tblock;
7244 start_new_bblock = 2;
7247 inline_costs += BRANCH_COST;
7262 MONO_INST_NEW (cfg, ins, *ip);
7264 target = ip + 4 + (gint32)read32(ip);
7270 inline_costs += BRANCH_COST;
7274 MonoBasicBlock **targets;
7275 MonoBasicBlock *default_bblock;
7276 MonoJumpInfoBBTable *table;
7277 int offset_reg = alloc_preg (cfg);
7278 int target_reg = alloc_preg (cfg);
7279 int table_reg = alloc_preg (cfg);
7280 int sum_reg = alloc_preg (cfg);
7281 gboolean use_op_switch;
7285 n = read32 (ip + 1);
7288 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
7292 CHECK_OPSIZE (n * sizeof (guint32));
7293 target = ip + n * sizeof (guint32);
7295 GET_BBLOCK (cfg, default_bblock, target);
7296 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7298 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
7299 for (i = 0; i < n; ++i) {
7300 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
7301 targets [i] = tblock;
7302 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
7306 if (sp != stack_start) {
7308 * Link the current bb with the targets as well, so handle_stack_args
7309 * will set their in_stack correctly.
7311 link_bblock (cfg, bblock, default_bblock);
7312 for (i = 0; i < n; ++i)
7313 link_bblock (cfg, bblock, targets [i]);
7315 handle_stack_args (cfg, stack_start, sp - stack_start);
7317 CHECK_UNVERIFIABLE (cfg);
7320 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
7321 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
7324 for (i = 0; i < n; ++i)
7325 link_bblock (cfg, bblock, targets [i]);
7327 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
7328 table->table = targets;
7329 table->table_size = n;
7331 use_op_switch = FALSE;
7333 /* ARM implements SWITCH statements differently */
7334 /* FIXME: Make it use the generic implementation */
7335 if (!cfg->compile_aot)
7336 use_op_switch = TRUE;
7339 if (COMPILE_LLVM (cfg))
7340 use_op_switch = TRUE;
7342 cfg->cbb->has_jump_table = 1;
7344 if (use_op_switch) {
7345 MONO_INST_NEW (cfg, ins, OP_SWITCH);
7346 ins->sreg1 = src1->dreg;
7347 ins->inst_p0 = table;
7348 ins->inst_many_bb = targets;
7349 ins->klass = GUINT_TO_POINTER (n);
7350 MONO_ADD_INS (cfg->cbb, ins);
7352 if (sizeof (gpointer) == 8)
7353 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
7355 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
7357 #if SIZEOF_REGISTER == 8
7358 /* The upper word might not be zero, and we add it to a 64 bit address later */
7359 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
7362 if (cfg->compile_aot) {
7363 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
7365 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
7366 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
7367 ins->inst_p0 = table;
7368 ins->dreg = table_reg;
7369 MONO_ADD_INS (cfg->cbb, ins);
7372 /* FIXME: Use load_memindex */
7373 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
7374 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
7375 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
7377 start_new_bblock = 1;
7378 inline_costs += (BRANCH_COST * 2);
7398 dreg = alloc_freg (cfg);
7401 dreg = alloc_lreg (cfg);
7404 dreg = alloc_preg (cfg);
7407 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
7408 ins->type = ldind_type [*ip - CEE_LDIND_I1];
7409 ins->flags |= ins_flag;
7411 MONO_ADD_INS (bblock, ins);
7426 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
7427 ins->flags |= ins_flag;
7429 MONO_ADD_INS (bblock, ins);
7431 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
7432 emit_write_barrier (cfg, sp [0], sp [1], -1);
7441 MONO_INST_NEW (cfg, ins, (*ip));
7443 ins->sreg1 = sp [0]->dreg;
7444 ins->sreg2 = sp [1]->dreg;
7445 type_from_op (ins, sp [0], sp [1]);
7447 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7449 /* Use the immediate opcodes if possible */
7450 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
7451 int imm_opcode = mono_op_to_op_imm (ins->opcode);
7452 if (imm_opcode != -1) {
7453 ins->opcode = imm_opcode;
7454 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7457 sp [1]->opcode = OP_NOP;
7461 MONO_ADD_INS ((cfg)->cbb, (ins));
7463 *sp++ = mono_decompose_opcode (cfg, ins);
7480 MONO_INST_NEW (cfg, ins, (*ip));
7482 ins->sreg1 = sp [0]->dreg;
7483 ins->sreg2 = sp [1]->dreg;
7484 type_from_op (ins, sp [0], sp [1]);
7486 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7487 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7489 /* FIXME: Pass opcode to is_inst_imm */
7491 /* Use the immediate opcodes if possible */
7492 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7495 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7496 if (imm_opcode != -1) {
7497 ins->opcode = imm_opcode;
7498 if (sp [1]->opcode == OP_I8CONST) {
7499 #if SIZEOF_REGISTER == 8
7500 ins->inst_imm = sp [1]->inst_l;
7502 ins->inst_ls_word = sp [1]->inst_ls_word;
7503 ins->inst_ms_word = sp [1]->inst_ms_word;
7507 ins->inst_imm = (gssize)(sp [1]->inst_c0);
7510 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7511 if (sp [1]->next == NULL)
7512 sp [1]->opcode = OP_NOP;
7515 MONO_ADD_INS ((cfg)->cbb, (ins));
7517 *sp++ = mono_decompose_opcode (cfg, ins);
7530 case CEE_CONV_OVF_I8:
7531 case CEE_CONV_OVF_U8:
7535 /* Special case this earlier so we have long constants in the IR */
7536 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7537 int data = sp [-1]->inst_c0;
7538 sp [-1]->opcode = OP_I8CONST;
7539 sp [-1]->type = STACK_I8;
7540 #if SIZEOF_REGISTER == 8
7541 if ((*ip) == CEE_CONV_U8)
7542 sp [-1]->inst_c0 = (guint32)data;
7544 sp [-1]->inst_c0 = data;
7546 sp [-1]->inst_ls_word = data;
7547 if ((*ip) == CEE_CONV_U8)
7548 sp [-1]->inst_ms_word = 0;
7550 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7552 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7559 case CEE_CONV_OVF_I4:
7560 case CEE_CONV_OVF_I1:
7561 case CEE_CONV_OVF_I2:
7562 case CEE_CONV_OVF_I:
7563 case CEE_CONV_OVF_U:
7566 if (sp [-1]->type == STACK_R8) {
7567 ADD_UNOP (CEE_CONV_OVF_I8);
7574 case CEE_CONV_OVF_U1:
7575 case CEE_CONV_OVF_U2:
7576 case CEE_CONV_OVF_U4:
7579 if (sp [-1]->type == STACK_R8) {
7580 ADD_UNOP (CEE_CONV_OVF_U8);
7587 case CEE_CONV_OVF_I1_UN:
7588 case CEE_CONV_OVF_I2_UN:
7589 case CEE_CONV_OVF_I4_UN:
7590 case CEE_CONV_OVF_I8_UN:
7591 case CEE_CONV_OVF_U1_UN:
7592 case CEE_CONV_OVF_U2_UN:
7593 case CEE_CONV_OVF_U4_UN:
7594 case CEE_CONV_OVF_U8_UN:
7595 case CEE_CONV_OVF_I_UN:
7596 case CEE_CONV_OVF_U_UN:
7603 CHECK_CFG_EXCEPTION;
7607 case CEE_ADD_OVF_UN:
7609 case CEE_MUL_OVF_UN:
7611 case CEE_SUB_OVF_UN:
7619 token = read32 (ip + 1);
7620 klass = mini_get_class (method, token, generic_context);
7621 CHECK_TYPELOAD (klass);
7623 if (generic_class_is_reference_type (cfg, klass)) {
7624 MonoInst *store, *load;
7625 int dreg = alloc_preg (cfg);
7627 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7628 load->flags |= ins_flag;
7629 MONO_ADD_INS (cfg->cbb, load);
7631 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7632 store->flags |= ins_flag;
7633 MONO_ADD_INS (cfg->cbb, store);
7635 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
7636 emit_write_barrier (cfg, sp [0], sp [1], -1);
7638 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7650 token = read32 (ip + 1);
7651 klass = mini_get_class (method, token, generic_context);
7652 CHECK_TYPELOAD (klass);
7654 /* Optimize the common ldobj+stloc combination */
7664 loc_index = ip [5] - CEE_STLOC_0;
7671 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7672 CHECK_LOCAL (loc_index);
7674 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7675 ins->dreg = cfg->locals [loc_index]->dreg;
7681 /* Optimize the ldobj+stobj combination */
7682 /* The reference case ends up being a load+store anyway */
7683 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7688 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7695 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7704 CHECK_STACK_OVF (1);
7706 n = read32 (ip + 1);
7708 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7709 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7710 ins->type = STACK_OBJ;
7713 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7714 MonoInst *iargs [1];
7716 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7717 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7719 if (cfg->opt & MONO_OPT_SHARED) {
7720 MonoInst *iargs [3];
7722 if (cfg->compile_aot) {
7723 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7725 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7726 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7727 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7728 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7729 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7731 if (bblock->out_of_line) {
7732 MonoInst *iargs [2];
7734 if (image == mono_defaults.corlib) {
7736 * Avoid relocations in AOT and save some space by using a
7737 * version of helper_ldstr specialized to mscorlib.
7739 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7740 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7742 /* Avoid creating the string object */
7743 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7744 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7745 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7749 if (cfg->compile_aot) {
7750 NEW_LDSTRCONST (cfg, ins, image, n);
7752 MONO_ADD_INS (bblock, ins);
7755 NEW_PCONST (cfg, ins, NULL);
7756 ins->type = STACK_OBJ;
7757 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7759 OUT_OF_MEMORY_FAILURE;
7762 MONO_ADD_INS (bblock, ins);
7771 MonoInst *iargs [2];
7772 MonoMethodSignature *fsig;
7775 MonoInst *vtable_arg = NULL;
7778 token = read32 (ip + 1);
7779 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7780 if (!cmethod || mono_loader_get_last_error ())
7782 fsig = mono_method_get_signature (cmethod, image, token);
7786 mono_save_token_info (cfg, image, token, cmethod);
7788 if (!mono_class_init (cmethod->klass))
7791 if (cfg->generic_sharing_context)
7792 context_used = mono_method_check_context_used (cmethod);
7794 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7795 if (check_linkdemand (cfg, method, cmethod))
7797 CHECK_CFG_EXCEPTION;
7798 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7799 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7802 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
7803 emit_generic_class_init (cfg, cmethod->klass);
7804 CHECK_TYPELOAD (cmethod->klass);
7807 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7808 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7809 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7810 mono_class_vtable (cfg->domain, cmethod->klass);
7811 CHECK_TYPELOAD (cmethod->klass);
7813 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7814 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7817 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7818 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7820 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7822 CHECK_TYPELOAD (cmethod->klass);
7823 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7828 n = fsig->param_count;
7832 * Generate smaller code for the common newobj <exception> instruction in
7833 * argument checking code.
7835 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7836 is_exception_class (cmethod->klass) && n <= 2 &&
7837 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7838 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7839 MonoInst *iargs [3];
7841 g_assert (!vtable_arg);
7845 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7848 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7852 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7857 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7860 g_assert_not_reached ();
7868 /* move the args to allow room for 'this' in the first position */
7874 /* check_call_signature () requires sp[0] to be set */
7875 this_ins.type = STACK_OBJ;
7877 if (check_call_signature (cfg, fsig, sp))
7882 if (mini_class_is_system_array (cmethod->klass)) {
7883 g_assert (!vtable_arg);
7885 *sp = emit_get_rgctx_method (cfg, context_used,
7886 cmethod, MONO_RGCTX_INFO_METHOD);
7888 /* Avoid varargs in the common case */
7889 if (fsig->param_count == 1)
7890 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7891 else if (fsig->param_count == 2)
7892 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7893 else if (fsig->param_count == 3)
7894 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
7896 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7897 } else if (cmethod->string_ctor) {
7898 g_assert (!context_used);
7899 g_assert (!vtable_arg);
7900 /* we simply pass a null pointer */
7901 EMIT_NEW_PCONST (cfg, *sp, NULL);
7902 /* now call the string ctor */
7903 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7905 MonoInst* callvirt_this_arg = NULL;
7907 if (cmethod->klass->valuetype) {
7908 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7909 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7910 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7915 * The code generated by mini_emit_virtual_call () expects
7916 * iargs [0] to be a boxed instance, but luckily the vcall
7917 * will be transformed into a normal call there.
7919 } else if (context_used) {
7920 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
7923 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7925 CHECK_TYPELOAD (cmethod->klass);
7928 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7929 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7930 * As a workaround, we call class cctors before allocating objects.
7932 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7933 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7934 if (cfg->verbose_level > 2)
7935 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7936 class_inits = g_slist_prepend (class_inits, vtable);
7939 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
7942 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
7945 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7947 /* Now call the actual ctor */
7948 /* Avoid virtual calls to ctors if possible */
7949 if (cmethod->klass->marshalbyref)
7950 callvirt_this_arg = sp [0];
7953 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7954 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
7955 type_to_eval_stack_type ((cfg), fsig->ret, ins);
7960 CHECK_CFG_EXCEPTION;
7961 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7962 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
7963 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7964 !g_list_find (dont_inline, cmethod)) {
7967 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7968 cfg->real_offset += 5;
7971 inline_costs += costs - 5;
7974 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7976 } else if (context_used &&
7977 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7978 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7979 MonoInst *cmethod_addr;
7981 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7982 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7984 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7987 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7988 callvirt_this_arg, NULL, vtable_arg);
7992 if (alloc == NULL) {
7994 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7995 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
8009 token = read32 (ip + 1);
8010 klass = mini_get_class (method, token, generic_context);
8011 CHECK_TYPELOAD (klass);
8012 if (sp [0]->type != STACK_OBJ)
8015 if (cfg->generic_sharing_context)
8016 context_used = mono_class_check_context_used (klass);
8018 if (!context_used && mini_class_has_reference_variant_generic_argument (klass, context_used)) {
8019 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
8026 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8029 /*FIXME AOT support*/
8030 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8032 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
8033 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
8036 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8037 MonoMethod *mono_castclass;
8038 MonoInst *iargs [1];
8041 mono_castclass = mono_marshal_get_castclass (klass);
8044 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
8045 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8046 CHECK_CFG_EXCEPTION;
8047 g_assert (costs > 0);
8050 cfg->real_offset += 5;
8055 inline_costs += costs;
8058 ins = handle_castclass (cfg, klass, *sp, context_used);
8059 CHECK_CFG_EXCEPTION;
8069 token = read32 (ip + 1);
8070 klass = mini_get_class (method, token, generic_context);
8071 CHECK_TYPELOAD (klass);
8072 if (sp [0]->type != STACK_OBJ)
8075 if (cfg->generic_sharing_context)
8076 context_used = mono_class_check_context_used (klass);
8078 if (!context_used && mini_class_has_reference_variant_generic_argument (klass, context_used)) {
8079 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
8086 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8089 /*FIXME AOT support*/
8090 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8092 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
8095 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8096 MonoMethod *mono_isinst;
8097 MonoInst *iargs [1];
8100 mono_isinst = mono_marshal_get_isinst (klass);
8103 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
8104 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8105 CHECK_CFG_EXCEPTION;
8106 g_assert (costs > 0);
8109 cfg->real_offset += 5;
8114 inline_costs += costs;
8117 ins = handle_isinst (cfg, klass, *sp, context_used);
8118 CHECK_CFG_EXCEPTION;
8125 case CEE_UNBOX_ANY: {
8129 token = read32 (ip + 1);
8130 klass = mini_get_class (method, token, generic_context);
8131 CHECK_TYPELOAD (klass);
8133 mono_save_token_info (cfg, image, token, klass);
8135 if (cfg->generic_sharing_context)
8136 context_used = mono_class_check_context_used (klass);
8138 if (generic_class_is_reference_type (cfg, klass)) {
8139 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
8140 if (!context_used && mini_class_has_reference_variant_generic_argument (klass, context_used)) {
8141 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
8148 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8151 /*FIXME AOT support*/
8152 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8154 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
8155 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
8158 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8159 MonoMethod *mono_castclass;
8160 MonoInst *iargs [1];
8163 mono_castclass = mono_marshal_get_castclass (klass);
8166 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
8167 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8168 CHECK_CFG_EXCEPTION;
8169 g_assert (costs > 0);
8172 cfg->real_offset += 5;
8176 inline_costs += costs;
8178 ins = handle_castclass (cfg, klass, *sp, context_used);
8179 CHECK_CFG_EXCEPTION;
8187 if (mono_class_is_nullable (klass)) {
8188 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
8195 ins = handle_unbox (cfg, klass, sp, context_used);
8201 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
8214 token = read32 (ip + 1);
8215 klass = mini_get_class (method, token, generic_context);
8216 CHECK_TYPELOAD (klass);
8218 mono_save_token_info (cfg, image, token, klass);
8220 if (cfg->generic_sharing_context)
8221 context_used = mono_class_check_context_used (klass);
8223 if (generic_class_is_reference_type (cfg, klass)) {
8229 if (klass == mono_defaults.void_class)
8231 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
8233 /* frequent check in generic code: box (struct), brtrue */
8235 // FIXME: LLVM can't handle the inconsistent bb linking
8236 if (!mono_class_is_nullable (klass) &&
8237 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
8238 (ip [5] == CEE_BRTRUE ||
8239 ip [5] == CEE_BRTRUE_S ||
8240 ip [5] == CEE_BRFALSE ||
8241 ip [5] == CEE_BRFALSE_S)) {
8242 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
8244 MonoBasicBlock *true_bb, *false_bb;
8248 if (cfg->verbose_level > 3) {
8249 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8250 printf ("<box+brtrue opt>\n");
8258 target = ip + 1 + (signed char)(*ip);
8265 target = ip + 4 + (gint)(read32 (ip));
8269 g_assert_not_reached ();
8273 * We need to link both bblocks, since it is needed for handling stack
8274 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
8275 * Branching to only one of them would lead to inconsistencies, so
8276 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
8278 GET_BBLOCK (cfg, true_bb, target);
8279 GET_BBLOCK (cfg, false_bb, ip);
8281 mono_link_bblock (cfg, cfg->cbb, true_bb);
8282 mono_link_bblock (cfg, cfg->cbb, false_bb);
8284 if (sp != stack_start) {
8285 handle_stack_args (cfg, stack_start, sp - stack_start);
8287 CHECK_UNVERIFIABLE (cfg);
8290 if (COMPILE_LLVM (cfg)) {
8291 dreg = alloc_ireg (cfg);
8292 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
8293 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
8295 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
8297 /* The JIT can't eliminate the iconst+compare */
8298 MONO_INST_NEW (cfg, ins, OP_BR);
8299 ins->inst_target_bb = is_true ? true_bb : false_bb;
8300 MONO_ADD_INS (cfg->cbb, ins);
8303 start_new_bblock = 1;
8307 *sp++ = handle_box (cfg, val, klass, context_used);
8309 CHECK_CFG_EXCEPTION;
8318 token = read32 (ip + 1);
8319 klass = mini_get_class (method, token, generic_context);
8320 CHECK_TYPELOAD (klass);
8322 mono_save_token_info (cfg, image, token, klass);
8324 if (cfg->generic_sharing_context)
8325 context_used = mono_class_check_context_used (klass);
8327 if (mono_class_is_nullable (klass)) {
8330 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
8331 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
8335 ins = handle_unbox (cfg, klass, sp, context_used);
8345 MonoClassField *field;
8349 if (*ip == CEE_STFLD) {
8356 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
8358 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
8361 token = read32 (ip + 1);
8362 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8363 field = mono_method_get_wrapper_data (method, token);
8364 klass = field->parent;
8367 field = mono_field_from_token (image, token, &klass, generic_context);
8371 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8372 FIELD_ACCESS_FAILURE;
8373 mono_class_init (klass);
8375 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
8376 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
8377 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8378 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8381 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
8382 if (*ip == CEE_STFLD) {
8383 if (target_type_is_incompatible (cfg, field->type, sp [1]))
8385 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8386 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
8387 MonoInst *iargs [5];
8390 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8391 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8392 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
8396 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8397 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
8398 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8399 CHECK_CFG_EXCEPTION;
8400 g_assert (costs > 0);
8402 cfg->real_offset += 5;
8405 inline_costs += costs;
8407 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
8412 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8414 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
8415 if (sp [0]->opcode != OP_LDADDR)
8416 store->flags |= MONO_INST_FAULT;
8418 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
8419 /* insert call to write barrier */
8423 dreg = alloc_preg (cfg);
8424 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8425 emit_write_barrier (cfg, ptr, sp [1], -1);
8428 store->flags |= ins_flag;
8435 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8436 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
8437 MonoInst *iargs [4];
8440 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8441 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8442 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
8443 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8444 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
8445 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8446 CHECK_CFG_EXCEPTION;
8448 g_assert (costs > 0);
8450 cfg->real_offset += 5;
8454 inline_costs += costs;
8456 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
8460 if (sp [0]->type == STACK_VTYPE) {
8463 /* Have to compute the address of the variable */
8465 var = get_vreg_to_inst (cfg, sp [0]->dreg);
8467 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
8469 g_assert (var->klass == klass);
8471 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
8475 if (*ip == CEE_LDFLDA) {
8476 if (sp [0]->type == STACK_OBJ) {
8477 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
8478 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
8481 dreg = alloc_preg (cfg);
8483 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8484 ins->klass = mono_class_from_mono_type (field->type);
8485 ins->type = STACK_MP;
8490 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8492 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
8493 load->flags |= ins_flag;
8494 if (sp [0]->opcode != OP_LDADDR)
8495 load->flags |= MONO_INST_FAULT;
8506 MonoClassField *field;
8507 gpointer addr = NULL;
8508 gboolean is_special_static;
8511 token = read32 (ip + 1);
8513 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8514 field = mono_method_get_wrapper_data (method, token);
8515 klass = field->parent;
8518 field = mono_field_from_token (image, token, &klass, generic_context);
8521 mono_class_init (klass);
8522 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8523 FIELD_ACCESS_FAILURE;
8525 /* if the class is Critical then transparent code cannot access its fields */
8526 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8527 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8530 * We can only support shared generic static
8531 * field access on architectures where the
8532 * trampoline code has been extended to handle
8533 * the generic class init.
8535 #ifndef MONO_ARCH_VTABLE_REG
8536 GENERIC_SHARING_FAILURE (*ip);
8539 if (cfg->generic_sharing_context)
8540 context_used = mono_class_check_context_used (klass);
8542 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
8544 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8545 * to be called here.
8547 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8548 mono_class_vtable (cfg->domain, klass);
8549 CHECK_TYPELOAD (klass);
8551 mono_domain_lock (cfg->domain);
8552 if (cfg->domain->special_static_fields)
8553 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8554 mono_domain_unlock (cfg->domain);
8556 is_special_static = mono_class_field_is_special_static (field);
8558 /* Generate IR to compute the field address */
8559 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
8561 * Fast access to TLS data
8562 * Inline version of get_thread_static_data () in
8566 int idx, static_data_reg, array_reg, dreg;
8567 MonoInst *thread_ins;
8569 // offset &= 0x7fffffff;
8570 // idx = (offset >> 24) - 1;
8571 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
8573 thread_ins = mono_get_thread_intrinsic (cfg);
8574 MONO_ADD_INS (cfg->cbb, thread_ins);
8575 static_data_reg = alloc_ireg (cfg);
8576 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
8578 if (cfg->compile_aot) {
8579 int offset_reg, offset2_reg, idx_reg;
8581 /* For TLS variables, this will return the TLS offset */
8582 EMIT_NEW_SFLDACONST (cfg, ins, field);
8583 offset_reg = ins->dreg;
8584 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
8585 idx_reg = alloc_ireg (cfg);
8586 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
8587 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
8588 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
8589 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
8590 array_reg = alloc_ireg (cfg);
8591 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
8592 offset2_reg = alloc_ireg (cfg);
8593 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
8594 dreg = alloc_ireg (cfg);
8595 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
8597 offset = (gsize)addr & 0x7fffffff;
8598 idx = (offset >> 24) - 1;
8600 array_reg = alloc_ireg (cfg);
8601 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
8602 dreg = alloc_ireg (cfg);
8603 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
8605 } else if ((cfg->opt & MONO_OPT_SHARED) ||
8606 (cfg->compile_aot && is_special_static) ||
8607 (context_used && is_special_static)) {
8608 MonoInst *iargs [2];
8610 g_assert (field->parent);
8611 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8613 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8614 field, MONO_RGCTX_INFO_CLASS_FIELD);
8616 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8618 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8619 } else if (context_used) {
8620 MonoInst *static_data;
8623 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8624 method->klass->name_space, method->klass->name, method->name,
8625 depth, field->offset);
8628 if (mono_class_needs_cctor_run (klass, method))
8629 emit_generic_class_init (cfg, klass);
8632 * The pointer we're computing here is
8634 * super_info.static_data + field->offset
8636 static_data = emit_get_rgctx_klass (cfg, context_used,
8637 klass, MONO_RGCTX_INFO_STATIC_DATA);
8639 if (field->offset == 0) {
8642 int addr_reg = mono_alloc_preg (cfg);
8643 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8645 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8646 MonoInst *iargs [2];
8648 g_assert (field->parent);
8649 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8650 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8651 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8653 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8655 CHECK_TYPELOAD (klass);
8657 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8658 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8659 if (cfg->verbose_level > 2)
8660 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8661 class_inits = g_slist_prepend (class_inits, vtable);
8663 if (cfg->run_cctors) {
8665 /* This makes it so that inlining cannot trigger */
8666 /* .cctors: too many apps depend on them */
8667 /* running with a specific order... */
8668 if (! vtable->initialized)
8670 ex = mono_runtime_class_init_full (vtable, FALSE);
8672 set_exception_object (cfg, ex);
8673 goto exception_exit;
8677 addr = (char*)vtable->data + field->offset;
8679 if (cfg->compile_aot)
8680 EMIT_NEW_SFLDACONST (cfg, ins, field);
8682 EMIT_NEW_PCONST (cfg, ins, addr);
8684 MonoInst *iargs [1];
8685 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8686 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8690 /* Generate IR to do the actual load/store operation */
8692 if (*ip == CEE_LDSFLDA) {
8693 ins->klass = mono_class_from_mono_type (field->type);
8694 ins->type = STACK_PTR;
8696 } else if (*ip == CEE_STSFLD) {
8701 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
8702 store->flags |= ins_flag;
8704 gboolean is_const = FALSE;
8705 MonoVTable *vtable = NULL;
8707 if (!context_used) {
8708 vtable = mono_class_vtable (cfg->domain, klass);
8709 CHECK_TYPELOAD (klass);
8711 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8712 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8713 gpointer addr = (char*)vtable->data + field->offset;
8714 int ro_type = field->type->type;
8715 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
8716 ro_type = mono_class_enum_basetype (field->type->data.klass)->type;
8718 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8721 case MONO_TYPE_BOOLEAN:
8723 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8727 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8730 case MONO_TYPE_CHAR:
8732 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8736 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8741 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8745 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8751 case MONO_TYPE_FNPTR:
8752 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8753 type_to_eval_stack_type ((cfg), field->type, *sp);
8756 case MONO_TYPE_STRING:
8757 case MONO_TYPE_OBJECT:
8758 case MONO_TYPE_CLASS:
8759 case MONO_TYPE_SZARRAY:
8760 case MONO_TYPE_ARRAY:
8761 if (!mono_gc_is_moving ()) {
8762 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8763 type_to_eval_stack_type ((cfg), field->type, *sp);
8771 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8776 case MONO_TYPE_VALUETYPE:
8786 CHECK_STACK_OVF (1);
8788 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8789 load->flags |= ins_flag;
8802 token = read32 (ip + 1);
8803 klass = mini_get_class (method, token, generic_context);
8804 CHECK_TYPELOAD (klass);
8805 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8806 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8807 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
8808 generic_class_is_reference_type (cfg, klass)) {
8809 /* insert call to write barrier */
8810 emit_write_barrier (cfg, sp [0], sp [1], -1);
8822 const char *data_ptr;
8824 guint32 field_token;
8830 token = read32 (ip + 1);
8832 klass = mini_get_class (method, token, generic_context);
8833 CHECK_TYPELOAD (klass);
8835 if (cfg->generic_sharing_context)
8836 context_used = mono_class_check_context_used (klass);
8838 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
8839 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
8840 ins->sreg1 = sp [0]->dreg;
8841 ins->type = STACK_I4;
8842 ins->dreg = alloc_ireg (cfg);
8843 MONO_ADD_INS (cfg->cbb, ins);
8844 *sp = mono_decompose_opcode (cfg, ins);
8849 MonoClass *array_class = mono_array_class_get (klass, 1);
8850 /* FIXME: we cannot get a managed
8851 allocator because we can't get the
8852 open generic class's vtable. We
8853 have the same problem in
8854 handle_alloc(). This
8855 needs to be solved so that we can
8856 have managed allocs of shared
8859 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
8860 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
8862 MonoMethod *managed_alloc = NULL;
8864 /* FIXME: Decompose later to help abcrem */
8867 args [0] = emit_get_rgctx_klass (cfg, context_used,
8868 array_class, MONO_RGCTX_INFO_VTABLE);
8873 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
8875 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8877 if (cfg->opt & MONO_OPT_SHARED) {
8878 /* Decompose now to avoid problems with references to the domainvar */
8879 MonoInst *iargs [3];
8881 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8882 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8885 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8887 /* Decompose later since it is needed by abcrem */
8888 MonoClass *array_type = mono_array_class_get (klass, 1);
8889 mono_class_vtable (cfg->domain, array_type);
8890 CHECK_TYPELOAD (array_type);
8892 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8893 ins->dreg = alloc_preg (cfg);
8894 ins->sreg1 = sp [0]->dreg;
8895 ins->inst_newa_class = klass;
8896 ins->type = STACK_OBJ;
8898 MONO_ADD_INS (cfg->cbb, ins);
8899 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8900 cfg->cbb->has_array_access = TRUE;
8902 /* Needed so mono_emit_load_get_addr () gets called */
8903 mono_get_got_var (cfg);
8913 * we inline/optimize the initialization sequence if possible.
8914 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8915 * for small sizes open code the memcpy
8916 * ensure the rva field is big enough
8918 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8919 MonoMethod *memcpy_method = get_memcpy_method ();
8920 MonoInst *iargs [3];
8921 int add_reg = alloc_preg (cfg);
8923 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8924 if (cfg->compile_aot) {
8925 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8927 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8929 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8930 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8939 if (sp [0]->type != STACK_OBJ)
8942 dreg = alloc_preg (cfg);
8943 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8944 ins->dreg = alloc_preg (cfg);
8945 ins->sreg1 = sp [0]->dreg;
8946 ins->type = STACK_I4;
8947 /* This flag will be inherited by the decomposition */
8948 ins->flags |= MONO_INST_FAULT;
8949 MONO_ADD_INS (cfg->cbb, ins);
8950 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8951 cfg->cbb->has_array_access = TRUE;
8959 if (sp [0]->type != STACK_OBJ)
8962 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8964 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8965 CHECK_TYPELOAD (klass);
8966 /* we need to make sure that this array is exactly the type it needs
8967 * to be for correctness. the wrappers are lax with their usage
8968 * so we need to ignore them here
8970 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
8971 MonoClass *array_class = mono_array_class_get (klass, 1);
8972 mini_emit_check_array_type (cfg, sp [0], array_class);
8973 CHECK_TYPELOAD (array_class);
8977 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8992 case CEE_LDELEM_REF: {
8998 if (*ip == CEE_LDELEM) {
9000 token = read32 (ip + 1);
9001 klass = mini_get_class (method, token, generic_context);
9002 CHECK_TYPELOAD (klass);
9003 mono_class_init (klass);
9006 klass = array_access_to_klass (*ip);
9008 if (sp [0]->type != STACK_OBJ)
9011 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9013 if (sp [1]->opcode == OP_ICONST) {
9014 int array_reg = sp [0]->dreg;
9015 int index_reg = sp [1]->dreg;
9016 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
9018 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
9019 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
9021 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9022 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
9025 if (*ip == CEE_LDELEM)
9038 case CEE_STELEM_REF:
9045 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9047 if (*ip == CEE_STELEM) {
9049 token = read32 (ip + 1);
9050 klass = mini_get_class (method, token, generic_context);
9051 CHECK_TYPELOAD (klass);
9052 mono_class_init (klass);
9055 klass = array_access_to_klass (*ip);
9057 if (sp [0]->type != STACK_OBJ)
9060 /* storing a NULL doesn't need any of the complex checks in stelemref */
9061 if (generic_class_is_reference_type (cfg, klass) &&
9062 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
9063 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
9064 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
9065 MonoInst *iargs [3];
9068 mono_class_setup_vtable (obj_array);
9069 g_assert (helper->slot);
9071 if (sp [0]->type != STACK_OBJ)
9073 if (sp [2]->type != STACK_OBJ)
9080 mono_emit_method_call (cfg, helper, iargs, sp [0]);
9082 if (sp [1]->opcode == OP_ICONST) {
9083 int array_reg = sp [0]->dreg;
9084 int index_reg = sp [1]->dreg;
9085 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
9087 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
9088 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
9090 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9091 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
9095 if (*ip == CEE_STELEM)
9102 case CEE_CKFINITE: {
9106 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
9107 ins->sreg1 = sp [0]->dreg;
9108 ins->dreg = alloc_freg (cfg);
9109 ins->type = STACK_R8;
9110 MONO_ADD_INS (bblock, ins);
9112 *sp++ = mono_decompose_opcode (cfg, ins);
9117 case CEE_REFANYVAL: {
9118 MonoInst *src_var, *src;
9120 int klass_reg = alloc_preg (cfg);
9121 int dreg = alloc_preg (cfg);
9124 MONO_INST_NEW (cfg, ins, *ip);
9127 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
9128 CHECK_TYPELOAD (klass);
9129 mono_class_init (klass);
9131 if (cfg->generic_sharing_context)
9132 context_used = mono_class_check_context_used (klass);
9135 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9137 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9138 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9139 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
9142 MonoInst *klass_ins;
9144 klass_ins = emit_get_rgctx_klass (cfg, context_used,
9145 klass, MONO_RGCTX_INFO_KLASS);
9148 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
9149 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
9151 mini_emit_class_check (cfg, klass_reg, klass);
9153 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
9154 ins->type = STACK_MP;
9159 case CEE_MKREFANY: {
9160 MonoInst *loc, *addr;
9163 MONO_INST_NEW (cfg, ins, *ip);
9166 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
9167 CHECK_TYPELOAD (klass);
9168 mono_class_init (klass);
9170 if (cfg->generic_sharing_context)
9171 context_used = mono_class_check_context_used (klass);
9173 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
9174 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
9177 MonoInst *const_ins;
9178 int type_reg = alloc_preg (cfg);
9180 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
9181 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
9182 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
9183 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
9184 } else if (cfg->compile_aot) {
9185 int const_reg = alloc_preg (cfg);
9186 int type_reg = alloc_preg (cfg);
9188 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
9189 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
9190 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
9191 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
9193 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
9194 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
9196 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
9198 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
9199 ins->type = STACK_VTYPE;
9200 ins->klass = mono_defaults.typed_reference_class;
9207 MonoClass *handle_class;
9209 CHECK_STACK_OVF (1);
9212 n = read32 (ip + 1);
9214 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
9215 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9216 handle = mono_method_get_wrapper_data (method, n);
9217 handle_class = mono_method_get_wrapper_data (method, n + 1);
9218 if (handle_class == mono_defaults.typehandle_class)
9219 handle = &((MonoClass*)handle)->byval_arg;
9222 handle = mono_ldtoken (image, n, &handle_class, generic_context);
9226 mono_class_init (handle_class);
9227 if (cfg->generic_sharing_context) {
9228 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
9229 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
9230 /* This case handles ldtoken
9231 of an open type, like for
9234 } else if (handle_class == mono_defaults.typehandle_class) {
9235 /* If we get a MONO_TYPE_CLASS
9236 then we need to provide the
9238 instantiation of it. */
9239 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
9242 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
9243 } else if (handle_class == mono_defaults.fieldhandle_class)
9244 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
9245 else if (handle_class == mono_defaults.methodhandle_class)
9246 context_used = mono_method_check_context_used (handle);
9248 g_assert_not_reached ();
9251 if ((cfg->opt & MONO_OPT_SHARED) &&
9252 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
9253 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
9254 MonoInst *addr, *vtvar, *iargs [3];
9255 int method_context_used;
9257 if (cfg->generic_sharing_context)
9258 method_context_used = mono_method_check_context_used (method);
9260 method_context_used = 0;
9262 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9264 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9265 EMIT_NEW_ICONST (cfg, iargs [1], n);
9266 if (method_context_used) {
9267 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
9268 method, MONO_RGCTX_INFO_METHOD);
9269 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
9271 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
9272 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
9274 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9276 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9278 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9280 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
9281 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
9282 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
9283 (cmethod->klass == mono_defaults.monotype_class->parent) &&
9284 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
9285 MonoClass *tclass = mono_class_from_mono_type (handle);
9287 mono_class_init (tclass);
9289 ins = emit_get_rgctx_klass (cfg, context_used,
9290 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
9291 } else if (cfg->compile_aot) {
9292 if (method->wrapper_type) {
9293 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
9294 /* Special case for static synchronized wrappers */
9295 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
9297 /* FIXME: n is not a normal token */
9298 cfg->disable_aot = TRUE;
9299 EMIT_NEW_PCONST (cfg, ins, NULL);
9302 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
9305 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
9307 ins->type = STACK_OBJ;
9308 ins->klass = cmethod->klass;
9311 MonoInst *addr, *vtvar;
9313 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9316 if (handle_class == mono_defaults.typehandle_class) {
9317 ins = emit_get_rgctx_klass (cfg, context_used,
9318 mono_class_from_mono_type (handle),
9319 MONO_RGCTX_INFO_TYPE);
9320 } else if (handle_class == mono_defaults.methodhandle_class) {
9321 ins = emit_get_rgctx_method (cfg, context_used,
9322 handle, MONO_RGCTX_INFO_METHOD);
9323 } else if (handle_class == mono_defaults.fieldhandle_class) {
9324 ins = emit_get_rgctx_field (cfg, context_used,
9325 handle, MONO_RGCTX_INFO_CLASS_FIELD);
9327 g_assert_not_reached ();
9329 } else if (cfg->compile_aot) {
9330 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
9332 EMIT_NEW_PCONST (cfg, ins, handle);
9334 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9335 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9336 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9346 MONO_INST_NEW (cfg, ins, OP_THROW);
9348 ins->sreg1 = sp [0]->dreg;
9350 bblock->out_of_line = TRUE;
9351 MONO_ADD_INS (bblock, ins);
9352 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
9353 MONO_ADD_INS (bblock, ins);
9356 link_bblock (cfg, bblock, end_bblock);
9357 start_new_bblock = 1;
9359 case CEE_ENDFINALLY:
9360 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
9361 MONO_ADD_INS (bblock, ins);
9363 start_new_bblock = 1;
9366 * Control will leave the method so empty the stack, otherwise
9367 * the next basic block will start with a nonempty stack.
9369 while (sp != stack_start) {
9377 if (*ip == CEE_LEAVE) {
9379 target = ip + 5 + (gint32)read32(ip + 1);
9382 target = ip + 2 + (signed char)(ip [1]);
9385 /* empty the stack */
9386 while (sp != stack_start) {
9391 * If this leave statement is in a catch block, check for a
9392 * pending exception, and rethrow it if necessary.
9393 * We avoid doing this in runtime invoke wrappers, since those are called
9394 * by native code which expects the wrapper to catch all exceptions.
9396 for (i = 0; i < header->num_clauses; ++i) {
9397 MonoExceptionClause *clause = &header->clauses [i];
9400 * Use <= in the final comparison to handle clauses with multiple
9401 * leave statements, like in bug #78024.
9402 * The ordering of the exception clauses guarantees that we find the
9405 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
9407 MonoBasicBlock *dont_throw;
9412 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
9415 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
9417 NEW_BBLOCK (cfg, dont_throw);
9420 * Currently, we always rethrow the abort exception, despite the
9421 * fact that this is not correct. See thread6.cs for an example.
9422 * But propagating the abort exception is more important than
9423 * getting the sematics right.
9425 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
9426 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
9427 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
9429 MONO_START_BB (cfg, dont_throw);
9434 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
9436 MonoExceptionClause *clause;
9438 for (tmp = handlers; tmp; tmp = tmp->next) {
9440 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
9442 link_bblock (cfg, bblock, tblock);
9443 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
9444 ins->inst_target_bb = tblock;
9445 ins->inst_eh_block = clause;
9446 MONO_ADD_INS (bblock, ins);
9447 bblock->has_call_handler = 1;
9448 if (COMPILE_LLVM (cfg)) {
9449 MonoBasicBlock *target_bb;
9452 * Link the finally bblock with the target, since it will
9453 * conceptually branch there.
9454 * FIXME: Have to link the bblock containing the endfinally.
9456 GET_BBLOCK (cfg, target_bb, target);
9457 link_bblock (cfg, tblock, target_bb);
9460 g_list_free (handlers);
9463 MONO_INST_NEW (cfg, ins, OP_BR);
9464 MONO_ADD_INS (bblock, ins);
9465 GET_BBLOCK (cfg, tblock, target);
9466 link_bblock (cfg, bblock, tblock);
9467 ins->inst_target_bb = tblock;
9468 start_new_bblock = 1;
9470 if (*ip == CEE_LEAVE)
9479 * Mono specific opcodes
9481 case MONO_CUSTOM_PREFIX: {
9483 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
9487 case CEE_MONO_ICALL: {
9489 MonoJitICallInfo *info;
9491 token = read32 (ip + 2);
9492 func = mono_method_get_wrapper_data (method, token);
9493 info = mono_find_jit_icall_by_addr (func);
9496 CHECK_STACK (info->sig->param_count);
9497 sp -= info->sig->param_count;
9499 ins = mono_emit_jit_icall (cfg, info->func, sp);
9500 if (!MONO_TYPE_IS_VOID (info->sig->ret))
9504 inline_costs += 10 * num_calls++;
9508 case CEE_MONO_LDPTR: {
9511 CHECK_STACK_OVF (1);
9513 token = read32 (ip + 2);
9515 ptr = mono_method_get_wrapper_data (method, token);
9516 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
9517 MonoJitICallInfo *callinfo;
9518 const char *icall_name;
9520 icall_name = method->name + strlen ("__icall_wrapper_");
9521 g_assert (icall_name);
9522 callinfo = mono_find_jit_icall_by_name (icall_name);
9523 g_assert (callinfo);
9525 if (ptr == callinfo->func) {
9526 /* Will be transformed into an AOTCONST later */
9527 EMIT_NEW_PCONST (cfg, ins, ptr);
9533 /* FIXME: Generalize this */
9534 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
9535 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
9540 EMIT_NEW_PCONST (cfg, ins, ptr);
9543 inline_costs += 10 * num_calls++;
9544 /* Can't embed random pointers into AOT code */
9545 cfg->disable_aot = 1;
9548 case CEE_MONO_ICALL_ADDR: {
9549 MonoMethod *cmethod;
9552 CHECK_STACK_OVF (1);
9554 token = read32 (ip + 2);
9556 cmethod = mono_method_get_wrapper_data (method, token);
9558 if (cfg->compile_aot) {
9559 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
9561 ptr = mono_lookup_internal_call (cmethod);
9563 EMIT_NEW_PCONST (cfg, ins, ptr);
9569 case CEE_MONO_VTADDR: {
9570 MonoInst *src_var, *src;
9576 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9577 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
9582 case CEE_MONO_NEWOBJ: {
9583 MonoInst *iargs [2];
9585 CHECK_STACK_OVF (1);
9587 token = read32 (ip + 2);
9588 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9589 mono_class_init (klass);
9590 NEW_DOMAINCONST (cfg, iargs [0]);
9591 MONO_ADD_INS (cfg->cbb, iargs [0]);
9592 NEW_CLASSCONST (cfg, iargs [1], klass);
9593 MONO_ADD_INS (cfg->cbb, iargs [1]);
9594 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9596 inline_costs += 10 * num_calls++;
9599 case CEE_MONO_OBJADDR:
9602 MONO_INST_NEW (cfg, ins, OP_MOVE);
9603 ins->dreg = alloc_preg (cfg);
9604 ins->sreg1 = sp [0]->dreg;
9605 ins->type = STACK_MP;
9606 MONO_ADD_INS (cfg->cbb, ins);
9610 case CEE_MONO_LDNATIVEOBJ:
9612 * Similar to LDOBJ, but instead load the unmanaged
9613 * representation of the vtype to the stack.
9618 token = read32 (ip + 2);
9619 klass = mono_method_get_wrapper_data (method, token);
9620 g_assert (klass->valuetype);
9621 mono_class_init (klass);
9624 MonoInst *src, *dest, *temp;
9627 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9628 temp->backend.is_pinvoke = 1;
9629 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9630 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9632 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9633 dest->type = STACK_VTYPE;
9634 dest->klass = klass;
9640 case CEE_MONO_RETOBJ: {
9642 * Same as RET, but return the native representation of a vtype
9645 g_assert (cfg->ret);
9646 g_assert (mono_method_signature (method)->pinvoke);
9651 token = read32 (ip + 2);
9652 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9654 if (!cfg->vret_addr) {
9655 g_assert (cfg->ret_var_is_local);
9657 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9659 EMIT_NEW_RETLOADA (cfg, ins);
9661 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9663 if (sp != stack_start)
9666 MONO_INST_NEW (cfg, ins, OP_BR);
9667 ins->inst_target_bb = end_bblock;
9668 MONO_ADD_INS (bblock, ins);
9669 link_bblock (cfg, bblock, end_bblock);
9670 start_new_bblock = 1;
9674 case CEE_MONO_CISINST:
9675 case CEE_MONO_CCASTCLASS: {
9680 token = read32 (ip + 2);
9681 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9682 if (ip [1] == CEE_MONO_CISINST)
9683 ins = handle_cisinst (cfg, klass, sp [0]);
9685 ins = handle_ccastclass (cfg, klass, sp [0]);
9691 case CEE_MONO_SAVE_LMF:
9692 case CEE_MONO_RESTORE_LMF:
9693 #ifdef MONO_ARCH_HAVE_LMF_OPS
9694 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9695 MONO_ADD_INS (bblock, ins);
9696 cfg->need_lmf_area = TRUE;
9700 case CEE_MONO_CLASSCONST:
9701 CHECK_STACK_OVF (1);
9703 token = read32 (ip + 2);
9704 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9707 inline_costs += 10 * num_calls++;
9709 case CEE_MONO_NOT_TAKEN:
9710 bblock->out_of_line = TRUE;
9714 CHECK_STACK_OVF (1);
9716 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9717 ins->dreg = alloc_preg (cfg);
9718 ins->inst_offset = (gint32)read32 (ip + 2);
9719 ins->type = STACK_PTR;
9720 MONO_ADD_INS (bblock, ins);
9724 case CEE_MONO_DYN_CALL: {
9727 /* It would be easier to call a trampoline, but that would put an
9728 * extra frame on the stack, confusing exception handling. So
9729 * implement it inline using an opcode for now.
9732 if (!cfg->dyn_call_var) {
9733 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9734 /* prevent it from being register allocated */
9735 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
9738 /* Has to use a call inst since it local regalloc expects it */
9739 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
9740 ins = (MonoInst*)call;
9742 ins->sreg1 = sp [0]->dreg;
9743 ins->sreg2 = sp [1]->dreg;
9744 MONO_ADD_INS (bblock, ins);
9746 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
9747 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
9751 inline_costs += 10 * num_calls++;
9756 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9766 /* somewhat similar to LDTOKEN */
9767 MonoInst *addr, *vtvar;
9768 CHECK_STACK_OVF (1);
9769 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9771 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9772 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9774 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9775 ins->type = STACK_VTYPE;
9776 ins->klass = mono_defaults.argumenthandle_class;
9789 * The following transforms:
9790 * CEE_CEQ into OP_CEQ
9791 * CEE_CGT into OP_CGT
9792 * CEE_CGT_UN into OP_CGT_UN
9793 * CEE_CLT into OP_CLT
9794 * CEE_CLT_UN into OP_CLT_UN
9796 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9798 MONO_INST_NEW (cfg, ins, cmp->opcode);
9800 cmp->sreg1 = sp [0]->dreg;
9801 cmp->sreg2 = sp [1]->dreg;
9802 type_from_op (cmp, sp [0], sp [1]);
9804 if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9805 cmp->opcode = OP_LCOMPARE;
9806 else if (sp [0]->type == STACK_R8)
9807 cmp->opcode = OP_FCOMPARE;
9809 cmp->opcode = OP_ICOMPARE;
9810 MONO_ADD_INS (bblock, cmp);
9811 ins->type = STACK_I4;
9812 ins->dreg = alloc_dreg (cfg, ins->type);
9813 type_from_op (ins, sp [0], sp [1]);
9815 if (cmp->opcode == OP_FCOMPARE) {
9817 * The backends expect the fceq opcodes to do the
9820 cmp->opcode = OP_NOP;
9821 ins->sreg1 = cmp->sreg1;
9822 ins->sreg2 = cmp->sreg2;
9824 MONO_ADD_INS (bblock, ins);
9831 MonoMethod *cil_method;
9832 gboolean needs_static_rgctx_invoke;
9834 CHECK_STACK_OVF (1);
9836 n = read32 (ip + 2);
9837 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9838 if (!cmethod || mono_loader_get_last_error ())
9840 mono_class_init (cmethod->klass);
9842 mono_save_token_info (cfg, image, n, cmethod);
9844 if (cfg->generic_sharing_context)
9845 context_used = mono_method_check_context_used (cmethod);
9847 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9849 cil_method = cmethod;
9850 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9851 METHOD_ACCESS_FAILURE;
9853 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9854 if (check_linkdemand (cfg, method, cmethod))
9856 CHECK_CFG_EXCEPTION;
9857 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9858 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9862 * Optimize the common case of ldftn+delegate creation
9864 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9865 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9866 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9867 MonoInst *target_ins;
9869 int invoke_context_used = 0;
9871 invoke = mono_get_delegate_invoke (ctor_method->klass);
9872 if (!invoke || !mono_method_signature (invoke))
9875 if (cfg->generic_sharing_context)
9876 invoke_context_used = mono_method_check_context_used (invoke);
9878 target_ins = sp [-1];
9880 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
9881 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
9882 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
9883 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
9884 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
9888 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
9889 /* FIXME: SGEN support */
9890 if (invoke_context_used == 0) {
9892 if (cfg->verbose_level > 3)
9893 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9895 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
9896 CHECK_CFG_EXCEPTION;
9905 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9906 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9910 inline_costs += 10 * num_calls++;
9913 case CEE_LDVIRTFTN: {
9918 n = read32 (ip + 2);
9919 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9920 if (!cmethod || mono_loader_get_last_error ())
9922 mono_class_init (cmethod->klass);
9924 if (cfg->generic_sharing_context)
9925 context_used = mono_method_check_context_used (cmethod);
9927 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9928 if (check_linkdemand (cfg, method, cmethod))
9930 CHECK_CFG_EXCEPTION;
9931 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9932 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9938 args [1] = emit_get_rgctx_method (cfg, context_used,
9939 cmethod, MONO_RGCTX_INFO_METHOD);
9942 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9944 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9947 inline_costs += 10 * num_calls++;
9951 CHECK_STACK_OVF (1);
9953 n = read16 (ip + 2);
9955 EMIT_NEW_ARGLOAD (cfg, ins, n);
9960 CHECK_STACK_OVF (1);
9962 n = read16 (ip + 2);
9964 NEW_ARGLOADA (cfg, ins, n);
9965 MONO_ADD_INS (cfg->cbb, ins);
9973 n = read16 (ip + 2);
9975 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9977 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9981 CHECK_STACK_OVF (1);
9983 n = read16 (ip + 2);
9985 EMIT_NEW_LOCLOAD (cfg, ins, n);
9990 unsigned char *tmp_ip;
9991 CHECK_STACK_OVF (1);
9993 n = read16 (ip + 2);
9996 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
10002 EMIT_NEW_LOCLOADA (cfg, ins, n);
10011 n = read16 (ip + 2);
10013 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
10015 emit_stloc_ir (cfg, sp, header, n);
10022 if (sp != stack_start)
10024 if (cfg->method != method)
10026 * Inlining this into a loop in a parent could lead to
10027 * stack overflows which is different behavior than the
10028 * non-inlined case, thus disable inlining in this case.
10030 goto inline_failure;
10032 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
10033 ins->dreg = alloc_preg (cfg);
10034 ins->sreg1 = sp [0]->dreg;
10035 ins->type = STACK_PTR;
10036 MONO_ADD_INS (cfg->cbb, ins);
10038 cfg->flags |= MONO_CFG_HAS_ALLOCA;
10040 ins->flags |= MONO_INST_INIT;
10045 case CEE_ENDFILTER: {
10046 MonoExceptionClause *clause, *nearest;
10047 int cc, nearest_num;
10051 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
10053 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
10054 ins->sreg1 = (*sp)->dreg;
10055 MONO_ADD_INS (bblock, ins);
10056 start_new_bblock = 1;
10061 for (cc = 0; cc < header->num_clauses; ++cc) {
10062 clause = &header->clauses [cc];
10063 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
10064 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
10065 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
10070 g_assert (nearest);
10071 if ((ip - header->code) != nearest->handler_offset)
10076 case CEE_UNALIGNED_:
10077 ins_flag |= MONO_INST_UNALIGNED;
10078 /* FIXME: record alignment? we can assume 1 for now */
10082 case CEE_VOLATILE_:
10083 ins_flag |= MONO_INST_VOLATILE;
10087 ins_flag |= MONO_INST_TAILCALL;
10088 cfg->flags |= MONO_CFG_HAS_TAIL;
10089 /* Can't inline tail calls at this time */
10090 inline_costs += 100000;
10097 token = read32 (ip + 2);
10098 klass = mini_get_class (method, token, generic_context);
10099 CHECK_TYPELOAD (klass);
10100 if (generic_class_is_reference_type (cfg, klass))
10101 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
10103 mini_emit_initobj (cfg, *sp, NULL, klass);
10107 case CEE_CONSTRAINED_:
10109 token = read32 (ip + 2);
10110 if (method->wrapper_type != MONO_WRAPPER_NONE)
10111 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
10113 constrained_call = mono_class_get_full (image, token, generic_context);
10114 CHECK_TYPELOAD (constrained_call);
10118 case CEE_INITBLK: {
10119 MonoInst *iargs [3];
10123 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
10124 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
10125 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
10126 /* emit_memset only works when val == 0 */
10127 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
10129 iargs [0] = sp [0];
10130 iargs [1] = sp [1];
10131 iargs [2] = sp [2];
10132 if (ip [1] == CEE_CPBLK) {
10133 MonoMethod *memcpy_method = get_memcpy_method ();
10134 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
10136 MonoMethod *memset_method = get_memset_method ();
10137 mono_emit_method_call (cfg, memset_method, iargs, NULL);
10147 ins_flag |= MONO_INST_NOTYPECHECK;
10149 ins_flag |= MONO_INST_NORANGECHECK;
10150 /* we ignore the no-nullcheck for now since we
10151 * really do it explicitly only when doing callvirt->call
10155 case CEE_RETHROW: {
10157 int handler_offset = -1;
10159 for (i = 0; i < header->num_clauses; ++i) {
10160 MonoExceptionClause *clause = &header->clauses [i];
10161 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
10162 handler_offset = clause->handler_offset;
10167 bblock->flags |= BB_EXCEPTION_UNSAFE;
10169 g_assert (handler_offset != -1);
10171 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
10172 MONO_INST_NEW (cfg, ins, OP_RETHROW);
10173 ins->sreg1 = load->dreg;
10174 MONO_ADD_INS (bblock, ins);
10176 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
10177 MONO_ADD_INS (bblock, ins);
10180 link_bblock (cfg, bblock, end_bblock);
10181 start_new_bblock = 1;
10189 CHECK_STACK_OVF (1);
10191 token = read32 (ip + 2);
10192 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic) {
10193 MonoType *type = mono_type_create_from_typespec (image, token);
10194 token = mono_type_size (type, &ialign);
10196 MonoClass *klass = mono_class_get_full (image, token, generic_context);
10197 CHECK_TYPELOAD (klass);
10198 mono_class_init (klass);
10199 token = mono_class_value_size (klass, &align);
10201 EMIT_NEW_ICONST (cfg, ins, token);
10206 case CEE_REFANYTYPE: {
10207 MonoInst *src_var, *src;
10213 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10215 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
10216 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
10217 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
10222 case CEE_READONLY_:
10235 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
10245 g_warning ("opcode 0x%02x not handled", *ip);
10249 if (start_new_bblock != 1)
10252 bblock->cil_length = ip - bblock->cil_code;
10253 bblock->next_bb = end_bblock;
10255 if (cfg->method == method && cfg->domainvar) {
10257 MonoInst *get_domain;
10259 cfg->cbb = init_localsbb;
10261 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
10262 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
10265 get_domain->dreg = alloc_preg (cfg);
10266 MONO_ADD_INS (cfg->cbb, get_domain);
10268 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
10269 MONO_ADD_INS (cfg->cbb, store);
10272 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
10273 if (cfg->compile_aot)
10274 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
10275 mono_get_got_var (cfg);
10278 if (cfg->method == method && cfg->got_var)
10279 mono_emit_load_got_addr (cfg);
10284 cfg->cbb = init_localsbb;
10286 for (i = 0; i < header->num_locals; ++i) {
10287 MonoType *ptype = header->locals [i];
10288 int t = ptype->type;
10289 dreg = cfg->locals [i]->dreg;
10291 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
10292 t = mono_class_enum_basetype (ptype->data.klass)->type;
10293 if (ptype->byref) {
10294 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10295 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
10296 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
10297 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
10298 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
10299 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
10300 MONO_INST_NEW (cfg, ins, OP_R8CONST);
10301 ins->type = STACK_R8;
10302 ins->inst_p0 = (void*)&r8_0;
10303 ins->dreg = alloc_dreg (cfg, STACK_R8);
10304 MONO_ADD_INS (init_localsbb, ins);
10305 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
10306 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
10307 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
10308 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
10310 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10315 if (cfg->init_ref_vars && cfg->method == method) {
10316 /* Emit initialization for ref vars */
10317 // FIXME: Avoid duplication initialization for IL locals.
10318 for (i = 0; i < cfg->num_varinfo; ++i) {
10319 MonoInst *ins = cfg->varinfo [i];
10321 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
10322 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
10326 /* Add a sequence point for method entry/exit events */
10328 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
10329 MONO_ADD_INS (init_localsbb, ins);
10330 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
10331 MONO_ADD_INS (cfg->bb_exit, ins);
10336 if (cfg->method == method) {
10337 MonoBasicBlock *bb;
10338 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10339 bb->region = mono_find_block_region (cfg, bb->real_offset);
10341 mono_create_spvar_for_region (cfg, bb->region);
10342 if (cfg->verbose_level > 2)
10343 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
10347 g_slist_free (class_inits);
10348 dont_inline = g_list_remove (dont_inline, method);
10350 if (inline_costs < 0) {
10353 /* Method is too large */
10354 mname = mono_method_full_name (method, TRUE);
10355 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
10356 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
10358 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10359 mono_basic_block_free (original_bb);
10363 if ((cfg->verbose_level > 2) && (cfg->method == method))
10364 mono_print_code (cfg, "AFTER METHOD-TO-IR");
10366 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10367 mono_basic_block_free (original_bb);
10368 return inline_costs;
10371 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
10378 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
10382 set_exception_type_from_invalid_il (cfg, method, ip);
10386 g_slist_free (class_inits);
10387 mono_basic_block_free (original_bb);
10388 dont_inline = g_list_remove (dont_inline, method);
10389 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a OP_STORE*_MEMBASE_REG opcode to the corresponding
 * OP_STORE*_MEMBASE_IMM opcode, i.e. the form that stores an immediate
 * instead of a register. Asserts on any other opcode.
 *
 * NOTE(review): this extract is missing interior lines (the original line
 * numbers baked into the text jump, e.g. 10394->10397), so the switch
 * header/braces are not visible here — confirm against the full file.
 */
10394 store_membase_reg_to_store_membase_imm (int opcode)
10397 case OP_STORE_MEMBASE_REG:
10398 return OP_STORE_MEMBASE_IMM;
10399 case OP_STOREI1_MEMBASE_REG:
10400 return OP_STOREI1_MEMBASE_IMM;
10401 case OP_STOREI2_MEMBASE_REG:
10402 return OP_STOREI2_MEMBASE_IMM;
10403 case OP_STOREI4_MEMBASE_REG:
10404 return OP_STOREI4_MEMBASE_IMM;
10405 case OP_STOREI8_MEMBASE_REG:
10406 return OP_STOREI8_MEMBASE_IMM;
/* unsupported store opcode: fail loudly rather than miscompile */
10408 g_assert_not_reached ();
10414 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 *
 *   Map a two-source-register IR opcode to its immediate-operand variant
 * (e.g. an integer add to OP_IADD_IMM, a long shift to OP_LSHL_IMM, a
 * register store to a *_MEMBASE_IMM store). Architecture-specific mappings
 * (x86 push/compare, amd64 icompare) are guarded by TARGET_* ifdefs.
 *
 * NOTE(review): the `case` labels for most arms were elided from this
 * extract (original numbering jumps 10421, 10423, ...); only the return
 * statements survive. The returned opcode names indicate which source
 * case each line belongs to — verify against the full file before editing.
 */
10417 mono_op_to_op_imm (int opcode)
10421 return OP_IADD_IMM;
10423 return OP_ISUB_IMM;
10425 return OP_IDIV_IMM;
10427 return OP_IDIV_UN_IMM;
10429 return OP_IREM_IMM;
10431 return OP_IREM_UN_IMM;
10433 return OP_IMUL_IMM;
10435 return OP_IAND_IMM;
10439 return OP_IXOR_IMM;
10441 return OP_ISHL_IMM;
10443 return OP_ISHR_IMM;
10445 return OP_ISHR_UN_IMM;
10448 return OP_LADD_IMM;
10450 return OP_LSUB_IMM;
10452 return OP_LAND_IMM;
10456 return OP_LXOR_IMM;
10458 return OP_LSHL_IMM;
10460 return OP_LSHR_IMM;
10462 return OP_LSHR_UN_IMM;
10465 return OP_COMPARE_IMM;
10467 return OP_ICOMPARE_IMM;
10469 return OP_LCOMPARE_IMM;
10471 case OP_STORE_MEMBASE_REG:
10472 return OP_STORE_MEMBASE_IMM;
10473 case OP_STOREI1_MEMBASE_REG:
10474 return OP_STOREI1_MEMBASE_IMM;
10475 case OP_STOREI2_MEMBASE_REG:
10476 return OP_STOREI2_MEMBASE_IMM;
10477 case OP_STOREI4_MEMBASE_REG:
10478 return OP_STOREI4_MEMBASE_IMM;
/* x86/amd64-only immediate forms */
10480 #if defined(TARGET_X86) || defined (TARGET_AMD64)
10482 return OP_X86_PUSH_IMM;
10483 case OP_X86_COMPARE_MEMBASE_REG:
10484 return OP_X86_COMPARE_MEMBASE_IMM;
10486 #if defined(TARGET_AMD64)
10487 case OP_AMD64_ICOMPARE_MEMBASE_REG:
10488 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10490 case OP_VOIDCALL_REG:
10491 return OP_VOIDCALL;
10499 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL CEE_LDIND_* indirect-load opcode to the JIT IR
 * OP_LOAD*_MEMBASE opcode of matching size/signedness; both CEE_LDIND_I
 * and CEE_LDIND_REF map to the pointer-sized OP_LOAD_MEMBASE. Asserts on
 * any other opcode.
 *
 * NOTE(review): most `case CEE_LDIND_*:` labels were elided from this
 * extract; the surviving return lines imply them — confirm against the
 * full file.
 */
10506 ldind_to_load_membase (int opcode)
10510 return OP_LOADI1_MEMBASE;
10512 return OP_LOADU1_MEMBASE;
10514 return OP_LOADI2_MEMBASE;
10516 return OP_LOADU2_MEMBASE;
10518 return OP_LOADI4_MEMBASE;
10520 return OP_LOADU4_MEMBASE;
10522 return OP_LOAD_MEMBASE;
10523 case CEE_LDIND_REF:
10524 return OP_LOAD_MEMBASE;
10526 return OP_LOADI8_MEMBASE;
10528 return OP_LOADR4_MEMBASE;
10530 return OP_LOADR8_MEMBASE;
10532 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL CEE_STIND_* indirect-store opcode to the JIT IR
 * OP_STORE*_MEMBASE_REG opcode of matching size; CEE_STIND_REF maps to
 * the pointer-sized OP_STORE_MEMBASE_REG. Asserts on any other opcode.
 *
 * NOTE(review): several `case CEE_STIND_*:` labels were elided from this
 * extract — confirm against the full file.
 */
10539 stind_to_store_membase (int opcode)
10543 return OP_STOREI1_MEMBASE_REG;
10545 return OP_STOREI2_MEMBASE_REG;
10547 return OP_STOREI4_MEMBASE_REG;
10549 case CEE_STIND_REF:
10550 return OP_STORE_MEMBASE_REG;
10552 return OP_STOREI8_MEMBASE_REG;
10554 return OP_STORER4_MEMBASE_REG;
10556 return OP_STORER8_MEMBASE_REG;
10558 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map an OP_LOAD*_MEMBASE opcode (load from base register + offset) to
 * the corresponding OP_LOAD*_MEM opcode (load from an absolute address).
 * Only implemented on x86/amd64; the 64-bit load is additionally guarded
 * by SIZEOF_REGISTER == 8.
 *
 * NOTE(review): the fallback path for unmatched opcodes / non-x86 targets
 * is elided from this extract — confirm against the full file.
 */
10565 mono_load_membase_to_load_mem (int opcode)
10567 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
10568 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10570 case OP_LOAD_MEMBASE:
10571 return OP_LOAD_MEM;
10572 case OP_LOADU1_MEMBASE:
10573 return OP_LOADU1_MEM;
10574 case OP_LOADU2_MEMBASE:
10575 return OP_LOADU2_MEM;
10576 case OP_LOADI4_MEMBASE:
10577 return OP_LOADI4_MEM;
10578 case OP_LOADU4_MEMBASE:
10579 return OP_LOADU4_MEM;
10580 #if SIZEOF_REGISTER == 8
10581 case OP_LOADI8_MEMBASE:
10582 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU opcode whose result is consumed by the store STORE_OPCODE,
 * return the x86/amd64 read-modify-write opcode that operates directly on
 * the memory destination (OP_X86_*_MEMBASE_REG/IMM, OP_AMD64_*_MEMBASE_*),
 * so the separate store can be folded away. The guard checks reject store
 * opcodes whose width the fused forms cannot express (only full-word and
 * 4-byte stores on x86; also 8-byte stores on amd64).
 *
 * NOTE(review): the switch headers, `case` labels and the "no mapping"
 * fallback are elided from this extract; the returned opcode names imply
 * the source cases (add/sub/and/or/xor, reg vs imm) — confirm against the
 * full file before editing.
 */
10591 op_to_op_dest_membase (int store_opcode, int opcode)
10593 #if defined(TARGET_X86)
10594 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
10599 return OP_X86_ADD_MEMBASE_REG;
10601 return OP_X86_SUB_MEMBASE_REG;
10603 return OP_X86_AND_MEMBASE_REG;
10605 return OP_X86_OR_MEMBASE_REG;
10607 return OP_X86_XOR_MEMBASE_REG;
10610 return OP_X86_ADD_MEMBASE_IMM;
10613 return OP_X86_SUB_MEMBASE_IMM;
10616 return OP_X86_AND_MEMBASE_IMM;
10619 return OP_X86_OR_MEMBASE_IMM;
10622 return OP_X86_XOR_MEMBASE_IMM;
10628 #if defined(TARGET_AMD64)
10629 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
10634 return OP_X86_ADD_MEMBASE_REG;
10636 return OP_X86_SUB_MEMBASE_REG;
10638 return OP_X86_AND_MEMBASE_REG;
10640 return OP_X86_OR_MEMBASE_REG;
10642 return OP_X86_XOR_MEMBASE_REG;
10644 return OP_X86_ADD_MEMBASE_IMM;
10646 return OP_X86_SUB_MEMBASE_IMM;
10648 return OP_X86_AND_MEMBASE_IMM;
10650 return OP_X86_OR_MEMBASE_IMM;
10652 return OP_X86_XOR_MEMBASE_IMM;
10654 return OP_AMD64_ADD_MEMBASE_REG;
10656 return OP_AMD64_SUB_MEMBASE_REG;
10658 return OP_AMD64_AND_MEMBASE_REG;
10660 return OP_AMD64_OR_MEMBASE_REG;
10662 return OP_AMD64_XOR_MEMBASE_REG;
10665 return OP_AMD64_ADD_MEMBASE_IMM;
10668 return OP_AMD64_SUB_MEMBASE_IMM;
10671 return OP_AMD64_AND_MEMBASE_IMM;
10674 return OP_AMD64_OR_MEMBASE_IMM;
10677 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Given a compare-result opcode followed by a 1-byte store, return the
 * x86/amd64 SETcc-to-memory opcode (OP_X86_SETEQ_MEMBASE /
 * OP_X86_SETNE_MEMBASE) that writes the condition flag directly to the
 * store destination. Only applies when the store is OP_STOREI1_MEMBASE_REG.
 *
 * NOTE(review): the switch header, `case` labels (presumably the ceq/cne
 * opcodes) and the "no mapping" fallback are elided from this extract —
 * confirm against the full file.
 */
10687 op_to_op_store_membase (int store_opcode, int opcode)
10689 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10692 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10693 return OP_X86_SETEQ_MEMBASE;
10695 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10696 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Given an opcode whose FIRST source operand is produced by the load
 * LOAD_OPCODE, return an x86/amd64 opcode that reads that operand from
 * memory directly (push/compare *_MEMBASE forms), allowing the load to be
 * folded. Guard checks restrict folding to load widths the fused forms
 * handle; a commented-out amd64 immediate-compare mapping is kept disabled
 * because it "only works for 32 bit immediates".
 *
 * NOTE(review): switch headers, several `case` labels and the fallback
 * return are elided from this extract — confirm against the full file.
 */
10704 op_to_op_src1_membase (int load_opcode, int opcode)
10707 /* FIXME: This has sign extension issues */
10709 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10710 return OP_X86_COMPARE_MEMBASE8_IMM;
10713 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10718 return OP_X86_PUSH_MEMBASE;
10719 case OP_COMPARE_IMM:
10720 case OP_ICOMPARE_IMM:
10721 return OP_X86_COMPARE_MEMBASE_IMM;
10724 return OP_X86_COMPARE_MEMBASE_REG;
10728 #ifdef TARGET_AMD64
10729 /* FIXME: This has sign extension issues */
10731 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10732 return OP_X86_COMPARE_MEMBASE8_IMM;
10737 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10738 return OP_X86_PUSH_MEMBASE;
10740 /* FIXME: This only works for 32 bit immediates
10741 case OP_COMPARE_IMM:
10742 case OP_LCOMPARE_IMM:
10743 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10744 return OP_AMD64_COMPARE_MEMBASE_IMM;
10746 case OP_ICOMPARE_IMM:
10747 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10748 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10752 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10753 return OP_AMD64_COMPARE_MEMBASE_REG;
10756 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10757 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Given an opcode whose SECOND source operand is produced by the load
 * LOAD_OPCODE, return an x86/amd64 *_REG_MEMBASE opcode that reads that
 * operand from memory directly, folding the load. On amd64, 4-byte loads
 * map to the OP_X86_*/OP_AMD64_ICOMPARE 32-bit forms and 8-byte/pointer
 * loads to the OP_AMD64_* 64-bit forms.
 *
 * NOTE(review): switch headers, `case` labels and the fallback return are
 * elided from this extract — confirm against the full file.
 */
10766 op_to_op_src2_membase (int load_opcode, int opcode)
10769 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10775 return OP_X86_COMPARE_REG_MEMBASE;
10777 return OP_X86_ADD_REG_MEMBASE;
10779 return OP_X86_SUB_REG_MEMBASE;
10781 return OP_X86_AND_REG_MEMBASE;
10783 return OP_X86_OR_REG_MEMBASE;
10785 return OP_X86_XOR_REG_MEMBASE;
10789 #ifdef TARGET_AMD64
10790 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
10793 return OP_AMD64_ICOMPARE_REG_MEMBASE;
10795 return OP_X86_ADD_REG_MEMBASE;
10797 return OP_X86_SUB_REG_MEMBASE;
10799 return OP_X86_AND_REG_MEMBASE;
10801 return OP_X86_OR_REG_MEMBASE;
10803 return OP_X86_XOR_REG_MEMBASE;
10805 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
10809 return OP_AMD64_COMPARE_REG_MEMBASE;
10811 return OP_AMD64_ADD_REG_MEMBASE;
10813 return OP_AMD64_SUB_REG_MEMBASE;
10815 return OP_AMD64_AND_REG_MEMBASE;
10817 return OP_AMD64_OR_REG_MEMBASE;
10819 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (to which it delegates), but — judging from the
 * visible guards — presumably refuses to convert opcodes that this
 * architecture emulates in software (long shifts on 32-bit registers
 * without MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS; mul/div when
 * MONO_ARCH_EMULATE_MUL_DIV / MONO_ARCH_EMULATE_DIV are defined).
 *
 * NOTE(review): the `case` labels and early returns between the #if guards
 * are elided from this extract, so the exact excluded opcode set cannot be
 * confirmed here — verify against the full file.
 */
10828 mono_op_to_op_imm_noemul (int opcode)
10831 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
10837 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10845 return mono_op_to_op_imm (opcode);
10849 #ifndef DISABLE_JIT
10852 * mono_handle_global_vregs:
10854 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/*
 * Pass over the whole method: any vreg referenced from two different bblocks
 * gets a MonoInst variable (so the global register allocator sees it), and
 * conversely variables used in only one bblock are demoted back to local
 * vregs.  Finally the varinfo/vars tables are compacted to drop dead entries.
 * NOTE(review): interior lines (loop braces, switch labels, some statements)
 * are elided in this extract; comments describe the visible lines only.
 */
10858 mono_handle_global_vregs (MonoCompile *cfg)
10860 gint32 *vreg_to_bb;
10861 MonoBasicBlock *bb;
/*
 * vreg_to_bb [vreg] holds: 0 = unseen, block_num + 1 = seen in exactly that
 * bblock, -1 = seen in more than one bblock.
 * NOTE(review): `sizeof (gint32*)` looks like it should be `sizeof (gint32)`
 * — the element type is gint32.  Harmless over-allocation on 64-bit, but
 * worth fixing; confirm against upstream.
 */
10864 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
10866 #ifdef MONO_ARCH_SIMD_INTRINSICS
10867 if (cfg->uses_simd_intrinsics)
10868 mono_simd_simplify_indirection (cfg);
10871 /* Find local vregs used in more than one bb */
10872 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10873 MonoInst *ins = bb->code;
10874 int block_num = bb->block_num;
10876 if (cfg->verbose_level > 2)
10877 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
10880 for (; ins; ins = ins->next) {
10881 const char *spec = INS_INFO (ins->opcode);
10882 int regtype = 0, regindex;
10885 if (G_UNLIKELY (cfg->verbose_level > 2))
10886 mono_print_ins (ins);
/* Only decomposed (machine-level) opcodes are expected at this point. */
10888 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Walk dreg, sreg1, sreg2, sreg3 of this instruction. */
10890 for (regindex = 0; regindex < 4; regindex ++) {
10893 if (regindex == 0) {
10894 regtype = spec [MONO_INST_DEST];
10895 if (regtype == ' ')
10898 } else if (regindex == 1) {
10899 regtype = spec [MONO_INST_SRC1];
10900 if (regtype == ' ')
10903 } else if (regindex == 2) {
10904 regtype = spec [MONO_INST_SRC2];
10905 if (regtype == ' ')
10908 } else if (regindex == 3) {
10909 regtype = spec [MONO_INST_SRC3];
10910 if (regtype == ' ')
10915 #if SIZEOF_REGISTER == 4
10916 /* In the LLVM case, the long opcodes are not decomposed */
10917 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
10919 * Since some instructions reference the original long vreg,
10920 * and some reference the two component vregs, it is quite hard
10921 * to determine when it needs to be global. So be conservative.
10923 if (!get_vreg_to_inst (cfg, vreg)) {
10924 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10926 if (cfg->verbose_level > 2)
10927 printf ("LONG VREG R%d made global.\n", vreg);
10931 * Make the component vregs volatile since the optimizations can
10932 * get confused otherwise.
/* vreg + 1 / vreg + 2 are the two 32-bit halves of the long vreg. */
10934 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
10935 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
10939 g_assert (vreg != -1);
10941 prev_bb = vreg_to_bb [vreg];
10942 if (prev_bb == 0) {
10943 /* 0 is a valid block num */
10944 vreg_to_bb [vreg] = block_num + 1;
10945 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hardware registers are implicitly global; nothing to create for them. */
10946 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
10949 if (!get_vreg_to_inst (cfg, vreg)) {
10950 if (G_UNLIKELY (cfg->verbose_level > 2))
10951 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Create the variable with a type matching the spec regtype. */
10955 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
10958 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10961 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
10964 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
10967 g_assert_not_reached ();
10971 /* Flag as having been used in more than one bb */
10972 vreg_to_bb [vreg] = -1;
10978 /* If a variable is used in only one bblock, convert it into a local vreg */
10979 for (i = 0; i < cfg->num_varinfo; i++) {
10980 MonoInst *var = cfg->varinfo [i];
10981 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
10983 switch (var->type) {
10989 #if SIZEOF_REGISTER == 8
10992 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
10993 /* Enabling this screws up the fp stack on x86 */
10996 /* Arguments are implicitly global */
10997 /* Putting R4 vars into registers doesn't work currently */
10998 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
11000 * Make that the variable's liveness interval doesn't contain a call, since
11001 * that would cause the lvreg to be spilled, making the whole optimization
11004 /* This is too slow for JIT compilation */
/* vreg_to_bb [dreg] appears to be reused as a bblock pointer here (see the
 * ->code deref below) — the reassignment is in an elided line; confirm. */
11006 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
11008 int def_index, call_index, ins_index;
11009 gboolean spilled = FALSE;
11014 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
11015 const char *spec = INS_INFO (ins->opcode);
11017 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
11018 def_index = ins_index;
/*
 * NOTE(review): both arms of this || test SRC1/sreg1 — the second arm is a
 * duplicate and almost certainly should test spec [MONO_INST_SRC2] &&
 * ins->sreg2, otherwise a use through sreg2 is missed when deciding whether
 * a call falls between def and use.  Fixed upstream; verify before changing.
 */
11020 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
11021 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
11022 if (call_index > def_index) {
11028 if (MONO_IS_CALL (ins))
11029 call_index = ins_index;
11039 if (G_UNLIKELY (cfg->verbose_level > 2))
11040 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* Demote: mark the variable dead and drop the vreg -> var mapping. */
11041 var->flags |= MONO_INST_IS_DEAD;
11042 cfg->vreg_to_inst [var->dreg] = NULL;
11049 * Compress the varinfo and vars tables so the liveness computation is faster and
11050 * takes up less space.
11053 for (i = 0; i < cfg->num_varinfo; ++i) {
11054 MonoInst *var = cfg->varinfo [i];
11055 if (pos < i && cfg->locals_start == i)
11056 cfg->locals_start = pos;
11057 if (!(var->flags & MONO_INST_IS_DEAD)) {
/* Keep live entries, sliding them down to index `pos`. */
11059 cfg->varinfo [pos] = cfg->varinfo [i];
11060 cfg->varinfo [pos]->inst_c0 = pos;
11061 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
11062 cfg->vars [pos].idx = pos;
11063 #if SIZEOF_REGISTER == 4
11064 if (cfg->varinfo [pos]->type == STACK_I8) {
11065 /* Modify the two component vars too */
11068 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
11069 var1->inst_c0 = pos;
11070 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
11071 var1->inst_c0 = pos;
11078 cfg->num_varinfo = pos;
11079 if (cfg->locals_start > cfg->num_varinfo)
11080 cfg->locals_start = cfg->num_varinfo;
11084 * mono_spill_global_vars:
11086 * Generate spill code for variables which are not allocated to registers,
11087 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
11088 * code is generated which could be optimized by the local optimization passes.
/*
 * For every instruction: a dreg that maps to a stack variable gets a store
 * emitted after it, and each sreg gets a load emitted before it (or the
 * load/store is fused into the instruction where a _membase form exists).
 * Also records instruction-precise live ranges per vreg.
 * NOTE(review): interior lines (braces, some declarations such as `lvregs`,
 * `spec2`, `lvreg`, `no_lvreg`) are elided in this extract; comments describe
 * the visible lines only.
 */
11091 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
11093 MonoBasicBlock *bb;
11095 int orig_next_vreg;
11096 guint32 *vreg_to_lvreg;
11098 guint32 i, lvregs_len;
11099 gboolean dest_has_lvreg = FALSE;
11100 guint32 stacktypes [128];
11101 MonoInst **live_range_start, **live_range_end;
11102 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
11104 *need_local_opts = FALSE;
11106 memset (spec2, 0, sizeof (spec2));
/* Map INS_INFO regtype characters to stack types for alloc_dreg (). */
11108 /* FIXME: Move this function to mini.c */
11109 stacktypes ['i'] = STACK_PTR;
11110 stacktypes ['l'] = STACK_I8;
11111 stacktypes ['f'] = STACK_R8;
11112 #ifdef MONO_ARCH_SIMD_INTRINSICS
11113 stacktypes ['x'] = STACK_VTYPE;
11116 #if SIZEOF_REGISTER == 4
11117 /* Create MonoInsts for longs */
11118 for (i = 0; i < cfg->num_varinfo; i++) {
11119 MonoInst *ins = cfg->varinfo [i];
11121 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
11122 switch (ins->type) {
11127 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
11130 g_assert (ins->opcode == OP_REGOFFSET);
/* Give each 32-bit half (dreg + 1 = LS word, dreg + 2 = MS word) its own
 * REGOFFSET slot inside the parent variable's stack slot. */
11132 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
11134 tree->opcode = OP_REGOFFSET;
11135 tree->inst_basereg = ins->inst_basereg;
11136 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
11138 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
11140 tree->opcode = OP_REGOFFSET;
11141 tree->inst_basereg = ins->inst_basereg;
11142 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
11152 /* FIXME: widening and truncation */
11155 * As an optimization, when a variable allocated to the stack is first loaded into
11156 * an lvreg, we will remember the lvreg and use it the next time instead of loading
11157 * the variable again.
11159 orig_next_vreg = cfg->next_vreg;
11160 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
/* lvregs is a fixed 1024-entry list of vregs with a cached lvreg (asserted
 * against overflow below). */
11161 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
11165 * These arrays contain the first and last instructions accessing a given
11167 * Since we emit bblocks in the same order we process them here, and we
11168 * don't split live ranges, these will precisely describe the live range of
11169 * the variable, i.e. the instruction range where a valid value can be found
11170 * in the variables location.
11171 * The live range is computed using the liveness info computed by the liveness pass.
11172 * We can't use vmv->range, since that is an abstract live range, and we need
11173 * one which is instruction precise.
11174 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
11176 /* FIXME: Only do this if debugging info is requested */
11177 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
11178 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
11179 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
11180 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
11182 /* Add spill loads/stores */
11183 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
11186 if (cfg->verbose_level > 2)
11187 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
/* Cached lvregs are only valid within one bblock. */
11189 /* Clear vreg_to_lvreg array */
11190 for (i = 0; i < lvregs_len; i++)
11191 vreg_to_lvreg [lvregs [i]] = 0;
11195 MONO_BB_FOR_EACH_INS (bb, ins) {
11196 const char *spec = INS_INFO (ins->opcode);
11197 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
11198 gboolean store, no_lvreg;
11199 int sregs [MONO_MAX_SRC_REGS];
11201 if (G_UNLIKELY (cfg->verbose_level > 2))
11202 mono_print_ins (ins);
11204 if (ins->opcode == OP_NOP)
11208 * We handle LDADDR here as well, since it can only be decomposed
11209 * when variable addresses are known.
11211 if (ins->opcode == OP_LDADDR) {
11212 MonoInst *var = ins->inst_p0;
11214 if (var->opcode == OP_VTARG_ADDR) {
11215 /* Happens on SPARC/S390 where vtypes are passed by reference */
11216 MonoInst *vtaddr = var->inst_left;
11217 if (vtaddr->opcode == OP_REGVAR) {
11218 ins->opcode = OP_MOVE;
11219 ins->sreg1 = vtaddr->dreg;
11221 else if (var->inst_left->opcode == OP_REGOFFSET) {
11222 ins->opcode = OP_LOAD_MEMBASE;
11223 ins->inst_basereg = vtaddr->inst_basereg;
11224 ins->inst_offset = vtaddr->inst_offset;
11228 g_assert (var->opcode == OP_REGOFFSET);
/* LDADDR of a stack slot becomes basereg + offset. */
11230 ins->opcode = OP_ADD_IMM;
11231 ins->sreg1 = var->inst_basereg;
11232 ins->inst_imm = var->inst_offset;
11235 *need_local_opts = TRUE;
11236 spec = INS_INFO (ins->opcode);
/* IL-level opcodes must all have been lowered before this pass. */
11239 if (ins->opcode < MONO_CEE_LAST) {
11240 mono_print_ins (ins);
11241 g_assert_not_reached ();
11245 * Store opcodes have destbasereg in the dreg, but in reality, it is an
11249 if (MONO_IS_STORE_MEMBASE (ins)) {
/* Swap dreg/sreg2 so the base register is treated as a source below;
 * swapped back near the end of the loop. */
11250 tmp_reg = ins->dreg;
11251 ins->dreg = ins->sreg2;
11252 ins->sreg2 = tmp_reg;
11255 spec2 [MONO_INST_DEST] = ' ';
11256 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
11257 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
11258 spec2 [MONO_INST_SRC3] = ' ';
11260 } else if (MONO_IS_STORE_MEMINDEX (ins))
11261 g_assert_not_reached ();
11266 if (G_UNLIKELY (cfg->verbose_level > 2)) {
11267 printf ("\t %.3s %d", spec, ins->dreg);
11268 num_sregs = mono_inst_get_src_registers (ins, sregs);
11269 for (srcindex = 0; srcindex < 3; ++srcindex)
11270 printf (" %d", sregs [srcindex]);
/***************/
/*    DREG     */
/***************/
11277 regtype = spec [MONO_INST_DEST];
11278 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
11281 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
11282 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
11283 MonoInst *store_ins;
11285 MonoInst *def_ins = ins;
11286 int dreg = ins->dreg; /* The original vreg */
11288 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
11290 if (var->opcode == OP_REGVAR) {
/* Variable lives in a hardware register: just substitute it. */
11291 ins->dreg = var->dreg;
11292 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
11294 * Instead of emitting a load+store, use a _membase opcode.
11296 g_assert (var->opcode == OP_REGOFFSET);
11297 if (ins->opcode == OP_MOVE) {
11301 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
11302 ins->inst_basereg = var->inst_basereg;
11303 ins->inst_offset = var->inst_offset;
11306 spec = INS_INFO (ins->opcode);
11310 g_assert (var->opcode == OP_REGOFFSET);
11312 prev_dreg = ins->dreg;
11314 /* Invalidate any previous lvreg for this vreg */
11315 vreg_to_lvreg [ins->dreg] = 0;
11319 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
/* Soft-float: R8 values are stored as I8 bit patterns. */
11321 store_opcode = OP_STOREI8_MEMBASE_REG;
/* Redirect the def to a fresh lvreg; the store below spills it. */
11324 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
11326 if (regtype == 'l') {
11327 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
11328 mono_bblock_insert_after_ins (bb, ins, store_ins);
11329 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
11330 mono_bblock_insert_after_ins (bb, ins, store_ins);
11331 def_ins = store_ins;
11334 g_assert (store_opcode != OP_STOREV_MEMBASE);
11336 /* Try to fuse the store into the instruction itself */
11337 /* FIXME: Add more instructions */
11338 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
/* const -> variable: store the immediate directly. */
11339 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
11340 ins->inst_imm = ins->inst_c0;
11341 ins->inst_destbasereg = var->inst_basereg;
11342 ins->inst_offset = var->inst_offset;
11343 spec = INS_INFO (ins->opcode);
11344 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
/* move -> variable: turn the move itself into the store. */
11345 ins->opcode = store_opcode;
11346 ins->inst_destbasereg = var->inst_basereg;
11347 ins->inst_offset = var->inst_offset;
11351 tmp_reg = ins->dreg;
11352 ins->dreg = ins->sreg2;
11353 ins->sreg2 = tmp_reg;
11356 spec2 [MONO_INST_DEST] = ' ';
11357 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
11358 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
11359 spec2 [MONO_INST_SRC3] = ' ';
11361 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
11362 // FIXME: The backends expect the base reg to be in inst_basereg
11363 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
11365 ins->inst_basereg = var->inst_basereg;
11366 ins->inst_offset = var->inst_offset;
11367 spec = INS_INFO (ins->opcode);
11369 /* printf ("INS: "); mono_print_ins (ins); */
11370 /* Create a store instruction */
11371 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
11373 /* Insert it after the instruction */
11374 mono_bblock_insert_after_ins (bb, ins, store_ins);
11376 def_ins = store_ins;
11379 * We can't assign ins->dreg to var->dreg here, since the
11380 * sregs could use it. So set a flag, and do it after
/* fp-stack targets can't cache fp lvregs; volatile/indirect vars must
 * always be reloaded from memory. */
11383 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
11384 dest_has_lvreg = TRUE;
11389 if (def_ins && !live_range_start [dreg]) {
11390 live_range_start [dreg] = def_ins;
11391 live_range_start_bb [dreg] = bb;
/************/
/*  SREGS   */
/************/
11398 num_sregs = mono_inst_get_src_registers (ins, sregs);
11399 for (srcindex = 0; srcindex < 3; ++srcindex) {
11400 regtype = spec [MONO_INST_SRC1 + srcindex];
11401 sreg = sregs [srcindex];
11403 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
11404 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
11405 MonoInst *var = get_vreg_to_inst (cfg, sreg);
11406 MonoInst *use_ins = ins;
11407 MonoInst *load_ins;
11408 guint32 load_opcode;
11410 if (var->opcode == OP_REGVAR) {
11411 sregs [srcindex] = var->dreg;
11412 //mono_inst_set_src_registers (ins, sregs);
11413 live_range_end [sreg] = use_ins;
11414 live_range_end_bb [sreg] = bb;
11418 g_assert (var->opcode == OP_REGOFFSET);
11420 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
11422 g_assert (load_opcode != OP_LOADV_MEMBASE);
11424 if (vreg_to_lvreg [sreg]) {
11425 g_assert (vreg_to_lvreg [sreg] != -1);
11427 /* The variable is already loaded to an lvreg */
11428 if (G_UNLIKELY (cfg->verbose_level > 2))
11429 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
11430 sregs [srcindex] = vreg_to_lvreg [sreg];
11431 //mono_inst_set_src_registers (ins, sregs);
11435 /* Try to fuse the load into the instruction */
11436 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
11437 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
11438 sregs [0] = var->inst_basereg;
11439 //mono_inst_set_src_registers (ins, sregs);
11440 ins->inst_offset = var->inst_offset;
11441 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
11442 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
11443 sregs [1] = var->inst_basereg;
11444 //mono_inst_set_src_registers (ins, sregs);
11445 ins->inst_offset = var->inst_offset;
11447 if (MONO_IS_REAL_MOVE (ins)) {
/* A move whose source is reloaded below degenerates to the load itself. */
11448 ins->opcode = OP_NOP;
11451 //printf ("%d ", srcindex); mono_print_ins (ins);
11453 sreg = alloc_dreg (cfg, stacktypes [regtype]);
11455 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
11456 if (var->dreg == prev_dreg) {
11458 * sreg refers to the value loaded by the load
11459 * emitted below, but we need to use ins->dreg
11460 * since it refers to the store emitted earlier.
11464 g_assert (sreg != -1);
/* Remember the lvreg so later uses in this bblock skip the reload. */
11465 vreg_to_lvreg [var->dreg] = sreg;
11466 g_assert (lvregs_len < 1024);
11467 lvregs [lvregs_len ++] = var->dreg;
11471 sregs [srcindex] = sreg;
11472 //mono_inst_set_src_registers (ins, sregs);
11474 if (regtype == 'l') {
/* MS word loaded first, then LS word (insert_before reverses order). */
11475 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
11476 mono_bblock_insert_before_ins (bb, ins, load_ins);
11477 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
11478 mono_bblock_insert_before_ins (bb, ins, load_ins);
11479 use_ins = load_ins;
11482 #if SIZEOF_REGISTER == 4
11483 g_assert (load_opcode != OP_LOADI8_MEMBASE);
11485 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
11486 mono_bblock_insert_before_ins (bb, ins, load_ins);
11487 use_ins = load_ins;
/* Only vregs that existed before this pass have live-range slots. */
11491 if (var->dreg < orig_next_vreg) {
11492 live_range_end [var->dreg] = use_ins;
11493 live_range_end_bb [var->dreg] = bb;
11497 mono_inst_set_src_registers (ins, sregs);
/* Deferred from the DREG phase: publish the dest lvreg now that the
 * sregs no longer need the original value. */
11499 if (dest_has_lvreg) {
11500 g_assert (ins->dreg != -1);
11501 vreg_to_lvreg [prev_dreg] = ins->dreg;
11502 g_assert (lvregs_len < 1024);
11503 lvregs [lvregs_len ++] = prev_dreg;
11504 dest_has_lvreg = FALSE;
/* Undo the earlier store-opcode dreg/sreg2 swap. */
11508 tmp_reg = ins->dreg;
11509 ins->dreg = ins->sreg2;
11510 ins->sreg2 = tmp_reg;
11513 if (MONO_IS_CALL (ins)) {
/* Calls clobber the lvregs, so drop the whole cache. */
11514 /* Clear vreg_to_lvreg array */
11515 for (i = 0; i < lvregs_len; i++)
11516 vreg_to_lvreg [lvregs [i]] = 0;
11518 } else if (ins->opcode == OP_NOP) {
11520 MONO_INST_NULLIFY_SREGS (ins);
11523 if (cfg->verbose_level > 2)
11524 mono_print_ins_index (1, ins);
11527 /* Extend the live range based on the liveness info */
11528 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
11529 for (i = 0; i < cfg->num_varinfo; i ++) {
11530 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
11532 if (vreg_is_volatile (cfg, vi->vreg))
11533 /* The liveness info is incomplete */
11536 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
11537 /* Live from at least the first ins of this bb */
11538 live_range_start [vi->vreg] = bb->code;
11539 live_range_start_bb [vi->vreg] = bb;
11542 if (mono_bitset_test_fast (bb->live_out_set, i)) {
11543 /* Live at least until the last ins of this bb */
11544 live_range_end [vi->vreg] = bb->last_ins;
11545 live_range_end_bb [vi->vreg] = bb;
11551 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
11553 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
11554 * by storing the current native offset into MonoMethodVar->live_range_start/end.
11556 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
11557 for (i = 0; i < cfg->num_varinfo; ++i) {
11558 int vreg = MONO_VARINFO (cfg, i)->vreg;
11561 if (live_range_start [vreg]) {
11562 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
11564 ins->inst_c1 = vreg;
11565 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
11567 if (live_range_end [vreg]) {
11568 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
11570 ins->inst_c1 = vreg;
11571 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
11572 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
11574 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
/* Heap-allocated with g_new0/g_new above — freed here, unlike the
 * mempool-backed arrays which live until the MonoCompile is destroyed. */
11580 g_free (live_range_start);
11581 g_free (live_range_end);
11582 g_free (live_range_start_bb);
11583 g_free (live_range_end_bb);
/*
 * mono_create_helper_signatures:
 *
 *   Pre-build the MonoMethodSignature objects used when emitting calls to the
 * JIT's internal helpers/trampolines.  The string argument to
 * mono_create_icall_signature () is "<ret> <arg1> <arg2> ..." using the icall
 * type shorthand ("void", "ptr", "object", ...).  Called once at JIT startup;
 * results are cached in the file-scope helper_sig_* globals.
 */
11587 mono_create_helper_signatures (void)
11589 helper_sig_domain_get = mono_create_icall_signature ("ptr");
11590 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
11591 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
11592 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
11593 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
11594 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
11595 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
11600 * - use 'iadd' instead of 'int_add'
11601 * - handling ovf opcodes: decompose in method_to_ir.
11602 * - unify iregs/fregs
11603 * -> partly done, the missing parts are:
11604 * - a more complete unification would involve unifying the hregs as well, so
11605 * code wouldn't need if (fp) all over the place. but that would mean the hregs
11606 * would no longer map to the machine hregs, so the code generators would need to
11607 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
11608 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
11609 * fp/non-fp branches speeds it up by about 15%.
11610 * - use sext/zext opcodes instead of shifts
11612 * - get rid of TEMPLOADs if possible and use vregs instead
11613 * - clean up usage of OP_P/OP_ opcodes
11614 * - cleanup usage of DUMMY_USE
11615 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
11617 * - set the stack type and allocate a dreg in the EMIT_NEW macros
11618 * - get rid of all the <foo>2 stuff when the new JIT is ready.
11619 * - make sure handle_stack_args () is called before the branch is emitted
11620 * - when the new IR is done, get rid of all unused stuff
11621 * - COMPARE/BEQ as separate instructions or unify them ?
11622 * - keeping them separate allows specialized compare instructions like
11623 * compare_imm, compare_membase
11624 * - most back ends unify fp compare+branch, fp compare+ceq
11625 * - integrate mono_save_args into inline_method
11626 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
11627 * - handle long shift opts on 32 bit platforms somehow: they require
11628 * 3 sregs (2 for arg1 and 1 for arg2)
11629 * - make byref a 'normal' type.
11630 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
11631 * variable if needed.
11632 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11633 * like inline_method.
11634 * - remove inlining restrictions
11635 * - fix LNEG and enable cfold of INEG
11636 * - generalize x86 optimizations like ldelema as a peephole optimization
11637 * - add store_mem_imm for amd64
11638 * - optimize the loading of the interruption flag in the managed->native wrappers
11639 * - avoid special handling of OP_NOP in passes
11640 * - move code inserting instructions into one function/macro.
11641 * - try a coalescing phase after liveness analysis
11642 * - add float -> vreg conversion + local optimizations on !x86
11643 * - figure out how to handle decomposed branches during optimizations, ie.
11644 * compare+branch, op_jump_table+op_br etc.
11645 * - promote RuntimeXHandles to vregs
11646 * - vtype cleanups:
11647 * - add a NEW_VARLOADA_VREG macro
11648 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11649 * accessing vtype fields.
11650 * - get rid of I8CONST on 64 bit platforms
11651 * - dealing with the increase in code size due to branches created during opcode
11653 * - use extended basic blocks
11654 * - all parts of the JIT
11655 * - handle_global_vregs () && local regalloc
11656 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11657 * - sources of increase in code size:
11660 * - isinst and castclass
11661 * - lvregs not allocated to global registers even if used multiple times
11662 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11664 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11665 * - add all micro optimizations from the old JIT
11666 * - put tree optimizations into the deadce pass
11667 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11668 * specific function.
11669 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11670 * fcompare + branchCC.
11671 * - create a helper function for allocating a stack slot, taking into account
11672 * MONO_CFG_HAS_SPILLUP.
11674 * - merge the ia64 switch changes.
11675 * - optimize mono_regstate2_alloc_int/float.
11676 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11677 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11678 * parts of the tree could be separated by other instructions, killing the tree
11679 * arguments, or stores killing loads etc. Also, should we fold loads into other
11680 * instructions if the result of the load is used multiple times ?
11681 * - make the REM_IMM optimization in mini-x86.c arch-independent.
11682 * - LAST MERGE: 108395.
11683 * - when returning vtypes in registers, generate IR and append it to the end of the
11684 * last bb instead of doing it in the epilog.
11685 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
11693 - When to decompose opcodes:
11694 - earlier: this makes some optimizations hard to implement, since the low level IR
11695 no longer contains the necessary information. But it is easier to do.
11696 - later: harder to implement, enables more optimizations.
11697 - Branches inside bblocks:
11698 - created when decomposing complex opcodes.
11699 - branches to another bblock: harmless, but not tracked by the branch
11700 optimizations, so need to branch to a label at the start of the bblock.
11701 - branches to inside the same bblock: very problematic, trips up the local
11702 reg allocator. Can be fixed by splitting the current bblock, but that is a
11703 complex operation, since some local vregs can become global vregs etc.
11704 - Local/global vregs:
11705 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11706 local register allocator.
11707 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11708 structure, created by mono_create_var (). Assigned to hregs or the stack by
11709 the global register allocator.
11710 - When to do optimizations like alu->alu_imm:
11711 - earlier -> saves work later on since the IR will be smaller/simpler
11712 - later -> can work on more instructions
11713 - Handling of valuetypes:
11714 - When a vtype is pushed on the stack, a new temporary is created, an
11715 instruction computing its address (LDADDR) is emitted and pushed on
11716 the stack. Need to optimize cases when the vtype is used immediately as in
11717 argument passing, stloc etc.
11718 - Instead of the to_end stuff in the old JIT, simply call the function handling
11719 the values on the stack before emitting the last instruction of the bb.
11722 #endif /* DISABLE_JIT */