2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
30 #ifdef HAVE_VALGRIND_MEMCHECK_H
31 #include <valgrind/memcheck.h>
34 #include <mono/metadata/assembly.h>
35 #include <mono/metadata/loader.h>
36 #include <mono/metadata/tabledefs.h>
37 #include <mono/metadata/class.h>
38 #include <mono/metadata/object.h>
39 #include <mono/metadata/exception.h>
40 #include <mono/metadata/opcodes.h>
41 #include <mono/metadata/mono-endian.h>
42 #include <mono/metadata/tokentype.h>
43 #include <mono/metadata/tabledefs.h>
44 #include <mono/metadata/marshal.h>
45 #include <mono/metadata/debug-helpers.h>
46 #include <mono/metadata/mono-debug.h>
47 #include <mono/metadata/gc-internal.h>
48 #include <mono/metadata/security-manager.h>
49 #include <mono/metadata/threads-types.h>
50 #include <mono/metadata/security-core-clr.h>
51 #include <mono/metadata/monitor.h>
52 #include <mono/metadata/profiler-private.h>
53 #include <mono/metadata/profiler.h>
54 #include <mono/utils/mono-compiler.h>
61 #include "jit-icalls.h"
62 #include "debugger-agent.h"
/* Relative cost the inliner assigns to a branch when sizing candidates. */
64 #define BRANCH_COST 100
/* Maximum IL size (in bytes) of a method considered for inlining. */
65 #define INLINE_LENGTH_LIMIT 20
/* Signal inline failure when compiling an inlined, non-wrapper method.
 * NOTE(review): macro body is truncated in this excerpt — confirm against
 * the full source. */
66 #define INLINE_FAILURE do {\
67 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
/* Bail out of IR generation if a compile-time exception was recorded on cfg. */
70 #define CHECK_CFG_EXCEPTION do {\
71 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
/* Record a MethodAccessException ('cil_method' inaccessible from 'method'),
 * free the temporary name strings, and take the shared exception exit. */
74 #define METHOD_ACCESS_FAILURE do { \
75 char *method_fname = mono_method_full_name (method, TRUE); \
76 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
77 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
78 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
79 g_free (method_fname); \
80 g_free (cil_method_fname); \
81 goto exception_exit; \
/* Same pattern as METHOD_ACCESS_FAILURE, but for an inaccessible field. */
83 #define FIELD_ACCESS_FAILURE do { \
84 char *method_fname = mono_method_full_name (method, TRUE); \
85 char *field_fname = mono_field_full_name (field); \
86 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
87 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
88 g_free (method_fname); \
89 g_free (field_fname); \
90 goto exception_exit; \
/* Abort generic code sharing for this method: optionally log the opcode
 * that defeated sharing, record the failure, and take the exception exit. */
92 #define GENERIC_SHARING_FAILURE(opcode) do { \
93 if (cfg->generic_sharing_context) { \
94 if (cfg->verbose_level > 2) \
95 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
96 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
97 goto exception_exit; \
101 /* Determine whenever 'ins' represents a load of the 'this' argument */
102 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations: map CIL ldind/stind opcodes to *_MEMBASE IR ops. */
104 static int ldind_to_load_membase (int opcode);
105 static int stind_to_store_membase (int opcode);
/* Opcode -> immediate-operand-variant mappings (defined elsewhere in mini). */
107 int mono_op_to_op_imm (int opcode);
108 int mono_op_to_op_imm_noemul (int opcode);
/* IR emission helpers shared with other translation units. */
110 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
111 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
112 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
114 /* helper methods signature */
/* Signatures for the runtime trampoline helpers; presumably created during
 * JIT initialization — defined in another file, hence 'extern'. */
115 extern MonoMethodSignature *helper_sig_class_init_trampoline;
116 extern MonoMethodSignature *helper_sig_domain_get;
117 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
118 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
119 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
120 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
123 * Instruction metadata
/* First expansion of mini-ops.h: one dest/src1/src2(/src3) register-type
 * character tuple per opcode. */
131 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
132 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
138 #if SIZEOF_REGISTER == 8
143 /* keep in sync with the enum in mini.h */
146 #include "mini-ops.h"
/* Second expansion of mini-ops.h: for each opcode, the index of its last
 * used source register + 1 (see comment below). */
151 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
152 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
154 * This should contain the index of the last sreg + 1. This is not the same
155 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
157 const gint8 ins_sreg_counts[] = {
158 #include "mini-ops.h"
163 extern GHashTable *jit_icall_name_hash;
/* Initialize a variable-info record; 0xffff marks "first use not yet seen". */
165 #define MONO_INIT_VARINFO(vi,id) do { \
166 (vi)->range.first_use.pos.bid = 0xffff; \
/* Copy the (up to) three source registers from 'regs' into 'ins'. */
172 mono_inst_set_src_registers (MonoInst *ins, int *regs)
174 ins->sreg1 = regs [0];
175 ins->sreg2 = regs [1];
176 ins->sreg3 = regs [2];
/* Public wrappers over the internal virtual-register allocators:
 * integer, float, pointer-sized, and stack-type-driven destination regs. */
180 mono_alloc_ireg (MonoCompile *cfg)
182 return alloc_ireg (cfg);
186 mono_alloc_freg (MonoCompile *cfg)
188 return alloc_freg (cfg);
192 mono_alloc_preg (MonoCompile *cfg)
194 return alloc_preg (cfg);
198 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
200 return alloc_dreg (cfg, stack_type);
/*
 * mono_type_to_regmove:
 * Return the move opcode suited to transferring a value of 'type' between
 * registers. Enums are reduced to their underlying basetype and generic
 * instances to their container class before re-dispatching.
 * NOTE(review): several case bodies and return statements are elided in
 * this excerpt — verify against the full source.
 */
204 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
210 switch (type->type) {
213 case MONO_TYPE_BOOLEAN:
225 case MONO_TYPE_FNPTR:
227 case MONO_TYPE_CLASS:
228 case MONO_TYPE_STRING:
229 case MONO_TYPE_OBJECT:
230 case MONO_TYPE_SZARRAY:
231 case MONO_TYPE_ARRAY:
235 #if SIZEOF_REGISTER == 8
244 case MONO_TYPE_VALUETYPE:
/* Enum value types behave as their underlying integral type. */
245 if (type->data.klass->enumtype) {
246 type = mono_class_enum_basetype (type->data.klass);
/* SIMD-capable value types get special handling (vector registers). */
249 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
252 case MONO_TYPE_TYPEDBYREF:
254 case MONO_TYPE_GENERICINST:
255 type = &type->data.generic_class->container_class->byval_arg;
/* Open type variables only appear when generic sharing is active. */
259 g_assert (cfg->generic_sharing_context);
262 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb:
 * Debug helper — prints 'msg', the block number, the in- and out-edges of
 * 'bb' (each neighbour's block number and depth-first number), then every
 * instruction in the block via mono_print_ins_index.
 */
268 mono_print_bb (MonoBasicBlock *bb, const char *msg)
273 printf ("\n%s %d: [IN: ", msg, bb->block_num);
274 for (i = 0; i < bb->in_count; ++i)
275 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
277 for (i = 0; i < bb->out_count; ++i)
278 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
280 for (tree = bb->code; tree; tree = tree->next)
281 mono_print_ins_index (-1, tree);
285 * Can't put this at the beginning, since other files reference stuff from this
/* Mark the method as unverified: break into the debugger when requested,
 * otherwise jump to the local 'unverified' exit label. */
290 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
/* Look up — or lazily create and register — the basic block starting at CIL
 * address 'ip'; 'ip' is range-checked against the method body first. */
292 #define GET_BBLOCK(cfg,tblock,ip) do { \
293 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
295 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
296 NEW_BBLOCK (cfg, (tblock)); \
297 (tblock)->cil_code = (ip); \
298 ADD_BBLOCK (cfg, (tblock)); \
302 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* x86/amd64 only: emit an OP_X86_LEA computing sr1 + (sr2 << shift) + imm
 * into a freshly allocated pointer-sized register. */
303 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
304 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
305 (dest)->dreg = alloc_preg ((cfg)); \
306 (dest)->sreg1 = (sr1); \
307 (dest)->sreg2 = (sr2); \
308 (dest)->inst_imm = (imm); \
309 (dest)->backend.shift_amount = (shift); \
310 MONO_ADD_INS ((cfg)->cbb, (dest)); \
314 #if SIZEOF_REGISTER == 8
/* 64-bit targets: when a pointer-sized and a 32-bit int operand are mixed,
 * sign-extend the I4 operand so the binop sees matching widths. */
315 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
316 /* FIXME: Need to add many more cases */ \
317 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
319 int dr = alloc_preg (cfg); \
320 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
321 (ins)->sreg2 = widen->dreg; \
/* 32-bit targets need no widening. */
325 #define ADD_WIDEN_OP(ins, arg1, arg2)
/* Pop two eval-stack values, build the typed binary op (widening when
 * necessary), and push the possibly-decomposed result. */
328 #define ADD_BINOP(op) do { \
329 MONO_INST_NEW (cfg, ins, (op)); \
331 ins->sreg1 = sp [0]->dreg; \
332 ins->sreg2 = sp [1]->dreg; \
333 type_from_op (ins, sp [0], sp [1]); \
335 /* Have to insert a widening op */ \
336 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
337 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
338 MONO_ADD_INS ((cfg)->cbb, (ins)); \
339 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
/* Unary counterpart of ADD_BINOP: pop one value, type and push the result. */
342 #define ADD_UNOP(op) do { \
343 MONO_INST_NEW (cfg, ins, (op)); \
345 ins->sreg1 = sp [0]->dreg; \
346 type_from_op (ins, sp [0], NULL); \
348 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
349 MONO_ADD_INS ((cfg)->cbb, (ins)); \
350 *sp++ = mono_decompose_opcode (cfg, ins); \
/* Build a compare + conditional-branch pair: resolves the true target from
 * 'target', the false target from 'next_block' (or the fall-through at ip),
 * links the CFG edges, and flushes any leftover eval-stack values before
 * emitting the pair at the end of the current bblock. */
353 #define ADD_BINCOND(next_block) do { \
356 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
357 cmp->sreg1 = sp [0]->dreg; \
358 cmp->sreg2 = sp [1]->dreg; \
359 type_from_op (cmp, sp [0], sp [1]); \
361 type_from_op (ins, sp [0], sp [1]); \
362 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
363 GET_BBLOCK (cfg, tblock, target); \
364 link_bblock (cfg, bblock, tblock); \
365 ins->inst_true_bb = tblock; \
366 if ((next_block)) { \
367 link_bblock (cfg, bblock, (next_block)); \
368 ins->inst_false_bb = (next_block); \
369 start_new_bblock = 1; \
371 GET_BBLOCK (cfg, tblock, ip); \
372 link_bblock (cfg, bblock, tblock); \
373 ins->inst_false_bb = tblock; \
374 start_new_bblock = 2; \
376 if (sp != stack_start) { \
377 handle_stack_args (cfg, stack_start, sp - stack_start); \
378 CHECK_UNVERIFIABLE (cfg); \
380 MONO_ADD_INS (bblock, cmp); \
381 MONO_ADD_INS (bblock, ins); \
385 * link_bblock: Links two basic blocks
387 * links two basic blocks in the control flow graph, the 'from'
388 * argument is the starting block and the 'to' argument is the block
389 * the control flow ends to after 'from'.
/* Adds 'to' to from->out_bb and 'from' to to->in_bb, skipping duplicates.
 * Edge arrays are grown by copying into a fresh mempool allocation.
 * Verbose tracing prints the edge endpoints by CIL offset. */
392 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
394 MonoBasicBlock **newa;
398 if (from->cil_code) {
400 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
402 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
405 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
407 printf ("edge from entry to exit\n");
/* Avoid duplicate out-edges. */
412 for (i = 0; i < from->out_count; ++i) {
413 if (to == from->out_bb [i]) {
/* Grow from->out_bb by one (mempool allocation; old array is abandoned). */
419 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
420 for (i = 0; i < from->out_count; ++i) {
421 newa [i] = from->out_bb [i];
/* Avoid duplicate in-edges. */
429 for (i = 0; i < to->in_count; ++i) {
430 if (from == to->in_bb [i]) {
436 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
437 for (i = 0; i < to->in_count; ++i) {
438 newa [i] = to->in_bb [i];
/* Public wrapper around the static link_bblock above. */
447 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
449 link_bblock (cfg, from, to);
453 * mono_find_block_region:
455 * We mark each basic block with a region ID. We use that to avoid BB
456 * optimizations when blocks are in different regions.
459 * A region token that encodes where this region is, and information
460 * about the clause owner for this block.
462 * The region encodes the try/catch/filter clause that owns this block
463 * as well as the type. -1 is a special value that represents a block
464 * that is in none of try/catch/filter.
467 mono_find_block_region (MonoCompile *cfg, int offset)
469 MonoMethod *method = cfg->method;
470 MonoMethodHeader *header = mono_method_get_header (method);
471 MonoExceptionClause *clause;
/* Scan every exception clause; the token is ((clause index + 1) << 8)
 * combined with a region kind and the clause flags. */
474 for (i = 0; i < header->num_clauses; ++i) {
475 clause = &header->clauses [i];
/* Filter blocks: offset lies within the filter expression itself. */
476 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
477 (offset < (clause->handler_offset)))
478 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
/* Handler blocks: finally / fault / catch, by clause flags. */
480 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
481 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
482 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
483 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
484 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
486 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Otherwise the offset may be inside the protected (try) range. */
489 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
490 return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block:
 * Collect the handler basic blocks of clauses of kind 'type' whose
 * protected range contains 'ip' but not 'target' — i.e. the handlers that
 * must run when control leaves the clause toward 'target'. Returns a GList
 * of MonoBasicBlock* (caller presumably owns the list — TODO confirm).
 */
497 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
499 MonoMethod *method = cfg->method;
500 MonoMethodHeader *header = mono_method_get_header (method);
501 MonoExceptionClause *clause;
502 MonoBasicBlock *handler;
506 for (i = 0; i < header->num_clauses; ++i) {
507 clause = &header->clauses [i];
/* Clause is being exited: ip inside, branch target outside. */
508 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
509 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
510 if (clause->flags == type) {
511 handler = cfg->cil_offset_to_bb [clause->handler_offset];
513 res = g_list_append (res, handler);
/*
 * mono_create_spvar_for_region:
 * Return the stack-pointer save variable for exception 'region', creating
 * and caching it in cfg->spvars on first use. Marked MONO_INST_INDIRECT so
 * the register allocator leaves it on the stack.
 */
521 mono_create_spvar_for_region (MonoCompile *cfg, int region)
525 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
529 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
530 /* prevent it from being register allocated */
531 var->flags |= MONO_INST_INDIRECT;
533 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Return the exception-object variable cached for CIL 'offset', or NULL. */
537 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
539 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/*
 * mono_create_exvar_for_offset:
 * Same lookup as above, but lazily creates an object-typed local (kept off
 * the register allocator via MONO_INST_INDIRECT) and caches it in
 * cfg->exvars.
 */
543 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
547 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
551 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
552 /* prevent it from being register allocated */
553 var->flags |= MONO_INST_INDIRECT;
555 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
561 * Returns the type used in the eval stack when @type is loaded.
562 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/* Sets inst->type (STACK_*) and inst->klass for the value produced when a
 * value of 'type' is pushed. Enums and generic instances are reduced to an
 * underlying type and re-dispatched.
 * NOTE(review): some case labels/returns are elided in this excerpt. */
565 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
569 inst->klass = klass = mono_class_from_mono_type (type);
/* Byref values appear on the stack as managed pointers. */
571 inst->type = STACK_MP;
576 switch (type->type) {
578 inst->type = STACK_INV;
582 case MONO_TYPE_BOOLEAN:
588 inst->type = STACK_I4;
593 case MONO_TYPE_FNPTR:
594 inst->type = STACK_PTR;
596 case MONO_TYPE_CLASS:
597 case MONO_TYPE_STRING:
598 case MONO_TYPE_OBJECT:
599 case MONO_TYPE_SZARRAY:
600 case MONO_TYPE_ARRAY:
601 inst->type = STACK_OBJ;
605 inst->type = STACK_I8;
609 inst->type = STACK_R8;
611 case MONO_TYPE_VALUETYPE:
/* Enums load as their underlying integral type. */
612 if (type->data.klass->enumtype) {
613 type = mono_class_enum_basetype (type->data.klass);
617 inst->type = STACK_VTYPE;
620 case MONO_TYPE_TYPEDBYREF:
621 inst->klass = mono_defaults.typed_reference_class;
622 inst->type = STACK_VTYPE;
624 case MONO_TYPE_GENERICINST:
625 type = &type->data.generic_class->container_class->byval_arg;
628 case MONO_TYPE_MVAR :
629 /* FIXME: all the arguments must be references for now,
630 * later look inside cfg and see if the arg num is
/* Type variables are treated as references under generic sharing. */
633 g_assert (cfg->generic_sharing_context);
634 inst->type = STACK_OBJ;
637 g_error ("unknown type 0x%02x in eval stack type", type->type);
642 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result stack-type of numeric binops, indexed [lhs STACK_*][rhs STACK_*];
 * STACK_INV marks an invalid operand pairing. */
645 bin_num_table [STACK_MAX] [STACK_MAX] = {
646 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
647 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
648 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
649 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
650 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
651 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
652 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
653 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Result type of a unary negation per operand stack type. */
658 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
661 /* reduce the size of this table */
/* Result type of integer-only binops (and/or/xor and friends). */
663 bin_int_table [STACK_MAX] [STACK_MAX] = {
664 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
665 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
666 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
667 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
668 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
669 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
670 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
671 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparison validity matrix: 0 = invalid pair; non-zero values encode
 * allowed (and conditionally-allowed) comparisons between stack types. */
675 bin_comp_table [STACK_MAX] [STACK_MAX] = {
676 /* Inv i L p F & O vt */
678 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
679 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
680 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
681 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
682 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
683 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
684 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
687 /* reduce the size of this table */
/* Result type of shift ops: shift amount must be I4 or native int. */
689 shift_table [STACK_MAX] [STACK_MAX] = {
690 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
691 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
692 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
693 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
694 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
695 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
696 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
697 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
701 * Tables to map from the non-specific opcode to the matching
702 * type-specific opcode.
/* Each entry is a delta added to the generic opcode to select the
 * I/L/P/F/P-specific variant, indexed by operand stack type. */
704 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
706 binops_op_map [STACK_MAX] = {
707 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
710 /* handles from CEE_NEG to CEE_CONV_U8 */
712 unops_op_map [STACK_MAX] = {
713 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
716 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
718 ovfops_op_map [STACK_MAX] = {
719 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
722 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
724 ovf2ops_op_map [STACK_MAX] = {
725 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
728 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
730 ovf3ops_op_map [STACK_MAX] = {
731 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
734 /* handles from CEE_BEQ to CEE_BLT_UN */
736 beqops_op_map [STACK_MAX] = {
737 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
740 /* handles from CEE_CEQ to CEE_CLT_UN */
742 ceqops_op_map [STACK_MAX] = {
743 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
747 * Sets ins->type (the type on the eval stack) according to the
748 * type of the opcode and the arguments to it.
749 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
751 * FIXME: this function sets ins->type unconditionally in some cases, but
752 * it should set it to invalid for some types (a conv.x on an object)
/* NOTE(review): many case labels and break statements are elided in this
 * excerpt; the structure below is partial. */
755 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
757 switch (ins->opcode) {
/* Arithmetic binops: validate operand pair, then specialize the opcode. */
764 /* FIXME: check unverifiable args for STACK_MP */
765 ins->type = bin_num_table [src1->type] [src2->type];
766 ins->opcode += binops_op_map [ins->type];
/* Integer-only binops (and/or/xor ...). */
773 ins->type = bin_int_table [src1->type] [src2->type];
774 ins->opcode += binops_op_map [ins->type];
/* Shifts: amount must be an integer type. */
779 ins->type = shift_table [src1->type] [src2->type];
780 ins->opcode += binops_op_map [ins->type];
/* Compares: pick L/F/I variant by the width/kind of the first operand. */
785 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
786 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
787 ins->opcode = OP_LCOMPARE;
788 else if (src1->type == STACK_R8)
789 ins->opcode = OP_FCOMPARE;
791 ins->opcode = OP_ICOMPARE;
793 case OP_ICOMPARE_IMM:
794 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
795 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
796 ins->opcode = OP_LCOMPARE_IMM;
/* Conditional branches / set-on-compare: specialize via the op maps. */
808 ins->opcode += beqops_op_map [src1->type];
811 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
812 ins->opcode += ceqops_op_map [src1->type];
/* Unsigned compares: only pairings flagged with bit 0 in the table. */
818 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
819 ins->opcode += ceqops_op_map [src1->type];
/* Unary neg: result type from neg_table. */
823 ins->type = neg_table [src1->type];
824 ins->opcode += unops_op_map [ins->type];
/* not: valid only on integer/pointer operands. */
827 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
828 ins->type = src1->type;
830 ins->type = STACK_INV;
831 ins->opcode += unops_op_map [ins->type];
/* Narrowing conversions to I1/U1/I2/U2/I4/U4 produce an I4. */
837 ins->type = STACK_I4;
838 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: unsigned int -> float. */
841 ins->type = STACK_R8;
842 switch (src1->type) {
845 ins->opcode = OP_ICONV_TO_R_UN;
848 ins->opcode = OP_LCONV_TO_R_UN;
/* Overflow-checked conversions to 32-bit results. */
852 case CEE_CONV_OVF_I1:
853 case CEE_CONV_OVF_U1:
854 case CEE_CONV_OVF_I2:
855 case CEE_CONV_OVF_U2:
856 case CEE_CONV_OVF_I4:
857 case CEE_CONV_OVF_U4:
858 ins->type = STACK_I4;
859 ins->opcode += ovf3ops_op_map [src1->type];
861 case CEE_CONV_OVF_I_UN:
862 case CEE_CONV_OVF_U_UN:
863 ins->type = STACK_PTR;
864 ins->opcode += ovf2ops_op_map [src1->type];
866 case CEE_CONV_OVF_I1_UN:
867 case CEE_CONV_OVF_I2_UN:
868 case CEE_CONV_OVF_I4_UN:
869 case CEE_CONV_OVF_U1_UN:
870 case CEE_CONV_OVF_U2_UN:
871 case CEE_CONV_OVF_U4_UN:
872 ins->type = STACK_I4;
873 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: to native unsigned int; variant depends on source kind and
 * register width. */
876 ins->type = STACK_PTR;
877 switch (src1->type) {
879 ins->opcode = OP_ICONV_TO_U;
883 #if SIZEOF_REGISTER == 8
884 ins->opcode = OP_LCONV_TO_U;
886 ins->opcode = OP_MOVE;
890 ins->opcode = OP_LCONV_TO_U;
893 ins->opcode = OP_FCONV_TO_U;
/* Conversions to 64-bit integers. */
899 ins->type = STACK_I8;
900 ins->opcode += unops_op_map [src1->type];
902 case CEE_CONV_OVF_I8:
903 case CEE_CONV_OVF_U8:
904 ins->type = STACK_I8;
905 ins->opcode += ovf3ops_op_map [src1->type];
907 case CEE_CONV_OVF_U8_UN:
908 case CEE_CONV_OVF_I8_UN:
909 ins->type = STACK_I8;
910 ins->opcode += ovf2ops_op_map [src1->type];
/* Conversions to floating point. */
914 ins->type = STACK_R8;
915 ins->opcode += unops_op_map [src1->type];
918 ins->type = STACK_R8;
/* Overflow-checked arithmetic conversions using ovfops_op_map. */
922 ins->type = STACK_I4;
923 ins->opcode += ovfops_op_map [src1->type];
928 ins->type = STACK_PTR;
929 ins->opcode += ovfops_op_map [src1->type];
/* add/mul/sub.ovf: numeric pair; floats are not allowed here. */
937 ins->type = bin_num_table [src1->type] [src2->type];
938 ins->opcode += ovfops_op_map [src1->type];
939 if (ins->type == STACK_R8)
940 ins->type = STACK_INV;
/* Loads: stack type follows the loaded width. */
942 case OP_LOAD_MEMBASE:
943 ins->type = STACK_PTR;
945 case OP_LOADI1_MEMBASE:
946 case OP_LOADU1_MEMBASE:
947 case OP_LOADI2_MEMBASE:
948 case OP_LOADU2_MEMBASE:
949 case OP_LOADI4_MEMBASE:
950 case OP_LOADU4_MEMBASE:
951 ins->type = STACK_PTR;
953 case OP_LOADI8_MEMBASE:
954 ins->type = STACK_I8;
956 case OP_LOADR4_MEMBASE:
957 case OP_LOADR8_MEMBASE:
958 ins->type = STACK_R8;
961 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers default to object class when no better klass is known. */
965 if (ins->type == STACK_MP)
966 ins->klass = mono_defaults.object_class;
/* ldind result stack types, indexed by the ldind opcode variant. */
971 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
/* Parameter-compatibility matrix (currently unused below — see the
 * commented-out check at the end of check_values_to_signature). */
977 param_table [STACK_MAX] [STACK_MAX] = {
/*
 * check_values_to_signature:
 * Validate that the stack values in 'args' are compatible with 'sig'
 * (and 'this', when present). Returns 0 on mismatch.
 * NOTE(review): much of the body is elided in this excerpt.
 */
982 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
986 switch (args->type) {
996 for (i = 0; i < sig->param_count; ++i) {
997 switch (args [i].type) {
/* Managed pointers are only valid for byref parameters, and vice versa. */
1001 if (!sig->params [i]->byref)
1005 if (sig->params [i]->byref)
1007 switch (sig->params [i]->type) {
1008 case MONO_TYPE_CLASS:
1009 case MONO_TYPE_STRING:
1010 case MONO_TYPE_OBJECT:
1011 case MONO_TYPE_SZARRAY:
1012 case MONO_TYPE_ARRAY:
/* Float stack values must map to R4/R8 non-byref parameters. */
1019 if (sig->params [i]->byref)
1021 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1030 /*if (!param_table [args [i].type] [sig->params [i]->type])
1038 * When we need a pointer to the current domain many times in a method, we
1039 * call mono_domain_get() once and we store the result in a local variable.
1040 * This function returns the variable that represents the MonoDomain*.
1042 inline static MonoInst *
1043 mono_get_domainvar (MonoCompile *cfg)
/* Lazily create the cached domain variable (native-int local). */
1045 if (!cfg->domainvar)
1046 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1047 return cfg->domainvar;
1051 * The got_var contains the address of the Global Offset Table when AOT
/* Return (lazily creating) the GOT-address variable; only meaningful when
 * the target architecture needs an explicit GOT var and we are AOTing. */
1055 mono_get_got_var (MonoCompile *cfg)
1057 #ifdef MONO_ARCH_NEED_GOT_VAR
1058 if (!cfg->compile_aot)
1060 if (!cfg->got_var) {
1061 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1063 return cfg->got_var;
/* Return (lazily creating) the runtime-generic-context variable; requires
 * generic sharing to be active. Kept stack-allocated via MONO_INST_INDIRECT. */
1070 mono_get_vtable_var (MonoCompile *cfg)
1072 g_assert (cfg->generic_sharing_context);
1074 if (!cfg->rgctx_var) {
1075 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1076 /* force the var to be stack allocated */
1077 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1080 return cfg->rgctx_var;
/* Map an instruction's eval-stack type (STACK_*) back to a MonoType*,
 * using ins->klass for managed pointers and value types. Aborts on
 * unhandled stack types. */
1084 type_from_stack_type (MonoInst *ins) {
1085 switch (ins->type) {
1086 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1087 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1088 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1089 case STACK_R8: return &mono_defaults.double_class->byval_arg;
/* Managed pointer: byref form of the pointed-to class. */
1091 return &ins->klass->this_arg;
1092 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1093 case STACK_VTYPE: return &ins->klass->byval_arg;
1095 g_error ("stack type %d to monotype not handled\n", ins->type);
/* Inverse direction: map a MonoType to its STACK_* classification.
 * NOTE(review): the return statements for each case group are elided in
 * this excerpt. */
1100 static G_GNUC_UNUSED int
1101 type_to_stack_type (MonoType *t)
1103 switch (mono_type_get_underlying_type (t)->type) {
1106 case MONO_TYPE_BOOLEAN:
1109 case MONO_TYPE_CHAR:
1116 case MONO_TYPE_FNPTR:
1118 case MONO_TYPE_CLASS:
1119 case MONO_TYPE_STRING:
1120 case MONO_TYPE_OBJECT:
1121 case MONO_TYPE_SZARRAY:
1122 case MONO_TYPE_ARRAY:
1130 case MONO_TYPE_VALUETYPE:
1131 case MONO_TYPE_TYPEDBYREF:
1133 case MONO_TYPE_GENERICINST:
/* Generic instances: value types vs. reference types diverge here. */
1134 if (mono_type_generic_inst_is_valuetype (t))
1140 g_assert_not_reached ();
/* Map a CIL ldelem/stelem opcode to the MonoClass of the element accessed.
 * NOTE(review): the case labels preceding most returns are elided in this
 * excerpt; only the final REF pair is fully visible. */
1147 array_access_to_klass (int opcode)
1151 return mono_defaults.byte_class;
1153 return mono_defaults.uint16_class;
1156 return mono_defaults.int_class;
1159 return mono_defaults.sbyte_class;
1162 return mono_defaults.int16_class;
1165 return mono_defaults.int32_class;
1167 return mono_defaults.uint32_class;
1170 return mono_defaults.int64_class;
1173 return mono_defaults.single_class;
1176 return mono_defaults.double_class;
1177 case CEE_LDELEM_REF:
1178 case CEE_STELEM_REF:
1179 return mono_defaults.object_class;
1181 g_assert_not_reached ();
1187 * We try to share variables when possible
/* Return a local used to carry eval-stack slot 'slot' across basic blocks.
 * Integer/pointer-typed slots are cached per (type, slot) in cfg->intvars
 * so they are shared; other types always get a fresh variable. */
1190 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1195 /* inlining can result in deeper stacks */
1196 if (slot >= mono_method_get_header (cfg->method)->max_stack)
1197 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Cache key: stack type (1-based) + slot * STACK_MAX. */
1199 pos = ins->type - 1 + slot * STACK_MAX;
1201 switch (ins->type) {
1208 if ((vnum = cfg->intvars [pos]))
1209 return cfg->varinfo [vnum];
1210 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1211 cfg->intvars [pos] = res->inst_c0;
/* Non-cacheable stack types: always create a fresh local. */
1214 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Record an image/token pair under 'key' in cfg->token_info_hash so the AOT
 * compiler can later resolve the reference without a generic context. */
1220 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1223 * Don't use this if a generic_context is set, since that means AOT can't
1224 * look up the method using just the image+token.
1225 * table == 0 means this is a reference made from a wrapper.
1227 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1228 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1229 jump_info_token->image = image;
1230 jump_info_token->token = token;
1231 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1236 * This function is called to handle items that are left on the evaluation stack
1237 * at basic block boundaries. What happens is that we save the values to local variables
1238 * and we reload them later when first entering the target basic block (with the
1239 * handle_loaded_temps () function).
1240 * A single joint point will use the same variables (stored in the array bb->out_stack or
1241 * bb->in_stack, if the basic block is before or after the joint point).
1243 * This function needs to be called _before_ emitting the last instruction of
1244 * the bb (i.e. before emitting a branch).
1245 * If the stack merge fails at a join point, cfg->unverifiable is set.
1248 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1251 MonoBasicBlock *bb = cfg->cbb;
1252 MonoBasicBlock *outb;
1253 MonoInst *inst, **locals;
1258 if (cfg->verbose_level > 3)
1259 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bblock: pick (or adopt) the out_stack variables. */
1260 if (!bb->out_scount) {
1261 bb->out_scount = count;
1262 //printf ("bblock %d has out:", bb->block_num);
/* Prefer reusing a successor's already-assigned in_stack. */
1264 for (i = 0; i < bb->out_count; ++i) {
1265 outb = bb->out_bb [i];
1266 /* exception handlers are linked, but they should not be considered for stack args */
1267 if (outb->flags & BB_EXCEPTION_HANDLER)
1269 //printf (" %d", outb->block_num);
1270 if (outb->in_stack) {
1272 bb->out_stack = outb->in_stack;
/* No successor had one: allocate fresh out_stack variables. */
1278 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1279 for (i = 0; i < count; ++i) {
1281 * try to reuse temps already allocated for this purpouse, if they occupy the same
1282 * stack slot and if they are of the same type.
1283 * This won't cause conflicts since if 'local' is used to
1284 * store one of the values in the in_stack of a bblock, then
1285 * the same variable will be used for the same outgoing stack
1287 * This doesn't work when inlining methods, since the bblocks
1288 * in the inlined methods do not inherit their in_stack from
1289 * the bblock they are inlined to. See bug #58863 for an
1292 if (cfg->inlined_method)
1293 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1295 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out_stack to successors that have no in_stack yet; a
 * successor with a different stack depth makes the method unverifiable. */
1300 for (i = 0; i < bb->out_count; ++i) {
1301 outb = bb->out_bb [i];
1302 /* exception handlers are linked, but they should not be considered for stack args */
1303 if (outb->flags & BB_EXCEPTION_HANDLER)
1305 if (outb->in_scount) {
1306 if (outb->in_scount != bb->out_scount) {
1307 cfg->unverifiable = TRUE;
1310 continue; /* check they are the same locals */
1312 outb->in_scount = count;
1313 outb->in_stack = bb->out_stack;
/* Spill the live stack values into the chosen locals. */
1316 locals = bb->out_stack;
1318 for (i = 0; i < count; ++i) {
1319 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1320 inst->cil_code = sp [i]->cil_code;
1321 sp [i] = locals [i];
1322 if (cfg->verbose_level > 3)
1323 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1327 * It is possible that the out bblocks already have in_stack assigned, and
1328 * the in_stacks differ. In this case, we will store to all the different
1335 /* Find a bblock which has a different in_stack */
1337 while (bindex < bb->out_count) {
1338 outb = bb->out_bb [bindex];
1339 /* exception handlers are linked, but they should not be considered for stack args */
1340 if (outb->flags & BB_EXCEPTION_HANDLER) {
/* Successor with a different in_stack: emit stores into its variables too. */
1344 if (outb->in_stack != locals) {
1345 for (i = 0; i < count; ++i) {
1346 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1347 inst->cil_code = sp [i]->cil_code;
1348 sp [i] = locals [i];
1349 if (cfg->verbose_level > 3)
1350 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1352 locals = outb->in_stack;
1361 /* Emit code which loads interface_offsets [klass->interface_id]
1362 * The array is stored in memory before vtable.
1365 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1367 if (cfg->compile_aot) {
/* AOT: the interface id is not known until runtime, so load a patched,
 * pre-adjusted iid constant and use it to index relative to the vtable. */
1368 int ioffset_reg = alloc_preg (cfg);
1369 int iid_reg = alloc_preg (cfg);
1371 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1372 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1373 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT: iid is a compile-time constant; the slot lives at a fixed negative
 * offset from the vtable pointer (the table precedes the vtable). */
1376 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
1381 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1382 * stored in "klass_reg" implements the interface "klass".
1385 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1387 int ibitmap_reg = alloc_preg (cfg);
1388 int ibitmap_byte_reg = alloc_preg (cfg);
/* The class carries a per-interface bitmap; test bit (iid & 7) of byte (iid >> 3). */
1390 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap));
1392 if (cfg->compile_aot) {
/* AOT: iid is only known at runtime, so compute byte index and bit mask
 * with emitted arithmetic instead of folding them at compile time. */
1393 int iid_reg = alloc_preg (cfg);
1394 int shifted_iid_reg = alloc_preg (cfg);
1395 int ibitmap_byte_address_reg = alloc_preg (cfg);
1396 int masked_iid_reg = alloc_preg (cfg);
1397 int iid_one_bit_reg = alloc_preg (cfg);
1398 int iid_bit_reg = alloc_preg (cfg);
1399 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1400 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1401 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1402 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1403 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1404 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1405 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1406 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: byte offset and bit mask are compile-time constants. */
1408 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1409 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1414 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1415 * stored in "vtable_reg" implements the interface "klass".
1418 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1420 int ibitmap_reg = alloc_preg (cfg);
1421 int ibitmap_byte_reg = alloc_preg (cfg);
/* Same bitmap test as the class variant above, but the bitmap pointer is
 * read from the vtable instead of the MonoClass. */
1423 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap));
1425 if (cfg->compile_aot) {
/* AOT: iid resolved at runtime via a patch; compute byte/bit in code. */
1426 int iid_reg = alloc_preg (cfg);
1427 int shifted_iid_reg = alloc_preg (cfg);
1428 int ibitmap_byte_address_reg = alloc_preg (cfg);
1429 int masked_iid_reg = alloc_preg (cfg);
1430 int iid_one_bit_reg = alloc_preg (cfg);
1431 int iid_bit_reg = alloc_preg (cfg);
1432 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1433 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, shifted_iid_reg, iid_reg, 3);
1434 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1435 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1436 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, masked_iid_reg, iid_reg, 7);
1437 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1438 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1439 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: fold byte offset and bit mask at compile time. */
1441 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1442 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1447 * Emit code which checks whenever the interface id of @klass is smaller than
1448 * than the value given by max_iid_reg.
1451 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1452 MonoBasicBlock *false_target)
1454 if (cfg->compile_aot) {
/* AOT: compare against a runtime-patched iid constant. */
1455 int iid_reg = alloc_preg (cfg);
1456 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1457 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
/* JIT: iid is an immediate. */
1460 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
/* On failure either branch to false_target or raise InvalidCastException;
 * NOTE(review): the guard selecting between the two (presumably
 * "if (false_target)") is not visible in this chunk -- confirm. */
1462 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1464 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1467 /* Same as above, but obtains max_iid from a vtable */
1469 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1470 MonoBasicBlock *false_target)
1472 int max_iid_reg = alloc_preg (cfg);
/* Load MonoVTable.max_interface_id (16-bit, zero-extended) then delegate. */
1474 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1475 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1478 /* Same as above, but obtains max_iid from a klass */
1480 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1481 MonoBasicBlock *false_target)
1483 int max_iid_reg = alloc_preg (cfg);
/* Load MonoClass.max_interface_id (16-bit, zero-extended) then delegate. */
1485 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1486 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast:
 *
 * Emit an "is instance of" test using the supertypes table: if
 * supertypes [klass->idepth - 1] of the class in klass_reg equals @klass,
 * branch to true_target.
 */
1490 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1492 int idepth_reg = alloc_preg (cfg);
1493 int stypes_reg = alloc_preg (cfg);
1494 int stype = alloc_preg (cfg);
/* The supertypes table always has at least MONO_DEFAULT_SUPERTABLE_SIZE
 * entries; only emit the depth check when klass sits deeper than that. */
1496 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1497 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1498 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1499 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1501 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1502 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1503 if (cfg->compile_aot) {
/* AOT: the class pointer must come from a patched constant. */
1504 int const_reg = alloc_preg (cfg);
1505 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1506 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
/* JIT: the MonoClass* pointer value itself is used as the immediate. */
1508 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1510 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/*
 * mini_emit_iface_cast:
 *
 * Emit an interface cast check against the vtable in vtable_reg: range-check
 * the interface id, then test the interface bitmap bit.
 */
1514 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1516 int intf_reg = alloc_preg (cfg);
1518 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1519 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1520 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
/* Bit set -> implements the interface; otherwise raise.
 * NOTE(review): guard choosing branch vs. throw is elided here -- confirm. */
1522 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1524 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1528 * Variant of the above that takes a register to the class, not the vtable.
1531 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1533 int intf_bit_reg = alloc_preg (cfg);
1535 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1536 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1537 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
/* Nonzero bit -> interface implemented; zero -> InvalidCastException.
 * NOTE(review): guard choosing branch vs. throw is elided here -- confirm. */
1539 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1541 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check:
 *
 * Emit code that raises InvalidCastException unless the class in klass_reg
 * is exactly @klass.
 */
1545 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1547 if (cfg->compile_aot) {
/* AOT: compare against a patched class constant. */
1548 int const_reg = alloc_preg (cfg);
1549 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1550 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
/* JIT: the MonoClass* pointer is compared as an immediate. */
1552 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1554 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/*
 * mini_emit_class_check_branch:
 *
 * Like mini_emit_class_check (), but instead of throwing, branch to @target
 * using @branch_op on the comparison result.
 */
1558 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1560 if (cfg->compile_aot) {
1561 int const_reg = alloc_preg (cfg);
1562 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1563 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1565 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1567 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/*
 * mini_emit_castclass:
 *
 * Emit a castclass check of the class in klass_reg (whose object, if any, is
 * in obj_reg) against @klass, raising InvalidCastException on failure.
 * Branches to object_is_null where a successful early exit is possible.
 * Handles array classes (rank + element class checks, recursing for arrays
 * of arrays) and falls back to a supertypes-table check otherwise.
 */
1571 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1574 int rank_reg = alloc_preg (cfg);
1575 int eclass_reg = alloc_preg (cfg);
/* Array path: ranks must match exactly. */
1577 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1578 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1579 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1580 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1581 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Element-class comparison; enum/object element types need special care
 * because of enum <-> underlying-type array compatibility. */
1582 if (klass->cast_class == mono_defaults.object_class) {
1583 int parent_reg = alloc_preg (cfg);
1584 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1585 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1586 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1587 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1588 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1589 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1590 } else if (klass->cast_class == mono_defaults.enum_class) {
1591 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1592 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1593 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1595 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1596 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
/* SZARRAY target: a multi-dim rank-1 array has a non-NULL bounds pointer,
 * so require bounds == NULL to prove the object is a vector. */
1599 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1600 /* Check that the object is a vector too */
1601 int bounds_reg = alloc_preg (cfg);
1602 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1603 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1604 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: check supertypes [klass->idepth - 1] == klass, with a
 * depth range check first when the supertable may be too short. */
1607 int idepth_reg = alloc_preg (cfg);
1608 int stypes_reg = alloc_preg (cfg);
1609 int stype = alloc_preg (cfg);
1611 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1612 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1613 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1614 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1616 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1617 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1618 mini_emit_class_check (cfg, stype, klass);
/*
 * mini_emit_memset:
 *
 * Emit inline code that sets @size bytes at destreg+offset to @val (only
 * val == 0 is supported, per the assert).  Small aligned sizes use a single
 * immediate store; larger/unaligned sizes store from a zero-valued register,
 * widest-first where unaligned access is allowed.
 */
1623 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1627 g_assert (val == 0);
/* Fast path: size fits in one naturally-aligned immediate store. */
1632 if ((size <= 4) && (size <= align)) {
1635 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1638 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1641 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1643 #if SIZEOF_REGISTER == 8
1645 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General path: materialize val in a register sized to the machine word. */
1651 val_reg = alloc_preg (cfg);
1653 if (SIZEOF_REGISTER == 8)
1654 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1656 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned destination: fall back to byte stores until aligned. */
1659 /* This could be optimized further if neccesary */
1661 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1668 #if !NO_UNALIGNED_ACCESS
/* With unaligned access allowed, emit the widest stores first. */
1669 if (SIZEOF_REGISTER == 8) {
1671 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1676 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* Drain the remainder with progressively narrower stores. */
1684 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1689 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1694 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1700 #endif /* DISABLE_JIT */
/*
 * mini_emit_memcpy:
 *
 * Emit inline code that copies @size bytes from srcreg+soffset to
 * destreg+doffset, honoring @align: unaligned copies fall back to byte
 * load/store pairs, aligned copies use the widest loads/stores available.
 */
1703 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
/* Unaligned case: copy byte by byte. */
1711 /* This could be optimized further if neccesary */
1713 cur_reg = alloc_preg (cfg);
1714 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1715 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1722 #if !NO_UNALIGNED_ACCESS
/* 8-byte copies on 64-bit targets that tolerate unaligned access. */
1723 if (SIZEOF_REGISTER == 8) {
1725 cur_reg = alloc_preg (cfg);
1726 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1727 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Remainder: 4-, 2-, then 1-byte copies. */
1736 cur_reg = alloc_preg (cfg);
1737 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1738 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1744 cur_reg = alloc_preg (cfg);
1745 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1746 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1752 cur_reg = alloc_preg (cfg);
1753 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1754 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * ret_type_to_call_opcode:
 *
 * Map a method return type to the matching call opcode, selecting the
 * indirect (_REG) variant when @calli is set and the virtual (_VIRT)
 * variant when @virt is set.  Generic-sharing types are normalized first.
 */
1764 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
/* byref returns are pointer-sized -> plain call opcodes (line elided above). */
1767 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1770 type = mini_get_basic_type_from_generic (gsctx, type);
1771 switch (type->type) {
1772 case MONO_TYPE_VOID:
1773 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1776 case MONO_TYPE_BOOLEAN:
1779 case MONO_TYPE_CHAR:
/* Small integer types all return in an integer register. */
1782 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1786 case MONO_TYPE_FNPTR:
1787 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1788 case MONO_TYPE_CLASS:
1789 case MONO_TYPE_STRING:
1790 case MONO_TYPE_OBJECT:
1791 case MONO_TYPE_SZARRAY:
1792 case MONO_TYPE_ARRAY:
/* Reference types also use the plain integer call opcodes. */
1793 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1796 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1799 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1800 case MONO_TYPE_VALUETYPE:
/* Enums re-dispatch on their underlying type (loop back elided). */
1801 if (type->data.klass->enumtype) {
1802 type = mono_class_enum_basetype (type->data.klass);
1805 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1806 case MONO_TYPE_TYPEDBYREF:
1807 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1808 case MONO_TYPE_GENERICINST:
/* Re-dispatch on the generic container's open type. */
1809 type = &type->data.generic_class->container_class->byval_arg;
1812 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1818 * target_type_is_incompatible:
1819 * @cfg: MonoCompile context
1821 * Check that the item @arg on the evaluation stack can be stored
1822 * in the target type (can be a local, or field, etc).
1823 * The cfg arg can be used to check if we need verification or just
1826 * Returns: non-0 value if arg can't be stored on a target.
1829 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1831 MonoType *simple_type;
1834 if (target->byref) {
1835 /* FIXME: check that the pointed to types match */
1836 if (arg->type == STACK_MP)
1837 return arg->klass != mono_class_from_mono_type (target);
1838 if (arg->type == STACK_PTR)
/* Non-byref: normalize (strips enum wrappers etc.) and dispatch. */
1843 simple_type = mono_type_get_underlying_type (target);
1844 switch (simple_type->type) {
1845 case MONO_TYPE_VOID:
1849 case MONO_TYPE_BOOLEAN:
1852 case MONO_TYPE_CHAR:
/* Small integer targets accept I4 or native-int stack entries. */
1855 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1859 /* STACK_MP is needed when setting pinned locals */
1860 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1865 case MONO_TYPE_FNPTR:
1866 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1869 case MONO_TYPE_CLASS:
1870 case MONO_TYPE_STRING:
1871 case MONO_TYPE_OBJECT:
1872 case MONO_TYPE_SZARRAY:
1873 case MONO_TYPE_ARRAY:
1874 if (arg->type != STACK_OBJ)
1876 /* FIXME: check type compatibility */
1880 if (arg->type != STACK_I8)
1885 if (arg->type != STACK_R8)
1888 case MONO_TYPE_VALUETYPE:
/* Value types must match both the stack kind and the exact class. */
1889 if (arg->type != STACK_VTYPE)
1891 klass = mono_class_from_mono_type (simple_type);
1892 if (klass != arg->klass)
1895 case MONO_TYPE_TYPEDBYREF:
1896 if (arg->type != STACK_VTYPE)
1898 klass = mono_class_from_mono_type (simple_type);
1899 if (klass != arg->klass)
1902 case MONO_TYPE_GENERICINST:
1903 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1904 if (arg->type != STACK_VTYPE)
1906 klass = mono_class_from_mono_type (simple_type);
1907 if (klass != arg->klass)
/* Reference generic instantiation: treated like any object reference. */
1911 if (arg->type != STACK_OBJ)
1913 /* FIXME: check type compatibility */
1917 case MONO_TYPE_MVAR:
1918 /* FIXME: all the arguments must be references for now,
1919 * later look inside cfg and see if the arg num is
1920 * really a reference
1922 g_assert (cfg->generic_sharing_context);
1923 if (arg->type != STACK_OBJ)
1927 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1933 * Prepare arguments for passing to a function call.
1934 * Return a non-zero value if the arguments can't be passed to the given
1936 * The type checks are not yet complete and some conversions may need
1937 * casts on 32 or 64 bit architectures.
1939 * FIXME: implement this using target_type_is_incompatible ()
1942 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1944 MonoType *simple_type;
/* Check the implicit 'this' argument (hasthis guard elided above). */
1948 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1952 for (i = 0; i < sig->param_count; ++i) {
1953 if (sig->params [i]->byref) {
1954 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
/* Normalize generic-shared types before checking the stack kind. */
1958 simple_type = sig->params [i];
1959 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1961 switch (simple_type->type) {
1962 case MONO_TYPE_VOID:
1967 case MONO_TYPE_BOOLEAN:
1970 case MONO_TYPE_CHAR:
1973 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
1979 case MONO_TYPE_FNPTR:
/* Pointer-sized parameters accept I4/PTR/MP/OBJ stack entries. */
1980 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
1983 case MONO_TYPE_CLASS:
1984 case MONO_TYPE_STRING:
1985 case MONO_TYPE_OBJECT:
1986 case MONO_TYPE_SZARRAY:
1987 case MONO_TYPE_ARRAY:
1988 if (args [i]->type != STACK_OBJ)
1993 if (args [i]->type != STACK_I8)
1998 if (args [i]->type != STACK_R8)
2001 case MONO_TYPE_VALUETYPE:
/* Enums re-check against their underlying basetype (loop back elided). */
2002 if (simple_type->data.klass->enumtype) {
2003 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2006 if (args [i]->type != STACK_VTYPE)
2009 case MONO_TYPE_TYPEDBYREF:
2010 if (args [i]->type != STACK_VTYPE)
2013 case MONO_TYPE_GENERICINST:
2014 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2018 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 *
 * Map an OP_*CALLVIRT opcode to the corresponding direct-call opcode
 * (most cases of the switch are elided in this chunk).
 */
2026 callvirt_to_call (int opcode)
2031 case OP_VOIDCALLVIRT:
2040 g_assert_not_reached ();
/*
 * callvirt_to_call_membase:
 *
 * Map an OP_*CALLVIRT opcode to the corresponding *CALL_MEMBASE opcode,
 * used when the call target is loaded from a vtable/IMT slot.
 */
2047 callvirt_to_call_membase (int opcode)
2051 return OP_CALL_MEMBASE;
2052 case OP_VOIDCALLVIRT:
2053 return OP_VOIDCALL_MEMBASE;
2055 return OP_FCALL_MEMBASE;
2057 return OP_LCALL_MEMBASE;
2059 return OP_VCALL_MEMBASE;
2061 g_assert_not_reached ();
2067 #ifdef MONO_ARCH_HAVE_IMT
/*
 * emit_imt_argument:
 *
 * Emit the hidden IMT argument for an interface call.  If the architecture
 * dedicates a register (MONO_ARCH_IMT_REG), move/load the method pointer
 * into it; otherwise defer to the arch-specific hook.
 */
2069 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2071 #ifdef MONO_ARCH_IMT_REG
2072 int method_reg = alloc_preg (cfg);
/* Caller supplied the IMT value explicitly (guard line elided). */
2075 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2076 } else if (cfg->compile_aot) {
2077 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
/* JIT: embed the MonoMethod* directly as a pointer constant. */
2080 MONO_INST_NEW (cfg, ins, OP_PCONST);
2081 ins->inst_p0 = call->method;
2082 ins->dreg = method_reg;
2083 MONO_ADD_INS (cfg->cbb, ins);
2086 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* No dedicated IMT register: let the backend place the argument. */
2088 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/*
 * mono_patch_info_new:
 *
 * Allocate a MonoJumpInfo patch record from @mp describing a relocation of
 * @type at IL offset @ip pointing at @target.
 */
2093 static MonoJumpInfo *
2094 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2096 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2100 ji->data.target = target;
2105 inline static MonoInst*
2106 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args);
/*
 * mono_emit_call_args:
 *
 * Create a MonoCallInst for @sig with arguments @args.  Selects the call
 * opcode from the return type (or OP_TAILCALL when @tail), sets up the
 * valuetype-return temporary via OP_OUTARG_VTRETADDR, performs the
 * soft-float r8->r4 argument conversion, and lets the backend (or LLVM)
 * lower the argument passing.  The instruction is NOT added to a bblock.
 */
2108 inline static MonoCallInst *
2109 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2110 MonoInst **args, int calli, int virtual, int tail)
2113 #ifdef MONO_ARCH_SOFT_FLOAT
2118 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2120 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2123 call->signature = sig;
2125 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
/* First branch: valuetype return through cfg->vret_addr (its guard
 * condition is elided in this chunk). */
2128 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2129 call->vret_var = cfg->vret_addr;
2130 //g_assert_not_reached ();
2132 } else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
/* Valuetype return via a fresh local whose address is passed out. */
2133 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2136 temp->backend.is_pinvoke = sig->pinvoke;
2139 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2140 * address of return value to increase optimization opportunities.
2141 * Before vtype decomposition, the dreg of the call ins itself represents the
2142 * fact the call modifies the return value. After decomposition, the call will
2143 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2144 * will be transformed into an LDADDR.
2146 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2147 loada->dreg = alloc_preg (cfg);
2148 loada->inst_p0 = temp;
2149 /* We reference the call too since call->dreg could change during optimization */
2150 loada->inst_p1 = call;
2151 MONO_ADD_INS (cfg->cbb, loada);
2153 call->inst.dreg = temp->dreg;
2155 call->vret_var = loada;
2156 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2157 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2159 #ifdef MONO_ARCH_SOFT_FLOAT
2161 * If the call has a float argument, we would need to do an r8->r4 conversion using
2162 * an icall, but that cannot be done during the call sequence since it would clobber
2163 * the call registers + the stack. So we do it before emitting the call.
2165 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2167 MonoInst *in = call->args [i];
2169 if (i >= sig->hasthis)
2170 t = sig->params [i - sig->hasthis];
2172 t = &mono_defaults.int_class->byval_arg;
2173 t = mono_type_get_underlying_type (t);
2175 if (!t->byref && t->type == MONO_TYPE_R4) {
2176 MonoInst *iargs [1];
2180 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2182 /* The result will be in an int vreg */
2183 call->args [i] = conv;
/* Lower argument passing: LLVM path or the arch backend. */
2189 if (COMPILE_LLVM (cfg))
2190 mono_llvm_emit_call (cfg, call);
2192 mono_arch_emit_call (cfg, call);
2194 mono_arch_emit_call (cfg, call);
/* Track outgoing parameter area usage for the prolog. */
2197 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2198 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * mono_emit_calli:
 *
 * Emit an indirect call through the address in @addr with signature @sig.
 */
2203 inline static MonoInst*
2204 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2206 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE);
2208 call->inst.sreg1 = addr->dreg;
2210 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2212 return (MonoInst*)call;
/*
 * mono_emit_rgctx_calli:
 *
 * Like mono_emit_calli (), but additionally pass @rgctx_arg in the
 * architecture's dedicated RGCTX register.  Only valid on targets that
 * define MONO_ARCH_RGCTX_REG.
 */
2215 inline static MonoInst*
2216 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2218 #ifdef MONO_ARCH_RGCTX_REG
2223 rgctx_reg = mono_alloc_preg (cfg);
2224 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2226 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2228 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2229 cfg->uses_rgctx_reg = TRUE;
2230 call->rgctx_reg = TRUE;
2232 return (MonoInst*)call;
/* Unreachable on targets without an RGCTX register. */
2234 g_assert_not_reached ();
2240 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
2242 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, int rgctx_type);
/*
 * mono_emit_method_call_full:
 *
 * Emit a (possibly virtual) call to @method with signature @sig and
 * arguments @args.  Handles: string ctor signature rewriting, transparent
 * proxy/remoting wrappers, the delegate Invoke fast path, devirtualization
 * of non-virtual and final methods, interface dispatch via IMT or the
 * interface-offsets table, and plain vtable dispatch.
 */
2245 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2246 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2248 gboolean might_be_remote;
2249 gboolean virtual = this != NULL;
2250 gboolean enable_for_aot = TRUE;
2254 if (method->string_ctor) {
2255 /* Create the real signature */
2256 /* FIXME: Cache these */
2257 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2258 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
/* 'this' may be a transparent proxy: route through the remoting wrapper
 * unless we can prove it is a direct object. */
2263 might_be_remote = this && sig->hasthis &&
2264 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2265 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this);
2267 context_used = mono_method_check_context_used (method);
2268 if (might_be_remote && context_used) {
/* Generic sharing: fetch the remoting-invoke wrapper address from the
 * RGCTX and call indirectly. */
2271 g_assert (cfg->generic_sharing_context);
2273 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2275 return mono_emit_calli (cfg, sig, args, addr);
2278 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE);
2280 if (might_be_remote)
2281 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2283 call->method = method;
2284 call->inst.flags |= MONO_INST_HAS_METHOD;
2285 call->inst.inst_left = this;
/* Virtual dispatch (guard elided): everything below needs 'this'. */
2288 int vtable_reg, slot_reg, this_reg;
2290 this_reg = this->dreg;
2292 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2294 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2295 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2296 /* Make a call to delegate->invoke_impl */
2297 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2298 call->inst.inst_basereg = this_reg;
2299 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2300 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2302 return (MonoInst*)call;
2306 if ((!cfg->compile_aot || enable_for_aot) &&
2307 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2308 (MONO_METHOD_IS_FINAL (method) &&
2309 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK))) {
2311 * the method is not virtual, we just need to ensure this is not null
2312 * and then we can call the method directly.
2314 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2315 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
/* String ctors get a NULL 'this'; skip the null check for them. */
2318 if (!method->string_ctor)
2319 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2321 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2323 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2325 return (MonoInst*)call;
2328 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2330 * the method is virtual, but we can statically dispatch since either
2331 * it's class or the method itself are sealed.
2332 * But first we need to ensure it's not a null reference.
2334 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2336 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2337 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2339 return (MonoInst*)call;
/* True virtual dispatch: load the slot from the vtable. */
2342 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2344 vtable_reg = alloc_preg (cfg);
2345 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2346 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2348 #ifdef MONO_ARCH_HAVE_IMT
/* Interface call via IMT: slot is at a negative offset from the vtable. */
2350 guint32 imt_slot = mono_method_get_imt_slot (method);
2351 emit_imt_argument (cfg, call, imt_arg);
2352 slot_reg = vtable_reg;
2353 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2356 if (slot_reg == -1) {
/* No IMT: go through the per-interface offsets table. */
2357 slot_reg = alloc_preg (cfg);
2358 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2359 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
/* Class virtual call: index directly into the vtable array. */
2362 slot_reg = vtable_reg;
2363 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2364 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2365 #ifdef MONO_ARCH_HAVE_IMT
/* Generic virtual methods still need the IMT argument. */
2367 g_assert (mono_method_signature (method)->generic_param_count);
2368 emit_imt_argument (cfg, call, imt_arg);
2373 call->inst.sreg1 = slot_reg;
2374 call->virtual = TRUE;
2377 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2379 return (MonoInst*)call;
/*
 * mono_emit_rgctx_method_call_full:
 *
 * Like mono_emit_method_call_full (), but additionally pass @vtable_arg in
 * the architecture's RGCTX register (when MONO_ARCH_RGCTX_REG is defined).
 */
2383 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2384 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
2391 #ifdef MONO_ARCH_RGCTX_REG
2392 rgctx_reg = mono_alloc_preg (cfg);
2393 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2398 ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2400 call = (MonoCallInst*)ins;
2402 #ifdef MONO_ARCH_RGCTX_REG
2403 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2404 cfg->uses_rgctx_reg = TRUE;
2405 call->rgctx_reg = TRUE;
/* Convenience wrapper: call @method with its own signature and no IMT arg. */
2414 static inline MonoInst*
2415 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2417 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
/*
 * mono_emit_native_call:
 *
 * Emit a direct call to the native function @func with signature @sig.
 */
2421 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2428 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE);
2431 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2433 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *
 * Emit a call to the registered JIT icall whose address is @func, going
 * through the icall's wrapper and registered signature.
 */
2436 inline static MonoInst*
2437 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2439 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2443 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2447 * mono_emit_abs_call:
2449 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2451 inline static MonoInst*
2452 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2453 MonoMethodSignature *sig, MonoInst **args)
2455 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2459 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Register the patch record so the ji pointer can be resolved later. */
2462 if (cfg->abs_patches == NULL)
2463 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2464 g_hash_table_insert (cfg->abs_patches, ji, ji);
2465 ins = mono_emit_native_call (cfg, ji, sig, args);
2466 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *
 * Widen the result of a pinvoke (or LLVM) call whose return type is a
 * sub-register-sized integer, since native code might leave the upper bits
 * of the result register uninitialized.  Returns the (possibly widened)
 * result instruction.
 */
2471 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2473 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2474 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2478 * Native code might return non register sized integers
2479 * without initializing the upper bits.
2481 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2482 case OP_LOADI1_MEMBASE:
2483 widen_op = OP_ICONV_TO_I1;
2485 case OP_LOADU1_MEMBASE:
2486 widen_op = OP_ICONV_TO_U1;
2488 case OP_LOADI2_MEMBASE:
2489 widen_op = OP_ICONV_TO_I2;
2491 case OP_LOADU2_MEMBASE:
2492 widen_op = OP_ICONV_TO_U2;
/* widen_op stays -1 for register-sized returns (default case elided). */
2498 if (widen_op != -1) {
2499 int dreg = alloc_preg (cfg);
2502 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2503 widen->type = ins->type;
/*
 * get_memcpy_method:
 *
 * Return (and lazily cache) the managed string_class "memcpy" helper used
 * by mini_emit_stobj () for large copies.  Aborts if corlib lacks it.
 */
2513 get_memcpy_method (void)
2515 static MonoMethod *memcpy_method = NULL;
2516 if (!memcpy_method) {
2517 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2519 g_error ("Old corlib found. Install a new one");
2521 return memcpy_method;
2525  * Emit code to copy a valuetype of type @klass whose address is stored in
2526  * @src->dreg to memory whose address is stored at @dest->dreg.
/* When NATIVE is true the native (marshalled) size/layout is used and the
 * struct is assumed to contain no GC references. */
2529 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2531 	MonoInst *iargs [3];
2534 	MonoMethod *memcpy_method;
2538 	 * This check breaks with spilled vars... need to handle it during verification anyway.
2539 	 * g_assert (klass && klass == src->klass && klass == dest->klass);
2543 		n = mono_class_native_size (klass, &align);
2545 		n = mono_class_value_size (klass, &align);
2547 #if HAVE_WRITE_BARRIERS
2548 	/* if native is true there should be no references in the struct */
2549 	if (klass->has_references && !native) {
2550 		/* Avoid barriers when storing to the stack */
2551 		if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2552 			  (dest->opcode == OP_LDADDR))) {
2553 			int context_used = 0;
2558 			if (cfg->generic_sharing_context)
2559 				context_used = mono_class_check_context_used (klass);
/* Shared generic code loads the class from the rgctx; otherwise it is a
 * compile-time constant. */
2561 				iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2563 				EMIT_NEW_PCONST (cfg, iargs [2], klass);
2564 			mono_class_compute_gc_descriptor (klass);
/* mono_value_copy performs the copy with the required write barriers. */
2567 			mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* Fast path: small structs are copied inline instead of calling memcpy. */
2572 	if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2573 		/* FIXME: Optimize the case when src/dest is OP_LDADDR */
2574 		mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2578 		EMIT_NEW_ICONST (cfg, iargs [2], n);
2580 		memcpy_method = get_memcpy_method ();
2581 		mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return the managed String.memset(ptr, value, n) helper from corlib,
 * caching it in a function-local static. Aborts with g_error if the helper
 * is missing (old corlib). Same unsynchronized-lazy-init caveat as
 * get_memcpy_method ().
 */
2586 get_memset_method (void)
2588 	static MonoMethod *memset_method = NULL;
2589 	if (!memset_method) {
2590 		memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2592 			g_error ("Old corlib found. Install a new one");
2594 	return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code implementing the CIL initobj semantics: zero the valuetype of
 * type KLASS whose address is in DEST->dreg. Small types are zeroed inline;
 * larger ones go through the managed memset helper.
 */
2598 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2600 	MonoInst *iargs [3];
2603 	MonoMethod *memset_method;
2605 	/* FIXME: Optimize this for the case when dest is an LDADDR */
2607 	mono_class_init (klass);
2608 	n = mono_class_value_size (klass, &align);
/* Inline-zero threshold: up to 5 pointer-sized words. */
2610 	if (n <= sizeof (gpointer) * 5) {
2611 		mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2614 		memset_method = get_memset_method ();
/* memset (dest, 0, n) via the managed helper. */
2616 		EMIT_NEW_ICONST (cfg, iargs [1], 0);
2617 		EMIT_NEW_ICONST (cfg, iargs [2], n);
2618 		mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR that loads the runtime generic context for METHOD inside shared
 * generic code. Three sources, chosen by how the method is shared:
 *   - method-sharing (MONO_GENERIC_CONTEXT_USED_METHOD): the MRGCTX was
 *     passed as a hidden argument and lives in the vtable var;
 *   - static methods / valuetype methods: the vtable was passed as a hidden
 *     argument (and for inflated generic methods the MRGCTX is loaded
 *     through it);
 *   - otherwise: load the vtable from the `this' argument.
 * Only valid when cfg->generic_sharing_context is set (asserted below).
 */
2623 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2625 	MonoInst *this = NULL;
2627 	g_assert (cfg->generic_sharing_context);
/* Load `this' only for non-static reference-type methods that are not
 * method-shared -- the other cases get their context another way. */
2629 	if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2630 			!(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2631 			!method->klass->valuetype)
2632 		EMIT_NEW_ARGLOAD (cfg, this, 0);
2634 	if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2635 		MonoInst *mrgctx_loc, *mrgctx_var;
2638 		g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
/* The hidden MRGCTX argument was spilled to the vtable variable. */
2640 		mrgctx_loc = mono_get_vtable_var (cfg);
2641 		EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2644 	} else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2645 		MonoInst *vtable_loc, *vtable_var;
2649 		vtable_loc = mono_get_vtable_var (cfg);
2650 		EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
/* Inflated generic method: what was passed is actually an MRGCTX, so
 * load the class vtable out of it. */
2652 		if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2653 			MonoInst *mrgctx_var = vtable_var;
2656 			vtable_reg = alloc_preg (cfg);
2657 			EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2658 			vtable_var->type = STACK_PTR;
/* Default case: fetch the vtable from the object header of `this'. */
2664 		int vtable_reg, res_reg;
2666 		vtable_reg = alloc_preg (cfg);
2667 		res_reg = alloc_preg (cfg);
2668 		EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (from MP) an rgctx-fetch descriptor: which METHOD's context to
 * use, whether it is an MRGCTX (IN_MRGCTX), the patch describing the data to
 * look up (PATCH_TYPE/PATCH_DATA) and the kind of info wanted (INFO_TYPE).
 * Consumed by emit_rgctx_fetch () as MONO_PATCH_INFO_RGCTX_FETCH data.
 */
2673 static MonoJumpInfoRgctxEntry *
2674 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2676 	MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2677 	res->method = method;
2678 	res->in_mrgctx = in_mrgctx;
/* The looked-up datum is described by an embedded MonoJumpInfo. */
2679 	res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2680 	res->data->type = patch_type;
2681 	res->data->data.target = patch_data;
2682 	res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *
 *   Emit a call to the lazy rgctx-fetch trampoline which resolves ENTRY
 * using the runtime generic context in RGCTX.
 */
2687 static inline MonoInst*
2688 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2690 	return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR that loads the RGCTX_TYPE info (e.g. KLASS/VTABLE) of the open
 * class KLASS from the runtime generic context of the current method.
 */
2694 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2695 					  MonoClass *klass, int rgctx_type)
2697 	MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2698 	MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2700 	return emit_rgctx_fetch (cfg, rgctx, entry);
2704  * emit_get_rgctx_method:
2706  * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2707  * normal constants, else emit a load from the rgctx.
2710 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2711 					   MonoMethod *cmethod, int rgctx_type)
/* Non-shared code: the method is fully known, so emit a constant. */
2713 	if (!context_used) {
2716 		switch (rgctx_type) {
2717 		case MONO_RGCTX_INFO_METHOD:
2718 			EMIT_NEW_METHODCONST (cfg, ins, cmethod);
2720 		case MONO_RGCTX_INFO_METHOD_RGCTX:
2721 			EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
/* Other rgctx_type values are not expected on the constant path. */
2724 			g_assert_not_reached ();
/* Shared code: resolve the method through the rgctx at run time. */
2727 		MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2728 		MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2730 		return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR that loads the RGCTX_TYPE info of FIELD from the runtime
 * generic context of the current method (shared generic code path).
 */
2735 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2736 					  MonoClassField *field, int rgctx_type)
2738 	MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2739 	MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2741 	return emit_rgctx_fetch (cfg, rgctx, entry);
2745  * On return the caller must check @klass for load errors.
/* Emit a call to the generic-class-init trampoline, which runs the static
 * constructor / class initialization for KLASS if it has not run yet. The
 * vtable argument comes from the rgctx in shared code, else is a constant. */
2748 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
2750 	MonoInst *vtable_arg;
2752 	int context_used = 0;
2754 	if (cfg->generic_sharing_context)
2755 		context_used = mono_class_check_context_used (klass);
2758 		vtable_arg = emit_get_rgctx_klass (cfg, context_used,
2759 										   klass, MONO_RGCTX_INFO_VTABLE);
2761 		MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2765 		EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
2768 	call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
/* On architectures with a dedicated vtable register, pass the vtable in it
 * instead of on the normal argument path. */
2769 #ifdef MONO_ARCH_VTABLE_REG
2770 	mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
2771 	cfg->uses_vtable_reg = TRUE;
2778  * On return the caller must check @array_class for load errors
/* Emit a type check that throws ArrayTypeMismatchException unless OBJ is an
 * instance of exactly ARRAY_CLASS. Four strategies: class comparison under
 * MONO_OPT_SHARED, rgctx-loaded vtable in shared generic code, AOT-safe
 * vtable constant, or a direct vtable-pointer immediate compare. */
2781 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
2783 	int vtable_reg = alloc_preg (cfg);
2784 	int context_used = 0;
2786 	if (cfg->generic_sharing_context)
2787 		context_used = mono_class_check_context_used (array_class);
2789 	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Shared-domain code cannot compare vtables (they are per-domain), so
 * compare the MonoClass instead. */
2791 	if (cfg->opt & MONO_OPT_SHARED) {
2792 		int class_reg = alloc_preg (cfg);
2793 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2794 		if (cfg->compile_aot) {
2795 			int klass_reg = alloc_preg (cfg);
2796 			MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
2797 			MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
2799 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
/* Shared generic code: the expected vtable comes from the rgctx. */
2801 	} else if (context_used) {
2802 		MonoInst *vtable_ins;
2804 		vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
2805 		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
2807 		if (cfg->compile_aot) {
/* mono_class_vtable can fail on load errors; caller checks array_class. */
2811 			if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
2813 			vt_reg = alloc_preg (cfg);
2814 			MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
2815 			MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
2818 			if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
2820 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
/* All strategies end in the same conditional throw. */
2824 	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
/*
 * save_cast_details:
 *
 *   When --debug=casts is active, record the (from, to) classes of an
 * upcoming cast in the thread's MonoJitTlsData so a failed cast can produce
 * a detailed error message. No-op otherwise. Paired with
 * reset_cast_details () after the cast succeeds.
 */
2828 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
2830 	if (mini_get_debug_options ()->better_cast_details) {
2831 		int to_klass_reg = alloc_preg (cfg);
2832 		int vtable_reg = alloc_preg (cfg);
2833 		int klass_reg = alloc_preg (cfg);
2834 		MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
/* The TLS intrinsic is arch-dependent; without it the feature is off. */
2837 			fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
2841 		MONO_ADD_INS (cfg->cbb, tls_get);
/* from-class = obj->vtable->klass */
2842 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2843 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2845 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
2846 		MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
2847 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * reset_cast_details:
 *
 *   Clear the cast-details TLS state written by save_cast_details (), so a
 * later unrelated cast failure does not report stale classes. Only the
 * `from' field needs clearing -- it acts as the validity flag.
 */
2852 reset_cast_details (MonoCompile *cfg)
2854 	/* Reset the variables holding the cast details */
2855 	if (mini_get_debug_options ()->better_cast_details) {
2856 		MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2858 		MONO_ADD_INS (cfg->cbb, tls_get);
2859 		/* It is enough to reset the from field */
2860 		MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
2865  * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
2866  * generic code is generated.
2869 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
/* Nullable unbox is implemented by calling Nullable<T>.Unbox(val). */
2871 	MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
2874 		MonoInst *rgctx, *addr;
2876 		/* FIXME: What if the class is shared? We might not
2877 		   have to get the address of the method from the
/* Shared code: the concrete Unbox method is only known at run time, so
 * fetch its code address and rgctx and do an indirect call. */
2879 		addr = emit_get_rgctx_method (cfg, context_used, method,
2880 									  MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2882 		rgctx = emit_get_rgctx (cfg, method, context_used);
2884 		return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
/* Non-shared code: direct call. */
2886 		return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *
 *   Emit the CIL `unbox' type check and address computation for the boxed
 * object on *SP: verify the object is a non-array box of KLASS (throwing
 * InvalidCastException otherwise) and produce the address of the unboxed
 * payload (object pointer + sizeof (MonoObject)).
 */
2891 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
2895 	int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
2896 	int klass_reg = alloc_dreg (cfg ,STACK_PTR);
2897 	int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
2898 	int rank_reg = alloc_dreg (cfg ,STACK_I4);
2900 	obj_reg = sp [0]->dreg;
2901 	MONO_EMIT_NULL_CHECK (cfg, obj_reg);
2902 	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2903 	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2905 	/* FIXME: generics */
2906 	g_assert (klass->rank == 0);
/* An array can never be a boxed valuetype: rank must be 0. */
2909 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
2910 	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2912 	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2913 	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
/* Shared generic code: the element class to compare against comes from
 * the rgctx rather than being a compile-time constant. */
2916 		MonoInst *element_class;
2918 		/* This assertion is from the unboxcast insn */
2919 		g_assert (klass->rank == 0);
2921 		element_class = emit_get_rgctx_klass (cfg, context_used,
2922 											  klass->element_class, MONO_RGCTX_INFO_KLASS);
2924 		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
2925 		MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2927 		save_cast_details (cfg, klass->element_class, obj_reg);
2928 		mini_emit_class_check (cfg, eclass_reg, klass->element_class);
2929 		reset_cast_details (cfg);
/* Result: address of the valuetype data inside the box. */
2932 	NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
2933 	MONO_ADD_INS (cfg->cbb, add);
2934 	add->type = STACK_MP;
2941  * Returns NULL and set the cfg exception on error.
/* Emit IR allocating a new object of KLASS. FOR_BOX selects the boxing
 * allocator variant. Picks between: the generic shared-domain allocator,
 * an mscorlib-specialized AOT helper, a GC-provided managed allocator, and
 * the class-specific allocation function. */
2944 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box)
2946 	MonoInst *iargs [2];
/* Shared-domain code cannot bake in a vtable, so call
 * mono_object_new (domain, klass). */
2949 	if (cfg->opt & MONO_OPT_SHARED) {
2950 		EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2951 		EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
2953 		alloc_ftn = mono_object_new;
2954 	} else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
2955 		/* This happens often in argument checking code, eg. throw new FooException... */
2956 		/* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
2957 		EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
2958 		return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
2960 		MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2961 		MonoMethod *managed_alloc = NULL;
/* Vtable creation failed => class load error; report via cfg and bail. */
2965 			cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
2966 			cfg->exception_ptr = klass;
2970 #ifndef MONO_CROSS_COMPILE
2971 		managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2974 		if (managed_alloc) {
2975 			EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2976 			return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
2978 		alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocators take the instance size in pointer-words ("lw") as the
 * first argument, before the vtable. */
2980 			guint32 lw = vtable->klass->instance_size;
2981 			lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
2982 			EMIT_NEW_ICONST (cfg, iargs [0], lw);
2983 			EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
2986 			EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2990 	return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_alloc_from_inst:
 *
 *   Like handle_alloc (), but for shared generic code where the vtable is
 * not known at compile time: DATA_INST holds the run-time value (vtable, or
 * klass under MONO_OPT_SHARED) obtained from the rgctx.
 */
2994 handle_alloc_from_inst (MonoCompile *cfg, MonoClass *klass, MonoInst *data_inst,
2997 	MonoInst *iargs [2];
2998 	MonoMethod *managed_alloc = NULL;
3002 	  FIXME: we cannot get managed_alloc here because we can't get
3003 	  the class's vtable (because it's not a closed class)
3005 	MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3006 	MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3009 	if (cfg->opt & MONO_OPT_SHARED) {
3010 		EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3011 		iargs [1] = data_inst;
3012 		alloc_ftn = mono_object_new;
/* managed_alloc is always NULL here (see FIXME above), so this branch is
 * currently dead. */
3014 		if (managed_alloc) {
3015 			iargs [0] = data_inst;
3016 			return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3019 		iargs [0] = data_inst;
3020 		alloc_ftn = mono_object_new_specific;
3023 	return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3027  * Returns NULL and set the cfg exception on error.
/* Emit IR boxing VAL of valuetype KLASS: allocate the box, then store the
 * value at offset sizeof (MonoObject). Nullable<T> uses its Box() method
 * instead, which implements the null -> null-reference semantics. */
3030 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass)
3032 	MonoInst *alloc, *ins;
3034 	if (mono_class_is_nullable (klass)) {
3035 		MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3036 		return mono_emit_method_call (cfg, method, &val, NULL);
3039 	alloc = handle_alloc (cfg, klass, TRUE);
3043 	EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * handle_box_from_inst:
 *
 *   Shared-generic-code variant of handle_box (): the allocation data
 * (DATA_INST) comes from the rgctx, and for Nullable<T> the Box() method is
 * resolved and called indirectly through the rgctx as well.
 */
3049 handle_box_from_inst (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoInst *data_inst)
3051 	MonoInst *alloc, *ins;
3053 	if (mono_class_is_nullable (klass)) {
3054 		MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3055 		/* FIXME: What if the class is shared? We might not
3056 		   have to get the method address from the RGCTX. */
3057 		MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3058 												MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3059 		MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3061 		return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3063 	alloc = handle_alloc_from_inst (cfg, klass, data_inst, TRUE);
/* Store the value payload right after the object header. */
3065 	EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3072  * Returns NULL and set the cfg exception on error.
/* Emit IR for the CIL `castclass' opcode: null passes through, otherwise
 * the object must be an instance of KLASS or InvalidCastException is
 * thrown. Interfaces, sealed classes and the general case each get their
 * own check sequence. */
3075 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3077 	MonoBasicBlock *is_null_bb;
3078 	int obj_reg = src->dreg;
3079 	int vtable_reg = alloc_preg (cfg);
3081 	NEW_BBLOCK (cfg, is_null_bb);
/* castclass on null succeeds: branch straight past all checks. */
3083 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3084 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3086 	save_cast_details (cfg, klass, obj_reg);
3088 	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3089 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3090 		mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3092 		int klass_reg = alloc_preg (cfg);
3094 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array classes admit an exact-match fast path. */
3096 		if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3097 			/* the remoting code is broken, access the class for now */
3098 			if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3099 				MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3101 					cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3102 					cfg->exception_ptr = klass;
3105 				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
/* Compare MonoClass pointers instead of vtables (remoting-safe). */
3107 				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3108 				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3110 			MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* General case: walk the class hierarchy. */
3112 			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3113 			mini_emit_castclass (cfg, obj_reg, klass_reg, klass, is_null_bb);
3117 	MONO_START_BB (cfg, is_null_bb);
3119 	reset_cast_details (cfg);
3125  * Returns NULL and set the cfg exception on error.
/* Emit IR for the CIL `isinst' opcode: produce the object itself if it is
 * an instance of KLASS, NULL otherwise (a null input yields null). The
 * result register is pre-loaded with the object and overwritten with 0 on
 * the failure path, so the success paths just fall through. */
3128 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3131 	MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3132 	int obj_reg = src->dreg;
3133 	int vtable_reg = alloc_preg (cfg);
3134 	int res_reg = alloc_preg (cfg);
3136 	NEW_BBLOCK (cfg, is_null_bb);
3137 	NEW_BBLOCK (cfg, false_bb);
3138 	NEW_BBLOCK (cfg, end_bb);
3140 	/* Do the assignment at the beginning, so the other assignment can be if converted */
3141 	EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3142 	ins->type = STACK_OBJ;
3145 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3146 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3148 	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3149 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3150 		/* the is_null_bb target simply copies the input register to the output */
3151 		mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3153 		int klass_reg = alloc_preg (cfg);
3155 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Array case: first match the rank, then check the element class. */
3158 			int rank_reg = alloc_preg (cfg);
3159 			int eclass_reg = alloc_preg (cfg);
3161 			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3162 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3163 			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3164 			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3165 			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Special-case element types whose array-covariance rules differ:
 * object[], Enum-base[], Enum[], interface element types. */
3166 			if (klass->cast_class == mono_defaults.object_class) {
3167 				int parent_reg = alloc_preg (cfg);
3168 				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3169 				mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3170 				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3171 				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3172 			} else if (klass->cast_class == mono_defaults.enum_class->parent) {
3173 				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3174 				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3175 				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3176 			} else if (klass->cast_class == mono_defaults.enum_class) {
3177 				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3178 				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3179 			} else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3180 				mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3182 				if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3183 					/* Check that the object is a vector too */
/* SZARRAY (vector) has no bounds descriptor; non-null bounds means a
 * multi-dimensional or non-zero-based array. */
3184 					int bounds_reg = alloc_preg (cfg);
3185 					MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3186 					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3187 					MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3190 				/* the is_null_bb target simply copies the input register to the output */
3191 				mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3193 		} else if (mono_class_is_nullable (klass)) {
3194 			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3195 			/* the is_null_bb target simply copies the input register to the output */
3196 			mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
3198 			if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3199 				/* the remoting code is broken, access the class for now */
3200 				if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3201 					MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3203 						cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3204 						cfg->exception_ptr = klass;
3207 					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
/* Sealed class: a single pointer compare decides the result. */
3209 					MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3210 					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3212 				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3213 				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3215 				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3216 				/* the is_null_bb target simply copies the input register to the output */
3217 				mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, is_null_bb);
/* Failure: overwrite the pre-loaded result with NULL. */
3222 	MONO_START_BB (cfg, false_bb);
3224 	MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3225 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3227 	MONO_START_BB (cfg, is_null_bb);
3229 	MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the remoting-aware isinst helper opcode (see the comment
 * below for the 0/1/2 result encoding). Transparent proxies whose type
 * cannot be decided at JIT time yield 2 so the caller can fall back to a
 * runtime check.
 */
3235 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3237 	/* This opcode takes as input an object reference and a class, and returns:
3238 	0) if the object is an instance of the class,
3239 	1) if the object is not instance of the class,
3240 	2) if the object is a proxy whose type cannot be determined */
3243 	MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3244 	int obj_reg = src->dreg;
3245 	int dreg = alloc_ireg (cfg);
3247 	int klass_reg = alloc_preg (cfg);
3249 	NEW_BBLOCK (cfg, true_bb);
3250 	NEW_BBLOCK (cfg, false_bb);
3251 	NEW_BBLOCK (cfg, false2_bb);
3252 	NEW_BBLOCK (cfg, end_bb);
3253 	NEW_BBLOCK (cfg, no_proxy_bb);
/* null is "not an instance" (result 1). */
3255 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3256 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3258 	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3259 		NEW_BBLOCK (cfg, interface_fail_bb);
3261 		tmp_reg = alloc_preg (cfg);
3262 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3263 		mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
/* Interface test failed: distinguish a plain mismatch from a
 * transparent proxy with custom type info (-> result 2). */
3264 		MONO_START_BB (cfg, interface_fail_bb);
3265 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3267 		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3269 		tmp_reg = alloc_preg (cfg);
3270 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3271 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3272 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
3274 		tmp_reg = alloc_preg (cfg);
3275 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3276 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Non-proxy objects take the simple class check at no_proxy_bb. */
3278 		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3279 		tmp_reg = alloc_preg (cfg);
3280 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3281 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3283 		tmp_reg = alloc_preg (cfg);
3284 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3285 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3286 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
/* Proxy with custom type info: unresolved mismatches become result 2. */
3288 		mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3289 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3291 		MONO_START_BB (cfg, no_proxy_bb);
3293 		mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
3296 	MONO_START_BB (cfg, false_bb);
3298 	MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3299 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3301 	MONO_START_BB (cfg, false2_bb);
3303 	MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3304 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3306 	MONO_START_BB (cfg, true_bb);
3308 	MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3310 	MONO_START_BB (cfg, end_bb);
/* Materialize the result as an I4 stack value. */
3313 	MONO_INST_NEW (cfg, ins, OP_ICONST);
3315 	ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the remoting-aware castclass helper opcode (result encoding
 * in the comment below). Like handle_cisinst () but failures throw
 * InvalidCastException instead of producing a false result; undecidable
 * proxies return 1 so the caller can re-check at run time.
 */
3321 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3323 	/* This opcode takes as input an object reference and a class, and returns:
3324 	0) if the object is an instance of the class,
3325 	1) if the object is a proxy whose type cannot be determined
3326 	an InvalidCastException exception is thrown otherwhise*/
3329 	MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3330 	int obj_reg = src->dreg;
3331 	int dreg = alloc_ireg (cfg);
3332 	int tmp_reg = alloc_preg (cfg);
3333 	int klass_reg = alloc_preg (cfg);
3335 	NEW_BBLOCK (cfg, end_bb);
3336 	NEW_BBLOCK (cfg, ok_result_bb);
/* castclass on null succeeds with result 0. */
3338 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3339 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
3341 	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3342 		NEW_BBLOCK (cfg, interface_fail_bb);
3344 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3345 		mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
/* Interface check failed: only a transparent proxy with custom type
 * info may survive (as result 1); anything else throws. */
3346 		MONO_START_BB (cfg, interface_fail_bb);
3347 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3349 		mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3351 		tmp_reg = alloc_preg (cfg);
3352 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3353 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3354 		MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
3356 		MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3357 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3360 		NEW_BBLOCK (cfg, no_proxy_bb);
3362 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3363 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3364 		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Proxy path: test against the proxy's remote class. */
3366 		tmp_reg = alloc_preg (cfg);
3367 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3368 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3370 		tmp_reg = alloc_preg (cfg);
3371 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3372 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3373 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3375 		NEW_BBLOCK (cfg, fail_1_bb);
3377 		mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3379 		MONO_START_BB (cfg, fail_1_bb);
3381 		MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3382 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Ordinary object: a failed cast throws inside mini_emit_castclass. */
3384 		MONO_START_BB (cfg, no_proxy_bb);
3386 		mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3389 	MONO_START_BB (cfg, ok_result_bb);
3391 	MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3393 	MONO_START_BB (cfg, end_bb);
/* Materialize the result as an I4 stack value. */
3396 	MONO_INST_NEW (cfg, ins, OP_ICONST);
3398 	ins->type = STACK_I4;
3404  * Returns NULL and set the cfg exception on error.
/* Inline the delegate constructor: allocate the delegate of KLASS, set its
 * target/method/method_code/invoke_impl fields directly instead of calling
 * mono_delegate_ctor (). The delegate trampoline performs the checks that
 * mono_delegate_ctor () would have done. */
3406 static G_GNUC_UNUSED MonoInst*
3407 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
3409 	gpointer *trampoline;
3410 	MonoInst *obj, *method_ins, *tramp_ins;
3414 	obj = handle_alloc (cfg, klass, FALSE);
3418 	/* Inline the contents of mono_delegate_ctor */
3420 	/* Set target field */
3421 	/* Optimize away setting of NULL target */
3422 	if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
3423 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3425 	/* Set method field */
3426 	method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
3427 	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3430 	 * To avoid looking up the compiled code belonging to the target method
3431 	 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3432 	 * store it, and we fill it after the method has been compiled.
/* Dynamic methods are excluded because their slot could not be reclaimed
 * when the method is freed; AOT uses the rgctx slot instead. */
3434 	if (!cfg->compile_aot && !method->dynamic) {
3435 		MonoInst *code_slot_ins;
3438 			code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
3440 			domain = mono_domain_get ();
/* The method->code-slot map is lazily created under the domain lock. */
3441 			mono_domain_lock (domain);
3442 			if (!domain_jit_info (domain)->method_code_hash)
3443 				domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3444 			code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3446 				code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3447 				g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3449 			mono_domain_unlock (domain);
3451 			EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3453 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3456 	/* Set invoke_impl field */
3457 	if (cfg->compile_aot) {
3458 		EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3460 		trampoline = mono_create_delegate_trampoline (klass);
3461 		EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3463 	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3465 	/* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *   Emit IR for allocating a multi-dimensional array of rank RANK.
 *   The dimension arguments are on the evaluation stack in SP; the
 *   allocation is done through the mono_array_new_va () icall, which uses
 *   a vararg calling convention, so the cfg is flagged as containing
 *   varargs and LLVM code generation is disabled for this method.
 *   NOTE(review): this listing has elided lines (return type, braces);
 *   only comments were added here.
 */
3471 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3473 MonoJitICallInfo *info;
3475 /* Need to register the icall so it gets an icall wrapper */
3476 info = mono_get_array_new_va_icall (rank);
3478 cfg->flags |= MONO_CFG_HAS_VARARGS;
3480 /* mono_array_new_va () needs a vararg calling convention */
3481 cfg->disable_llvm = TRUE;
3483 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3484 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *   Prepend an OP_LOAD_GOTADDR instruction to the entry basic block so the
 *   GOT variable is initialized before any other code runs, then add a
 *   dummy use of it in the exit block so liveness analysis keeps the
 *   variable alive for the whole method.  No-op when there is no got_var
 *   or it has already been allocated.
 */
3488 mono_emit_load_got_addr (MonoCompile *cfg)
3490 MonoInst *getaddr, *dummy_use;
3492 if (!cfg->got_var || cfg->got_var_allocated)
3495 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3496 getaddr->dreg = cfg->got_var->dreg;
3498 /* Add it to the start of the first bblock */
/* If the entry block already has code, splice the load in front of it
 * by hand; otherwise fall through to a normal append (the `else` branch
 * appears elided in this listing). */
3499 if (cfg->bb_entry->code) {
3500 getaddr->next = cfg->bb_entry->code;
3501 cfg->bb_entry->code = getaddr;
3504 MONO_ADD_INS (cfg->bb_entry, getaddr);
3506 cfg->got_var_allocated = TRUE;
3509 * Add a dummy use to keep the got_var alive, since real uses might
3510 * only be generated by the back ends.
3511 * Add it to end_bblock, so the variable's lifetime covers the whole
3513 * It would be better to make the usage of the got var explicit in all
3514 * cases when the backend needs it (i.e. calls, throw etc.), so this
3515 * wouldn't be needed.
3517 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3518 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Cached value of the MONO_INLINELIMIT environment variable (defaults to
 * INLINE_LENGTH_LIMIT); lazily initialized on first inlining decision. */
3521 static int inline_limit;
3522 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *   Decide whether METHOD is eligible for inlining into the method being
 *   compiled in CFG.  Rejects (among others): generic-sharing compilations,
 *   inline depth > 10, runtime/icall/noinlining/synchronized/pinvoke
 *   methods, MarshalByRef classes, methods with exception clauses, bodies
 *   at or over the inline size limit, methods whose class still needs its
 *   cctor run, methods with declarative security, and (on soft-float
 *   targets) methods taking or returning R4 values.
 *   NOTE(review): many `return` lines are elided in this listing; the
 *   comments below describe only what the visible conditions test.
 */
3525 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3527 MonoMethodHeader *header;
3529 #ifdef MONO_ARCH_SOFT_FLOAT
3530 MonoMethodSignature *sig = mono_method_signature (method);
3534 if (cfg->generic_sharing_context)
3537 if (cfg->inline_depth > 10)
3540 #ifdef MONO_ARCH_HAVE_LMF_OPS
/* With LMF ops, small icall/pinvoke stubs can be inlined unless they
 * return a struct or belong to System.Array. */
3541 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3542 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3543 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3547 if (method->is_inflated)
3548 /* Avoid inflating the header */
3549 header = mono_method_get_header (((MonoMethodInflated*)method)->declaring);
3551 header = mono_method_get_header (method);
3553 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME) ||
3554 (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3555 (method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3556 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3557 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) ||
3558 (method->klass->marshalbyref) ||
3559 !header || header->num_clauses)
3562 /* also consider num_locals? */
3563 /* Do the size check early to avoid creating vtables */
3564 if (!inline_limit_inited) {
3565 if (getenv ("MONO_INLINELIMIT"))
3566 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3568 inline_limit = INLINE_LENGTH_LIMIT;
3569 inline_limit_inited = TRUE;
3571 if (header->code_size >= inline_limit)
3575 * if we can initialize the class of the method right away, we do,
3576 * otherwise we don't allow inlining if the class needs initialization,
3577 * since it would mean inserting a call to mono_runtime_class_init()
3578 * inside the inlined code
3580 if (!(cfg->opt & MONO_OPT_SHARED)) {
3581 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3582 if (cfg->run_cctors && method->klass->has_cctor) {
3583 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
3584 if (!method->klass->runtime_info)
3585 /* No vtable created yet */
3587 vtable = mono_class_vtable (cfg->domain, method->klass);
3590 /* This makes so that inline cannot trigger */
3591 /* .cctors: too many apps depend on them */
3592 /* running with a specific order... */
3593 if (! vtable->initialized)
3595 mono_runtime_class_init (vtable);
3597 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3598 if (!method->klass->runtime_info)
3599 /* No vtable created yet */
3601 vtable = mono_class_vtable (cfg->domain, method->klass);
3604 if (!vtable->initialized)
3609 * If we're compiling for shared code
3610 * the cctor will need to be run at aot method load time, for example,
3611 * or at the end of the compilation of the inlining method.
3613 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3618 * CAS - do not inline methods with declarative security
3619 * Note: this has to be before any possible return TRUE;
3621 if (mono_method_has_declsec (method))
3624 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float: R4 arguments/returns need conversion helpers, so such
 * methods are excluded from inlining. */
3626 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3628 for (i = 0; i < sig->param_count; ++i)
3629 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *   Decide whether a static field access in METHOD requires emitting a
 *   class-constructor invocation for VTABLE's class.  The visible guards
 *   bail out when: the vtable is already initialized and we are not AOT
 *   compiling; the class is marked beforefieldinit; no cctor needs to run;
 *   or the access happens inside an instance method of the same class
 *   (the cctor already ran before the method was called).
 *   NOTE(review): the `return` lines are elided in this listing.
 */
3637 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
3639 if (vtable->initialized && !cfg->compile_aot)
3642 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3645 if (!mono_class_needs_cctor_run (vtable->klass, method))
3648 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3649 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *   Emit IR computing the address of element INDEX in the one-dimensional
 *   array ARR of element class KLASS, including a bounds check against
 *   MonoArray::max_length.  On x86/amd64 with power-of-two element sizes
 *   a single LEA is emitted; otherwise an explicit multiply + add against
 *   the start of MonoArray::vector.  Returns the address instruction
 *   (STACK_PTR).
 */
3656 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index)
3660 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3662 mono_class_init (klass);
3663 size = mono_class_array_element_size (klass);
3665 mult_reg = alloc_preg (cfg);
3666 array_reg = arr->dreg;
3667 index_reg = index->dreg;
3669 #if SIZEOF_REGISTER == 8
3670 /* The array reg is 64 bits but the index reg is only 32 */
3671 index2_reg = alloc_preg (cfg);
3672 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32-bit target: truncate an I8 index, otherwise use it as-is. */
3674 if (index->type == STACK_I8) {
3675 index2_reg = alloc_preg (cfg);
3676 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
3678 index2_reg = index_reg;
3682 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3684 #if defined(TARGET_X86) || defined(TARGET_AMD64)
3685 if (size == 1 || size == 2 || size == 4 || size == 8) {
3686 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3688 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3689 ins->type = STACK_PTR;
/* Generic path: addr = arr + index * size + offsetof (MonoArray, vector) */
3695 add_reg = alloc_preg (cfg);
3697 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3698 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3699 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3700 ins->type = STACK_PTR;
3701 MONO_ADD_INS (cfg->cbb, ins);
3706 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *   Emit IR computing the address of element [i,j] of a two-dimensional
 *   array of element class KLASS.  For each dimension it loads the lower
 *   bound and length from the MonoArrayBounds records, rebases the index,
 *   and emits an unsigned-compare bounds check that throws
 *   IndexOutOfRangeException.  The final address is
 *   arr + ((realidx1 * len2 + realidx2) * size) + offsetof (MonoArray, vector).
 *   Only compiled on targets with native mul/div (depends on OP_LMUL).
 */
3708 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
3710 int bounds_reg = alloc_preg (cfg);
3711 int add_reg = alloc_preg (cfg);
3712 int mult_reg = alloc_preg (cfg);
3713 int mult2_reg = alloc_preg (cfg);
3714 int low1_reg = alloc_preg (cfg);
3715 int low2_reg = alloc_preg (cfg);
3716 int high1_reg = alloc_preg (cfg);
3717 int high2_reg = alloc_preg (cfg);
3718 int realidx1_reg = alloc_preg (cfg);
3719 int realidx2_reg = alloc_preg (cfg);
3720 int sum_reg = alloc_preg (cfg);
3725 mono_class_init (klass);
3726 size = mono_class_array_element_size (klass);
3728 index1 = index_ins1->dreg;
3729 index2 = index_ins2->dreg;
3731 /* range checking */
3732 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
3733 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* First dimension: realidx1 = index1 - lower_bound, check against length. */
3735 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
3736 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3737 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
3738 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
3739 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
3740 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
3741 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Second dimension: the bounds record lives sizeof (MonoArrayBounds)
 * past the first. */
3743 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
3744 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3745 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
3746 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
3747 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
3748 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
3749 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Row-major address computation. */
3751 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
3752 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
3753 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
3754 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
3755 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3757 ins->type = STACK_MP;
3759 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *   Dispatch element-address computation for CMETHOD's array class:
 *   rank 1 uses the fast inline path, rank 2 uses the inline 2-D path
 *   (when mul/div is native and MONO_OPT_INTRINS is on), and anything
 *   else falls back to a call to the marshal-generated Address () helper.
 *   IS_SET adjusts the rank computed from the signature (set methods take
 *   a trailing value argument).
 */
3766 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
3770 MonoMethod *addr_method;
3773 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3776 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1]);
3778 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3779 /* emit_ldelema_2 depends on OP_LMUL */
3780 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3781 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* General case: call the runtime-generated address helper. */
3785 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3786 addr_method = mono_marshal_get_array_address (rank, element_size);
3787 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/*
 * mini_emit_inst_for_method:
 *   Try to replace a call to CMETHOD with inline IR (an "intrinsic").
 *   Recognized targets visible below include: String get_Chars/get_Length/
 *   InternalSetChar; Object GetType/InternalGetHashCode/.ctor; Array
 *   get_Rank/get_Length; RuntimeHelpers get_OffsetToStringData; Thread
 *   SpinWait_nop/MemoryBarrier; Monitor Enter/Exit fast paths; the
 *   System.Threading.Interlocked family (Read/Increment/Decrement/Add/
 *   Exchange/CompareExchange, gated on per-arch atomic support); Debugger
 *   Break; Environment get_IsRunningOnWindows; Math; and SIMD intrinsics.
 *   Falls through to mono_arch_emit_inst_for_method () when nothing
 *   matches.  NOTE(review): many `return ins;`/`#endif`/brace lines are
 *   elided in this listing; comments only were added.
 */
3793 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
3795 MonoInst *ins = NULL;
3797 static MonoClass *runtime_helpers_class = NULL;
3798 if (! runtime_helpers_class)
3799 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
3800 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* ---- System.String intrinsics ---- */
3802 if (cmethod->klass == mono_defaults.string_class) {
3803 if (strcmp (cmethod->name, "get_Chars") == 0) {
3804 int dreg = alloc_ireg (cfg);
3805 int index_reg = alloc_preg (cfg);
3806 int mult_reg = alloc_preg (cfg);
3807 int add_reg = alloc_preg (cfg);
3809 #if SIZEOF_REGISTER == 8
3810 /* The array reg is 64 bits but the index reg is only 32 */
3811 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
3813 index_reg = args [1]->dreg;
3815 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
3817 #if defined(TARGET_X86) || defined(TARGET_AMD64)
3818 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
3819 add_reg = ins->dreg;
3820 /* Avoid a warning */
3822 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
/* Generic path: chars are 2 bytes each, so shift the index by 1. */
3825 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
3826 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3827 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3828 add_reg, G_STRUCT_OFFSET (MonoString, chars));
3830 type_from_op (ins, NULL, NULL);
3832 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3833 int dreg = alloc_ireg (cfg);
3834 /* Decompose later to allow more optimizations */
3835 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
3836 ins->type = STACK_I4;
3837 cfg->cbb->has_array_access = TRUE;
3838 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
3841 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
3842 int mult_reg = alloc_preg (cfg);
3843 int add_reg = alloc_preg (cfg);
3845 /* The corlib functions check for oob already. */
3846 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
3847 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3848 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
/* ---- System.Object intrinsics ---- */
3851 } else if (cmethod->klass == mono_defaults.object_class) {
3853 if (strcmp (cmethod->name, "GetType") == 0) {
3854 int dreg = alloc_preg (cfg);
3855 int vt_reg = alloc_preg (cfg);
3856 MONO_EMIT_NULL_CHECK (cfg, args [0]->dreg);
3857 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3858 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
3859 type_from_op (ins, NULL, NULL);
3862 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
/* Hash of the (non-moving) object address; 2654435761 is the golden
 * ratio hash multiplier. */
3863 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
3864 int dreg = alloc_ireg (cfg);
3865 int t1 = alloc_ireg (cfg);
3867 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
3868 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
3869 ins->type = STACK_I4;
3873 } else if (strcmp (cmethod->name, ".ctor") == 0) {
3874 MONO_INST_NEW (cfg, ins, OP_NOP);
3875 MONO_ADD_INS (cfg->cbb, ins);
/* ---- System.Array intrinsics ---- */
3879 } else if (cmethod->klass == mono_defaults.array_class) {
3880 if (cmethod->name [0] != 'g')
3883 if (strcmp (cmethod->name, "get_Rank") == 0) {
3884 int dreg = alloc_ireg (cfg);
3885 int vtable_reg = alloc_preg (cfg);
3886 MONO_EMIT_NULL_CHECK (cfg, args [0]->dreg);
3887 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, vtable_reg,
3888 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3889 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
3890 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3891 type_from_op (ins, NULL, NULL);
3894 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3895 int dreg = alloc_ireg (cfg);
3897 MONO_EMIT_NULL_CHECK (cfg, args [0]->dreg);
3898 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
3899 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
3900 type_from_op (ins, NULL, NULL);
/* ---- RuntimeHelpers ---- */
3905 } else if (cmethod->klass == runtime_helpers_class) {
3907 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
3908 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
/* ---- System.Threading.Thread ---- */
3912 } else if (cmethod->klass == mono_defaults.thread_class) {
3913 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
3914 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
3915 MONO_ADD_INS (cfg->cbb, ins);
3917 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
3918 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
3919 MONO_ADD_INS (cfg->cbb, ins);
/* ---- System.Threading.Monitor fast paths ---- */
3922 } else if (cmethod->klass == mono_defaults.monitor_class) {
3923 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
3924 if (strcmp (cmethod->name, "Enter") == 0) {
3927 if (COMPILE_LLVM (cfg)) {
3929 * Pass the argument normally, the LLVM backend will handle the
3930 * calling convention problems.
3932 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
3934 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
3935 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
3936 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
3937 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
3940 return (MonoInst*)call;
3941 } else if (strcmp (cmethod->name, "Exit") == 0) {
3944 if (COMPILE_LLVM (cfg)) {
3945 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
3947 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
3948 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
3949 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
3950 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
3953 return (MonoInst*)call;
3955 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
3956 MonoMethod *fast_method = NULL;
3958 /* Avoid infinite recursion */
3959 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
3960 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
3961 strcmp (cfg->method->name, "FastMonitorExit") == 0))
3964 if (strcmp (cmethod->name, "Enter") == 0 ||
3965 strcmp (cmethod->name, "Exit") == 0)
3966 fast_method = mono_monitor_get_fast_path (cmethod);
3970 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
/* ---- Array GetGenericValueImpl: copy element via its address ---- */
3972 } else if (mini_class_is_system_array (cmethod->klass) &&
3973 strcmp (cmethod->name, "GetGenericValueImpl") == 0) {
3974 MonoInst *addr, *store, *load;
3975 MonoClass *eklass = mono_class_from_mono_type (fsig->params [1]);
3977 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1]);
3978 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
3979 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* ---- System.Threading.Interlocked ---- */
3981 } else if (cmethod->klass->image == mono_defaults.corlib &&
3982 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
3983 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
3986 #if SIZEOF_REGISTER == 8
3987 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
3988 /* 64 bit reads are already atomic */
3989 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
3990 ins->dreg = mono_alloc_preg (cfg);
3991 ins->inst_basereg = args [0]->dreg;
3992 ins->inst_offset = 0;
3993 MONO_ADD_INS (cfg->cbb, ins);
3997 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
/* Increment/Decrement are emitted as atomic-add of +1/-1. */
3998 if (strcmp (cmethod->name, "Increment") == 0) {
3999 MonoInst *ins_iconst;
4002 if (fsig->params [0]->type == MONO_TYPE_I4)
4003 opcode = OP_ATOMIC_ADD_NEW_I4;
4004 #if SIZEOF_REGISTER == 8
4005 else if (fsig->params [0]->type == MONO_TYPE_I8)
4006 opcode = OP_ATOMIC_ADD_NEW_I8;
4009 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4010 ins_iconst->inst_c0 = 1;
4011 ins_iconst->dreg = mono_alloc_ireg (cfg);
4012 MONO_ADD_INS (cfg->cbb, ins_iconst);
4014 MONO_INST_NEW (cfg, ins, opcode);
4015 ins->dreg = mono_alloc_ireg (cfg);
4016 ins->inst_basereg = args [0]->dreg;
4017 ins->inst_offset = 0;
4018 ins->sreg2 = ins_iconst->dreg;
4019 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4020 MONO_ADD_INS (cfg->cbb, ins);
4022 } else if (strcmp (cmethod->name, "Decrement") == 0) {
4023 MonoInst *ins_iconst;
4026 if (fsig->params [0]->type == MONO_TYPE_I4)
4027 opcode = OP_ATOMIC_ADD_NEW_I4;
4028 #if SIZEOF_REGISTER == 8
4029 else if (fsig->params [0]->type == MONO_TYPE_I8)
4030 opcode = OP_ATOMIC_ADD_NEW_I8;
4033 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4034 ins_iconst->inst_c0 = -1;
4035 ins_iconst->dreg = mono_alloc_ireg (cfg);
4036 MONO_ADD_INS (cfg->cbb, ins_iconst);
4038 MONO_INST_NEW (cfg, ins, opcode);
4039 ins->dreg = mono_alloc_ireg (cfg);
4040 ins->inst_basereg = args [0]->dreg;
4041 ins->inst_offset = 0;
4042 ins->sreg2 = ins_iconst->dreg;
4043 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4044 MONO_ADD_INS (cfg->cbb, ins);
4046 } else if (strcmp (cmethod->name, "Add") == 0) {
4049 if (fsig->params [0]->type == MONO_TYPE_I4)
4050 opcode = OP_ATOMIC_ADD_NEW_I4;
4051 #if SIZEOF_REGISTER == 8
4052 else if (fsig->params [0]->type == MONO_TYPE_I8)
4053 opcode = OP_ATOMIC_ADD_NEW_I8;
4057 MONO_INST_NEW (cfg, ins, opcode);
4058 ins->dreg = mono_alloc_ireg (cfg);
4059 ins->inst_basereg = args [0]->dreg;
4060 ins->inst_offset = 0;
4061 ins->sreg2 = args [1]->dreg;
4062 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4063 MONO_ADD_INS (cfg->cbb, ins);
4066 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
4068 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
4069 if (strcmp (cmethod->name, "Exchange") == 0) {
4071 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
4073 if (fsig->params [0]->type == MONO_TYPE_I4)
4074 opcode = OP_ATOMIC_EXCHANGE_I4;
4075 #if SIZEOF_REGISTER == 8
4076 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
4077 (fsig->params [0]->type == MONO_TYPE_I))
4078 opcode = OP_ATOMIC_EXCHANGE_I8;
4080 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
4081 opcode = OP_ATOMIC_EXCHANGE_I4;
4086 MONO_INST_NEW (cfg, ins, opcode);
4087 ins->dreg = mono_alloc_ireg (cfg);
4088 ins->inst_basereg = args [0]->dreg;
4089 ins->inst_offset = 0;
4090 ins->sreg2 = args [1]->dreg;
4091 MONO_ADD_INS (cfg->cbb, ins);
4093 switch (fsig->params [0]->type) {
4095 ins->type = STACK_I4;
4099 ins->type = STACK_I8;
4101 case MONO_TYPE_OBJECT:
4102 ins->type = STACK_OBJ;
4105 g_assert_not_reached ();
4108 #if HAVE_WRITE_BARRIERS
/* Reference exchange stores into the heap; notify the GC. */
4110 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
4111 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
4115 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4117 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
4118 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
4120 gboolean is_ref = MONO_TYPE_IS_REFERENCE (fsig->params [1]);
4121 if (fsig->params [1]->type == MONO_TYPE_I4)
4123 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
4124 size = sizeof (gpointer);
4125 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I4)
4128 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
4129 ins->dreg = alloc_ireg (cfg);
4130 ins->sreg1 = args [0]->dreg;
4131 ins->sreg2 = args [1]->dreg;
4132 ins->sreg3 = args [2]->dreg;
4133 ins->type = STACK_I4;
4134 MONO_ADD_INS (cfg->cbb, ins);
4135 } else if (size == 8) {
4136 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
4137 ins->dreg = alloc_ireg (cfg);
4138 ins->sreg1 = args [0]->dreg;
4139 ins->sreg2 = args [1]->dreg;
4140 ins->sreg3 = args [2]->dreg;
4141 ins->type = STACK_I8;
4142 MONO_ADD_INS (cfg->cbb, ins);
4144 /* g_assert_not_reached (); */
4146 #if HAVE_WRITE_BARRIERS
4148 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
4149 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
4153 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
/* ---- Misc corlib: Debugger.Break, Environment.get_IsRunningOnWindows ---- */
4157 } else if (cmethod->klass->image == mono_defaults.corlib) {
4158 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
4159 && strcmp (cmethod->klass->name, "Debugger") == 0) {
4160 MONO_INST_NEW (cfg, ins, OP_BREAK);
4161 MONO_ADD_INS (cfg->cbb, ins);
4164 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
4165 && strcmp (cmethod->klass->name, "Environment") == 0) {
4167 EMIT_NEW_ICONST (cfg, ins, 1);
4169 EMIT_NEW_ICONST (cfg, ins, 0);
4173 } else if (cmethod->klass == mono_defaults.math_class) {
4175 * There is general branches code for Min/Max, but it does not work for
4177 * http://everything2.com/?node_id=1051618
4181 #ifdef MONO_ARCH_SIMD_INTRINSICS
4182 if (cfg->opt & MONO_OPT_SIMD) {
4183 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/* Last chance: let the architecture backend recognize the method. */
4189 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
4193 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *   Redirect selected runtime-internal calls to faster equivalents.
 *   Currently only String.InternalAllocateStr is handled: when string
 *   allocation profiling is off and a managed allocator is available,
 *   the call is replaced with a call to the GC's managed allocator,
 *   passing the String vtable and the length argument.
 *   NOTE(review): the fall-through `return NULL` path is elided in this
 *   listing.
 */
4196 inline static MonoInst*
4197 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4198 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4200 if (method->klass == mono_defaults.string_class) {
4201 /* managed string allocation support */
4202 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_STRING_ALLOC)) {
4203 MonoInst *iargs [2];
4204 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4205 MonoMethod *managed_alloc = NULL;
4207 g_assert (vtable); /*Should not fail since it System.String*/
4208 #ifndef MONO_CROSS_COMPILE
4209 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
4213 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4214 iargs [1] = args [0];
4215 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *   Before inlining, copy each call argument from the evaluation stack SP
 *   into a freshly created local variable and install those locals as
 *   cfg->args, so the inlined body's argument loads read stable storage.
 *   The `this` argument (when sig->hasthis) gets its type from the stack
 *   entry rather than the signature.
 */
4222 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4224 MonoInst *store, *temp;
4227 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4228 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4231 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4232 * would be different than the MonoInst's used to represent arguments, and
4233 * the ldelema implementation can't deal with that.
4234 * Solution: When ldelema is used on an inline argument, create a var for
4235 * it, emit ldelema on that var, and emit the saving code below in
4236 * inline_method () if needed.
4238 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4239 cfg->args [i] = temp;
4240 /* This uses cfg->args [i] which is set by the preceeding line */
4241 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4242 store->cil_code = sp [0]->cil_code;
/* Debug knobs: when set, inlining is restricted to methods whose
 * (callee / caller) full name matches an environment-supplied prefix. */
4247 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4248 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4250 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *   Return whether CALLED_METHOD may be inlined under the
 *   MONO_INLINE_CALLED_METHOD_NAME_LIMIT env var: the method's full name
 *   must start with the configured prefix.  An empty/unset limit allows
 *   everything (the TRUE return for that path is elided in this listing).
 *   The limit string is cached in a static on first use.
 */
4252 check_inline_called_method_name_limit (MonoMethod *called_method)
4255 static char *limit = NULL;
4257 if (limit == NULL) {
4258 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4260 if (limit_string != NULL)
4261 limit = limit_string;
4263 limit = (char *) "";
4266 if (limit [0] != '\0') {
4267 char *called_method_name = mono_method_full_name (called_method, TRUE);
4269 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4270 g_free (called_method_name);
4272 //return (strncmp_result <= 0);
4273 return (strncmp_result == 0);
4280 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *   Mirror of check_inline_called_method_name_limit for the CALLER:
 *   inlining is allowed only when CALLER_METHOD's full name starts with
 *   the MONO_INLINE_CALLER_METHOD_NAME_LIMIT prefix (empty/unset limit
 *   allows everything).
 */
4282 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4285 static char *limit = NULL;
4287 if (limit == NULL) {
4288 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4289 if (limit_string != NULL) {
4290 limit = limit_string;
4292 limit = (char *) "";
4296 if (limit [0] != '\0') {
4297 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
4299 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4300 g_free (caller_method_name);
4302 //return (strncmp_result <= 0);
4303 return (strncmp_result == 0);
/*
 * inline_method:
 *   Inline CMETHOD's body at the current IL position.  Saves the pieces of
 *   CFG state that mono_method_to_ir () mutates, creates locals for the
 *   callee, converts the body between fresh start/end basic blocks, then
 *   either commits the inline (cost < 60, or INLINE_ALLWAYS), merging the
 *   new blocks into the CFG and loading the return value, or aborts,
 *   clearing the exception state and discarding the new blocks.
 *   NOTE(review): some lines (returns, braces, `case` labels) are elided
 *   in this listing; comments only were added.
 */
4311 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4312 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
4314 MonoInst *ins, *rvar = NULL;
4315 MonoMethodHeader *cheader;
4316 MonoBasicBlock *ebblock, *sbblock;
4318 MonoMethod *prev_inlined_method;
4319 MonoInst **prev_locals, **prev_args;
4320 MonoType **prev_arg_types;
4321 guint prev_real_offset;
4322 GHashTable *prev_cbb_hash;
4323 MonoBasicBlock **prev_cil_offset_to_bb;
4324 MonoBasicBlock *prev_cbb;
4325 unsigned char* prev_cil_start;
4326 guint32 prev_cil_offset_to_bb_len;
4327 MonoMethod *prev_current_method;
4328 MonoGenericContext *prev_generic_context;
4329 gboolean ret_var_set, prev_ret_var_set;
4331 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
4333 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4334 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
4337 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4338 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
4342 if (cfg->verbose_level > 2)
4343 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4345 if (!cmethod->inline_info) {
4346 mono_jit_stats.inlineable_methods++;
4347 cmethod->inline_info = 1;
4349 /* allocate space to store the return value */
4350 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4351 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
4354 /* allocate local variables */
4355 cheader = mono_method_get_header (cmethod);
4356 prev_locals = cfg->locals;
4357 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4358 for (i = 0; i < cheader->num_locals; ++i)
4359 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4361 /* allocate start and end blocks */
4362 /* This is needed so if the inline is aborted, we can clean up */
4363 NEW_BBLOCK (cfg, sbblock);
4364 sbblock->real_offset = real_offset;
4366 NEW_BBLOCK (cfg, ebblock);
4367 ebblock->block_num = cfg->num_bblocks++;
4368 ebblock->real_offset = real_offset;
/* Save every cfg field that mono_method_to_ir () overwrites, so it can
 * be restored whether or not the inline succeeds. */
4370 prev_args = cfg->args;
4371 prev_arg_types = cfg->arg_types;
4372 prev_inlined_method = cfg->inlined_method;
4373 cfg->inlined_method = cmethod;
4374 cfg->ret_var_set = FALSE;
4375 cfg->inline_depth ++;
4376 prev_real_offset = cfg->real_offset;
4377 prev_cbb_hash = cfg->cbb_hash;
4378 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4379 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4380 prev_cil_start = cfg->cil_start;
4381 prev_cbb = cfg->cbb;
4382 prev_current_method = cfg->current_method;
4383 prev_generic_context = cfg->generic_context;
4384 prev_ret_var_set = cfg->ret_var_set;
4386 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4388 ret_var_set = cfg->ret_var_set;
4390 cfg->inlined_method = prev_inlined_method;
4391 cfg->real_offset = prev_real_offset;
4392 cfg->cbb_hash = prev_cbb_hash;
4393 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4394 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4395 cfg->cil_start = prev_cil_start;
4396 cfg->locals = prev_locals;
4397 cfg->args = prev_args;
4398 cfg->arg_types = prev_arg_types;
4399 cfg->current_method = prev_current_method;
4400 cfg->generic_context = prev_generic_context;
4401 cfg->ret_var_set = prev_ret_var_set;
4402 cfg->inline_depth --;
/* Commit path: conversion succeeded and is cheap enough. */
4404 if ((costs >= 0 && costs < 60) || inline_allways) {
4405 if (cfg->verbose_level > 2)
4406 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4408 mono_jit_stats.inlined_methods++;
4410 /* always add some code to avoid block split failures */
4411 MONO_INST_NEW (cfg, ins, OP_NOP);
4412 MONO_ADD_INS (prev_cbb, ins);
4414 prev_cbb->next_bb = sbblock;
4415 link_bblock (cfg, prev_cbb, sbblock);
4418 * Get rid of the begin and end bblocks if possible to aid local
4421 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4423 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4424 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4426 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4427 MonoBasicBlock *prev = ebblock->in_bb [0];
4428 mono_merge_basic_blocks (cfg, prev, ebblock);
4430 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4431 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4432 cfg->cbb = prev_cbb;
4440 * If the inlined method contains only a throw, then the ret var is not
4441 * set, so set it to a dummy value.
4444 static double r8_0 = 0.0;
4446 switch (rvar->type) {
4448 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4451 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4456 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4459 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4460 ins->type = STACK_R8;
4461 ins->inst_p0 = (void*)&r8_0;
4462 ins->dreg = rvar->dreg;
4463 MONO_ADD_INS (cfg->cbb, ins);
4466 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4469 g_assert_not_reached ();
/* Load the return value so the caller sees it on the stack. */
4473 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
/* Abort path: conversion failed or cost too high. */
4478 if (cfg->verbose_level > 2)
4479 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4480 cfg->exception_type = MONO_EXCEPTION_NONE;
4481 mono_loader_clear_error ();
4483 /* This gets rid of the newly added bblocks */
4484 cfg->cbb = prev_cbb;
4490 * Some of these comments may well be out-of-date.
4491 * Design decisions: we do a single pass over the IL code (and we do bblock
4492 * splitting/merging in the few cases when it's required: a back jump to an IL
4493 * address that was not already seen as bblock starting point).
4494 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4495 * Complex operations are decomposed in simpler ones right away. We need to let the
4496 * arch-specific code peek and poke inside this process somehow (except when the
4497 * optimizations can take advantage of the full semantic info of coarse opcodes).
4498 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4499 * MonoInst->opcode initially is the IL opcode or some simplification of that
4500 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4501 * opcode with value bigger than OP_LAST.
4502 * At this point the IR can be handed over to an interpreter, a dumb code generator
4503 * or to the optimizing code generator that will translate it to SSA form.
4505 * Profiling directed optimizations.
4506 * We may compile by default with few or no optimizations and instrument the code
4507 * or the user may indicate what methods to optimize the most either in a config file
4508 * or through repeated runs where the compiler applies offline the optimizations to
4509 * each method and then decides if it was worth it.
/*
 * Verification helper macros. They expand to bare `if` statements and rely on
 * locals of the enclosing function (sp, stack_start, header, num_args, ip, end)
 * plus the UNVERIFIED macro / load_error label, so they are only usable inside
 * mono_method_to_ir and related code.
 * NOTE(review): none of these are wrapped in do { } while (0); using one as the
 * body of an if/else arm would mis-parse or mis-bind the else. Confirm all call
 * sites use them only as plain statements.
 */
4512 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4513 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4514 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
4515 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4516 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4517 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4518 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
/* klass failed to load (or carries a pending type-load exception): record it
 * on the cfg and bail out through the load_error label. */
4519 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
4521 /* offset from br.s -> br like opcodes */
4522 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *   Check whether the IL address IP still belongs to basic block BB: returns
 *   TRUE when no bblock is recorded as starting at IP's offset (b == NULL) or
 *   when the recorded bblock is BB itself — i.e. IP does not begin a
 *   *different* basic block.
 */
4525 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
/* cil_offset_to_bb maps each CIL offset to the bblock starting there, or NULL */
4527 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4529 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *   First pass over the IL stream [start, end): decode each opcode with
 *   mono_opcode_value () and create basic blocks (via GET_BBLOCK) at every
 *   branch target and at the instruction following a branch/switch, so the
 *   main IR-generation pass finds all block boundaries already registered.
 *   Also marks the bblock containing a CEE_THROW as out-of-line.
 */
4533 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4535 unsigned char *ip = start;
4536 unsigned char *target;
4539 MonoBasicBlock *bblock;
4540 const MonoOpcode *opcode;
/* cli_addr is the IL offset of the opcode we are about to decode */
4543 cli_addr = ip - start;
4544 i = mono_opcode_value ((const guint8 **)&ip, end);
4547 opcode = &mono_opcodes [i];
/* Advance over the operand according to its encoding; only branch-like
 * operand kinds create bblocks. */
4548 switch (opcode->argument) {
4549 case MonoInlineNone:
4552 case MonoInlineString:
4553 case MonoInlineType:
4554 case MonoInlineField:
4555 case MonoInlineMethod:
4558 case MonoShortInlineR:
4565 case MonoShortInlineVar:
4566 case MonoShortInlineI:
/* Short branch: 1-byte signed displacement relative to the next instruction */
4569 case MonoShortInlineBrTarget:
4570 target = start + cli_addr + 2 + (signed char)ip [1];
4571 GET_BBLOCK (cfg, bblock, target);
/* the fall-through successor also starts a new bblock */
4574 GET_BBLOCK (cfg, bblock, ip);
/* Long branch: 4-byte signed displacement */
4576 case MonoInlineBrTarget:
4577 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4578 GET_BBLOCK (cfg, bblock, target);
4581 GET_BBLOCK (cfg, bblock, ip);
/* switch: count word followed by n 4-byte targets, all relative to the
 * end of the whole switch instruction (5 + 4*n bytes) */
4583 case MonoInlineSwitch: {
4584 guint32 n = read32 (ip + 1);
4587 cli_addr += 5 + 4 * n;
4588 target = start + cli_addr;
4589 GET_BBLOCK (cfg, bblock, target);
4591 for (j = 0; j < n; ++j) {
4592 target = start + cli_addr + (gint32)read32 (ip);
4593 GET_BBLOCK (cfg, bblock, target);
4603 g_assert_not_reached ();
/* Throws are assumed cold: walk back to the bblock containing the throw
 * and flag it so the code layout pass can move it out of line. */
4606 if (i == CEE_THROW) {
4607 unsigned char *bb_start = ip - 1;
4609 /* Find the start of the bblock containing the throw */
4611 while ((bb_start >= start) && !bblock) {
4612 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
4616 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *   Resolve TOKEN to a MonoMethod in the context of method M. For wrapper
 *   methods the token is an index into the wrapper's own data table rather
 *   than image metadata. The "_allow_open" suffix suggests methods on open
 *   constructed types are accepted here (mini_get_method below appears to
 *   filter those) — TODO confirm against the elided lines.
 */
4625 static inline MonoMethod *
4626 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4630 if (m->wrapper_type != MONO_WRAPPER_NONE)
4631 return mono_method_get_wrapper_data (m, token);
4633 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *   Like mini_get_method_allow_open, but when not compiling with a generic
 *   sharing context it additionally checks whether the resolved method's
 *   class is an open constructed type. The consequent of that check is not
 *   visible here — presumably the method is rejected in that case; confirm
 *   against the full source.
 */
4638 static inline MonoMethod *
4639 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4641 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
4643 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *   Resolve TOKEN to a MonoClass in the context of METHOD. As with method
 *   resolution, wrappers store the class directly in their wrapper data;
 *   otherwise the token is looked up in the method's image. The class is
 *   initialized before being returned (the elided lines likely NULL-check
 *   it first — confirm).
 */
4649 static inline MonoClass*
4650 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4654 if (method->wrapper_type != MONO_WRAPPER_NONE)
4655 klass = mono_method_get_wrapper_data (method, token);
4657 klass = mono_class_get_full (method->klass->image, token, context);
4659 mono_class_init (klass);
/*
 * check_linkdemand:
 *   CAS (declarative security) linkdemand check, evaluated only when CALLER
 *   is being inlined into cfg->method and CALLEE carries declarative
 *   security. On an ECMA linkdemand it emits IR that throws a
 *   SecurityException before the call; for other failures it records a
 *   MONO_EXCEPTION_SECURITY_LINKDEMAND on the cfg (unless an earlier
 *   exception is already pending).
 */
4664 * Returns TRUE if the JIT should abort inlining because "callee"
4665 * is influenced by security attributes.
4668 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
4672 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
4676 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
4677 if (result == MONO_JIT_SECURITY_OK)
4680 if (result == MONO_JIT_LINKDEMAND_ECMA) {
4681 /* Generate code to throw a SecurityException before the actual call/link */
4682 MonoSecurityManager *secman = mono_security_manager_get_methods ();
/* arg 0: integer code (4) — meaning defined by the managed helper; arg 1: the caller */
4685 NEW_ICONST (cfg, args [0], 4);
4686 NEW_METHODCONST (cfg, args [1], caller);
4687 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
4688 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
4689 /* don't hide previous results */
4690 cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
4691 cfg->exception_data = result;
/*
 * throw_exception:
 *   Lazily look up and cache the managed helper
 *   SecurityManager.ThrowException(1 arg) used to raise security exceptions
 *   from JITted code.
 *   NOTE(review): the static cache is written without synchronization; a race
 *   would store the same value twice, which looks benign — confirm.
 */
4699 throw_exception (void)
4701 static MonoMethod *method = NULL;
4704 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4705 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *   Emit IR that calls SecurityManager.ThrowException with the given
 *   pre-allocated exception object (passed as a pointer constant).
 */
4712 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
4714 MonoMethod *thrower = throw_exception ();
4717 EMIT_NEW_PCONST (cfg, args [0], ex);
4718 mono_emit_method_call (cfg, thrower, args, NULL);
4722 * Return the original method if a wrapper is specified. We can only access
4723 * the custom attributes from the original method.
4726 get_original_method (MonoMethod *method)
/* non-wrapper methods are already "original" */
4728 if (method->wrapper_type == MONO_WRAPPER_NONE)
4731 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
4732 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
4735 /* in other cases we need to find the original method */
4736 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *   CoreCLR security: if FIELD's declaring class is Critical, emit code that
 *   throws FieldAccessException when CALLER (unwrapped to its original
 *   method) is Transparent. SafeCritical/Critical callers pass.
 */
4740 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
4741 MonoBasicBlock *bblock, unsigned char *ip)
4743 /* there's no restriction to access Transparent or SafeCritical fields, so we only check calls to Critical methods */
4744 if (mono_security_core_clr_class_level (mono_field_get_parent (field)) != MONO_SECURITY_CORE_CLR_CRITICAL)
4747 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4748 caller = get_original_method (caller);
4752 /* caller is Critical! only SafeCritical and Critical callers can access the field, so we throw if caller is Transparent */
4753 if (mono_security_core_clr_method_level (caller, TRUE) == MONO_SECURITY_CORE_CLR_TRANSPARENT)
4754 emit_throw_exception (cfg, mono_get_exception_field_access ());
/*
 * ensure_method_is_allowed_to_call_method:
 *   CoreCLR security: mirror of the field check above for method calls — if
 *   CALLEE is Critical, emit code that throws MethodAccessException when the
 *   (unwrapped) CALLER is Transparent.
 */
4758 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4759 MonoBasicBlock *bblock, unsigned char *ip)
4761 /* there's no restriction to call Transparent or SafeCritical code, so we only check calls to Critical methods */
4762 if (mono_security_core_clr_method_level (callee, TRUE) != MONO_SECURITY_CORE_CLR_CRITICAL)
4765 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4766 caller = get_original_method (caller);
4770 /* caller is Critical! only SafeCritical and Critical callers can call it, so we throw if the caller is Transparent */
4771 if (mono_security_core_clr_method_level (caller, TRUE) == MONO_SECURITY_CORE_CLR_TRANSPARENT)
4772 emit_throw_exception (cfg, mono_get_exception_method_access ());
4776 * Check that the IL instructions at ip are the array initialization
4777 * sequence and return the pointer to the data and the size.
/*
 * Recognizes the compiler-generated pattern
 *   dup; ldtoken <field>; call RuntimeHelpers::InitializeArray(...)
 * following a newarr, so the array can be filled directly from the field's
 * RVA data instead of calling the helper at runtime. Returns NULL when the
 * pattern or element type does not qualify.
 */
4780 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
4783 * newarr[System.Int32]
4785 * ldtoken field valuetype ...
4786 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip[5] == 0x4 checks the ldtoken token's table byte (a Field token) */
4788 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
4789 guint32 token = read32 (ip + 7);
4790 guint32 field_token = read32 (ip + 2);
4791 guint32 field_index = field_token & 0xffffff;
4793 const char *data_ptr;
4795 MonoMethod *cmethod;
4796 MonoClass *dummy_class;
4797 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
4803 *out_field_token = field_token;
4805 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* the called method must really be corlib's RuntimeHelpers.InitializeArray */
4808 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
/* only element types whose raw layout matches the stored blob qualify */
4810 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
4811 case MONO_TYPE_BOOLEAN:
4815 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
4816 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
4817 case MONO_TYPE_CHAR:
4827 return NULL; /* stupid ARM FP swapped format */
/* the field's blob must be at least as large as the requested data */
4837 if (size > mono_type_size (field->type, &dummy_align))
4840 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
4841 if (!method->klass->image->dynamic) {
4842 field_index = read32 (ip + 2) & 0xffffff;
4843 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
4844 data_ptr = mono_image_rva_map (method->klass->image, rva);
4845 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
4846 /* for aot code we do the lookup on load */
4847 if (aot && data_ptr)
4848 return GUINT_TO_POINTER (rva);
4850 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* dynamic (Reflection.Emit) images: read the data through the field object */
4852 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *   Record a MONO_EXCEPTION_INVALID_PROGRAM on the cfg with a message naming
 *   the method and disassembling the offending instruction at IP (or noting
 *   an empty method body).
 */
4860 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
4862 char *method_fname = mono_method_full_name (method, TRUE);
4865 if (mono_method_get_header (method)->code_size == 0)
4866 method_code = g_strdup ("method body is empty.");
4868 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
4869 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
4870 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
/* the strdup'd pieces are owned here; the message itself is owned by cfg */
4871 g_free (method_fname);
4872 g_free (method_code);
/*
 * set_exception_object:
 *   Record a pre-built managed exception object on the cfg. The exception_ptr
 *   slot is registered as a GC root so the object stays alive until reported.
 */
4876 set_exception_object (MonoCompile *cfg, MonoException *exception)
4878 cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
4879 MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
4880 cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 *   Return TRUE if KLASS is a reference type, resolving type variables
 *   through the generic sharing context first when one is active.
 */
4884 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4888 if (cfg->generic_sharing_context)
4889 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
4891 type = &klass->byval_arg;
4892 return MONO_TYPE_IS_REFERENCE (type);
4896 * mono_decompose_array_access_opts:
4898 * Decompose array access opcodes.
4899 * This should be in decompose.c, but it emits calls so it has to stay here until
4900 * the old JIT is gone.
/*
 * Lowers OP_ARRAYLENGTH / OP_BOUNDS_CHECK / OP_NEWARR / string-length style
 * opcodes into explicit null checks, loads and (for newarr) allocator calls.
 * New code is emitted into a scratch bblock (first_bb) and then spliced over
 * the original instruction with mono_replace_ins ().
 */
4903 mono_decompose_array_access_opts (MonoCompile *cfg)
4905 MonoBasicBlock *bb, *first_bb;
4908 * Unlike decompose_long_opts, this pass does not alter the CFG of the method so it
4909 * can be executed anytime. It should be run before decompose_long
4913 * Create a dummy bblock and emit code into it so we can use the normal
4914 * code generation macros.
4916 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4917 first_bb = cfg->cbb;
4919 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4921 MonoInst *prev = NULL;
4923 MonoInst *iargs [3];
/* bblocks with no array-access opcodes are skipped entirely */
4926 if (!bb->has_array_access)
4929 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE DECOMPOSE-ARRAY-ACCESS-OPTS ");
4931 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4937 for (ins = bb->code; ins; ins = ins->next) {
4938 switch (ins->opcode) {
/* array length: null-check, then load MonoArray.max_length */
4940 MONO_EMIT_NULL_CHECK (cfg, ins->sreg1);
4941 NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg, ins->sreg1,
4942 G_STRUCT_OFFSET (MonoArray, max_length));
4943 MONO_ADD_INS (cfg->cbb, dest);
4945 case OP_BOUNDS_CHECK:
/* NOTE(review): the trailing backslash on the next line is a stray
 * line-continuation (it merely splices with the following line, so
 * behavior is unchanged) — should be removed. */
4946 MONO_EMIT_NULL_CHECK (cfg, ins->sreg1); \
4947 MONO_ARCH_EMIT_BOUNDS_CHECK (cfg, ins->sreg1, ins->inst_imm, ins->sreg2);
/* newarr, shared code: go through the domain-aware mono_array_new icall */
4950 if (cfg->opt & MONO_OPT_SHARED) {
4951 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4952 EMIT_NEW_CLASSCONST (cfg, iargs [1], ins->inst_newa_class);
4953 MONO_INST_NEW (cfg, iargs [2], OP_MOVE);
4954 iargs [2]->dreg = ins->sreg1;
4956 dest = mono_emit_jit_icall (cfg, mono_array_new, iargs);
4957 dest->dreg = ins->dreg;
/* newarr, non-shared: use the vtable directly, preferring a
 * GC-provided managed allocator when available */
4959 MonoVTable *vtable = mono_class_vtable (cfg->domain, mono_array_class_get (ins->inst_newa_class, 1));
4960 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (vtable, 1);
4962 g_assert (vtable); /*This shall not fail since we check for this condition on OP_NEWARR creation*/
4963 NEW_VTABLECONST (cfg, iargs [0], vtable);
4964 MONO_ADD_INS (cfg->cbb, iargs [0]);
4965 MONO_INST_NEW (cfg, iargs [1], OP_MOVE);
4966 iargs [1]->dreg = ins->sreg1;
4969 dest = mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4971 dest = mono_emit_jit_icall (cfg, mono_array_new_specific, iargs);
4972 dest->dreg = ins->dreg;
/* string length: null-check, then load MonoString.length */
4976 MONO_EMIT_NULL_CHECK (cfg, ins->sreg1);
4977 NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg,
4978 ins->sreg1, G_STRUCT_OFFSET (MonoString, length));
4979 MONO_ADD_INS (cfg->cbb, dest);
4985 g_assert (cfg->cbb == first_bb);
4987 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
4988 /* Replace the original instruction with the new code sequence */
4990 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
/* reset the scratch bblock for the next instruction */
4991 first_bb->code = first_bb->last_ins = NULL;
4992 first_bb->in_count = first_bb->out_count = 0;
4993 cfg->cbb = first_bb;
5000 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER DECOMPOSE-ARRAY-ACCESS-OPTS ");
5010 #ifdef MONO_ARCH_SOFT_FLOAT
5013 * mono_decompose_soft_float:
5015 * Soft float support on ARM. We store each double value in a pair of integer vregs,
5016 * similar to long support on 32 bit platforms. 32 bit float values require special
5017 * handling when used as locals, arguments, and in calls.
5018 * One big problem with soft-float is that there are few r4 test cases in our test suite.
/*
 * Rewrites fp opcodes into integer/long equivalents and icalls so the method
 * can run without an FPU: r8 constants become i8 constants, r8 loads/stores
 * become i8 loads/stores, r4 memory accesses go through the mono_fstore_r4 /
 * mono_fload_r4 icalls, fp calls become long calls, and fp compares/branches
 * become emulation icalls plus integer compares. Uses the same scratch-bblock
 * + mono_replace_ins () splicing scheme as the array-access pass above.
 */
5021 mono_decompose_soft_float (MonoCompile *cfg)
5023 MonoBasicBlock *bb, *first_bb;
5026 * This pass creates long opcodes, so it should be run before decompose_long_opts ().
5030 * Create a dummy bblock and emit code into it so we can use the normal
5031 * code generation macros.
5033 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
5034 first_bb = cfg->cbb;
5036 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
5038 MonoInst *prev = NULL;
5041 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE HANDLE-SOFT-FLOAT ");
5043 cfg->cbb->code = cfg->cbb->last_ins = NULL;
5049 for (ins = bb->code; ins; ins = ins->next) {
5050 const char *spec = INS_INFO (ins->opcode);
5052 /* Most fp operations are handled automatically by opcode emulation */
5054 switch (ins->opcode) {
/* r8 constant: reinterpret the double's bits as an i8 constant */
5057 d.vald = *(double*)ins->inst_p0;
5058 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
/* r4 constant: widen to r8 first, then reinterpret as above */
5063 /* We load the r8 value */
5064 d.vald = *(float*)ins->inst_p0;
5065 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
/* fp register moves become long moves */
5069 ins->opcode = OP_LMOVE;
/* +1 / +2 below select the low/high halves of the vreg pair holding
 * the soft-float value — confirm the exact convention in mini docs */
5072 ins->opcode = OP_MOVE;
5073 ins->sreg1 = ins->sreg1 + 1;
5076 ins->opcode = OP_MOVE;
5077 ins->sreg1 = ins->sreg1 + 2;
/* fp return value: return the vreg pair as a long */
5080 int reg = ins->sreg1;
5082 ins->opcode = OP_SETLRET;
5084 ins->sreg1 = reg + 1;
5085 ins->sreg2 = reg + 2;
/* r8 memory accesses are bitwise-identical to i8 accesses */
5088 case OP_LOADR8_MEMBASE:
5089 ins->opcode = OP_LOADI8_MEMBASE;
5091 case OP_STORER8_MEMBASE_REG:
5092 ins->opcode = OP_STOREI8_MEMBASE_REG;
/* r4 store: needs an r8->r4 narrowing, done in the mono_fstore_r4 icall */
5094 case OP_STORER4_MEMBASE_REG: {
5095 MonoInst *iargs [2];
5098 /* Arg 1 is the double value */
5099 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
5100 iargs [0]->dreg = ins->sreg1;
5102 /* Arg 2 is the address to store to */
5103 addr_reg = mono_alloc_preg (cfg);
5104 EMIT_NEW_BIALU_IMM (cfg, iargs [1], OP_PADD_IMM, addr_reg, ins->inst_destbasereg, ins->inst_offset);
5105 mono_emit_jit_icall (cfg, mono_fstore_r4, iargs);
/* r4 load: widen to r8 via the mono_fload_r4 icall */
5109 case OP_LOADR4_MEMBASE: {
5110 MonoInst *iargs [1];
5114 addr_reg = mono_alloc_preg (cfg);
5115 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, addr_reg, ins->inst_basereg, ins->inst_offset);
5116 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
5117 conv->dreg = ins->dreg;
/* fp calls: R4-returning calls become int calls + an r4->r8 widening;
 * R8-returning calls simply become long calls */
5122 case OP_FCALL_MEMBASE: {
5123 MonoCallInst *call = (MonoCallInst*)ins;
5124 if (call->signature->ret->type == MONO_TYPE_R4) {
5125 MonoCallInst *call2;
5126 MonoInst *iargs [1];
5129 /* Convert the call into a call returning an int */
5130 MONO_INST_NEW_CALL (cfg, call2, OP_CALL);
5131 memcpy (call2, call, sizeof (MonoCallInst));
5132 switch (ins->opcode) {
5134 call2->inst.opcode = OP_CALL;
5137 call2->inst.opcode = OP_CALL_REG;
5139 case OP_FCALL_MEMBASE:
5140 call2->inst.opcode = OP_CALL_MEMBASE;
5143 g_assert_not_reached ();
5145 call2->inst.dreg = mono_alloc_ireg (cfg);
5146 MONO_ADD_INS (cfg->cbb, (MonoInst*)call2);
5148 /* FIXME: Optimize this */
5150 /* Emit an r4->r8 conversion */
5151 EMIT_NEW_VARLOADA_VREG (cfg, iargs [0], call2->inst.dreg, &mono_defaults.int32_class->byval_arg);
5152 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
5153 conv->dreg = ins->dreg;
5155 /* The call sequence might include fp ins */
5158 switch (ins->opcode) {
5160 ins->opcode = OP_LCALL;
5163 ins->opcode = OP_LCALL_REG;
5165 case OP_FCALL_MEMBASE:
5166 ins->opcode = OP_LCALL_MEMBASE;
5169 g_assert_not_reached ();
/* fcompare + fp branch: replaced by an emulation icall whose int result
 * feeds an integer compare-and-branch with the same targets */
5175 MonoJitICallInfo *info;
5176 MonoInst *iargs [2];
5177 MonoInst *call, *cmp, *br;
5179 /* Convert fcompare+fbcc to icall+icompare+beq */
5181 info = mono_find_jit_opcode_emulation (ins->next->opcode);
5184 /* Create dummy MonoInst's for the arguments */
5185 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
5186 iargs [0]->dreg = ins->sreg1;
5187 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
5188 iargs [1]->dreg = ins->sreg2;
5190 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
5192 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
5193 cmp->sreg1 = call->dreg;
5195 MONO_ADD_INS (cfg->cbb, cmp);
5197 MONO_INST_NEW (cfg, br, OP_IBNE_UN);
5198 br->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * 2);
5199 br->inst_true_bb = ins->next->inst_true_bb;
5200 br->inst_false_bb = ins->next->inst_false_bb;
5201 MONO_ADD_INS (cfg->cbb, br);
5203 /* The call sequence might include fp ins */
5206 /* Skip fbcc or fccc */
5207 NULLIFY_INS (ins->next);
/* fp set-condition opcodes: emulation icall + integer compare/set */
5215 MonoJitICallInfo *info;
5216 MonoInst *iargs [2];
5219 /* Convert fccc to icall+icompare+iceq */
5221 info = mono_find_jit_opcode_emulation (ins->opcode);
5224 /* Create dummy MonoInst's for the arguments */
5225 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
5226 iargs [0]->dreg = ins->sreg1;
5227 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
5228 iargs [1]->dreg = ins->sreg2;
5230 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
5232 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, call->dreg, 1);
5233 MONO_EMIT_NEW_UNALU (cfg, OP_ICEQ, ins->dreg, -1);
5235 /* The call sequence might include fp ins */
/* finiteness check (ckfinite): icall + throw ArithmeticException if not */
5240 MonoInst *iargs [2];
5241 MonoInst *call, *cmp;
5243 /* Convert to icall+icompare+cond_exc+move */
5245 /* Create dummy MonoInst's for the arguments */
5246 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
5247 iargs [0]->dreg = ins->sreg1;
5249 call = mono_emit_jit_icall (cfg, mono_isfinite, iargs);
5251 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
5252 cmp->sreg1 = call->dreg;
5254 MONO_ADD_INS (cfg->cbb, cmp);
5256 MONO_EMIT_NEW_COND_EXC (cfg, INE_UN, "ArithmeticException");
5258 /* Do the assignment if the value is finite */
5259 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, ins->dreg, ins->sreg1);
/* sanity: no fp-typed operands may survive this pass */
5265 if (spec [MONO_INST_SRC1] == 'f' || spec [MONO_INST_SRC2] == 'f' || spec [MONO_INST_DEST] == 'f') {
5266 mono_print_ins (ins);
5267 g_assert_not_reached ();
5272 g_assert (cfg->cbb == first_bb);
5274 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
5275 /* Replace the original instruction with the new code sequence */
5277 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
5278 first_bb->code = first_bb->last_ins = NULL;
5279 first_bb->in_count = first_bb->out_count = 0;
5280 cfg->cbb = first_bb;
5287 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER HANDLE-SOFT-FLOAT ");
/* this pass created long opcodes, so decompose them now */
5290 mono_decompose_long_opts (cfg);
/*
 * emit_stloc_ir:
 *   Emit the store of the top-of-stack value *sp into local N. When the store
 *   would be a plain register move and the source is a constant that is also
 *   the last instruction emitted, the move is elided by retargeting the
 *   constant's destination register to the local's vreg.
 */
5296 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5299 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5300 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5301 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5302 /* Optimize reg-reg moves away */
5304 * Can't optimize other opcodes, since sp[0] might point to
5305 * the last ins of a decomposed opcode.
5307 sp [0]->dreg = (cfg)->locals [n]->dreg;
/* general case: emit an explicit local store */
5309 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5314 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *   Peephole for the `ldloca <n>; initobj <type>` pattern: instead of taking
 *   the local's address (which forces it into memory), directly zero the
 *   local — a NULL pconst for reference types, VZERO for value types.
 *   SIZE is the ldloca encoding length; the return value (elided here) is
 *   presumably the advanced ip on success, NULL otherwise — confirm.
 */
5317 static inline unsigned char *
5318 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
5327 local = read16 (ip + 2);
/* only fires when the following initobj is in the same bblock */
5331 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5332 gboolean skip = FALSE;
5334 /* From the INITOBJ case */
5335 token = read32 (ip + 2);
5336 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5337 CHECK_TYPELOAD (klass);
5338 if (generic_class_is_reference_type (cfg, klass)) {
5339 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5340 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5341 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5342 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5343 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/*
 * is_exception_class:
 *   Return TRUE if CLASS is System.Exception or (walking ->parent) derives
 *   from it.
 */
5356 is_exception_class (MonoClass *class)
5359 if (class == mono_defaults.exception_class)
5361 class = class->parent;
5367 * mono_method_to_ir:
5369 * Translate the .net IL into linear IR.
5372 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5373 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5374 guint inline_offset, gboolean is_virtual_call)
5376 MonoInst *ins, **sp, **stack_start;
5377 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5378 MonoMethod *cmethod, *method_definition;
5379 MonoInst **arg_array;
5380 MonoMethodHeader *header;
5382 guint32 token, ins_flag;
5384 MonoClass *constrained_call = NULL;
5385 unsigned char *ip, *end, *target, *err_pos;
5386 static double r8_0 = 0.0;
5387 MonoMethodSignature *sig;
5388 MonoGenericContext *generic_context = NULL;
5389 MonoGenericContainer *generic_container = NULL;
5390 MonoType **param_types;
5391 int i, n, start_new_bblock, dreg;
5392 int num_calls = 0, inline_costs = 0;
5393 int breakpoint_id = 0;
5395 MonoBoolean security, pinvoke;
5396 MonoSecurityManager* secman = NULL;
5397 MonoDeclSecurityActions actions;
5398 GSList *class_inits = NULL;
5399 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5401 gboolean init_locals, seq_points;
5403 /* serialization and xdomain stuff may need access to private fields and methods */
5404 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5405 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5406 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5407 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5408 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5409 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5411 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5413 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5414 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5415 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5416 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5418 image = method->klass->image;
5419 header = mono_method_get_header (method);
5420 generic_container = mono_method_get_generic_container (method);
5421 sig = mono_method_signature (method);
5422 num_args = sig->hasthis + sig->param_count;
5423 ip = (unsigned char*)header->code;
5424 cfg->cil_start = ip;
5425 end = ip + header->code_size;
5426 mono_jit_stats.cil_code_size += header->code_size;
5427 init_locals = header->init_locals;
5429 seq_points = cfg->gen_seq_points && cfg->method == method;
5432 * Methods without init_locals set could cause asserts in various passes
5437 method_definition = method;
5438 while (method_definition->is_inflated) {
5439 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5440 method_definition = imethod->declaring;
5443 /* SkipVerification is not allowed if core-clr is enabled */
5444 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5446 dont_verify_stloc = TRUE;
5449 if (!dont_verify && mini_method_verify (cfg, method_definition))
5450 goto exception_exit;
5452 if (mono_debug_using_mono_debugger ())
5453 cfg->keep_cil_nops = TRUE;
5455 if (sig->is_inflated)
5456 generic_context = mono_method_get_context (method);
5457 else if (generic_container)
5458 generic_context = &generic_container->context;
5459 cfg->generic_context = generic_context;
5461 if (!cfg->generic_sharing_context)
5462 g_assert (!sig->has_type_parameters);
5464 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5465 g_assert (method->is_inflated);
5466 g_assert (mono_method_get_context (method)->method_inst);
5468 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5469 g_assert (sig->generic_param_count);
5471 if (cfg->method == method) {
5472 cfg->real_offset = 0;
5474 cfg->real_offset = inline_offset;
5477 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5478 cfg->cil_offset_to_bb_len = header->code_size;
5480 cfg->current_method = method;
5482 if (cfg->verbose_level > 2)
5483 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5485 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5487 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5488 for (n = 0; n < sig->param_count; ++n)
5489 param_types [n + sig->hasthis] = sig->params [n];
5490 cfg->arg_types = param_types;
5492 dont_inline = g_list_prepend (dont_inline, method);
5493 if (cfg->method == method) {
5495 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5496 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5499 NEW_BBLOCK (cfg, start_bblock);
5500 cfg->bb_entry = start_bblock;
5501 start_bblock->cil_code = NULL;
5502 start_bblock->cil_length = 0;
5505 NEW_BBLOCK (cfg, end_bblock);
5506 cfg->bb_exit = end_bblock;
5507 end_bblock->cil_code = NULL;
5508 end_bblock->cil_length = 0;
5509 g_assert (cfg->num_bblocks == 2);
5511 arg_array = cfg->args;
5513 if (header->num_clauses) {
5514 cfg->spvars = g_hash_table_new (NULL, NULL);
5515 cfg->exvars = g_hash_table_new (NULL, NULL);
5517 /* handle exception clauses */
5518 for (i = 0; i < header->num_clauses; ++i) {
5519 MonoBasicBlock *try_bb;
5520 MonoExceptionClause *clause = &header->clauses [i];
5521 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5522 try_bb->real_offset = clause->try_offset;
5523 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5524 tblock->real_offset = clause->handler_offset;
5525 tblock->flags |= BB_EXCEPTION_HANDLER;
5527 link_bblock (cfg, try_bb, tblock);
5529 if (*(ip + clause->handler_offset) == CEE_POP)
5530 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5532 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5533 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5534 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5535 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5536 MONO_ADD_INS (tblock, ins);
5538 /* todo: is a fault block unsafe to optimize? */
5539 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5540 tblock->flags |= BB_EXCEPTION_UNSAFE;
5544 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5546 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5548 /* catch and filter blocks get the exception object on the stack */
5549 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5550 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5551 MonoInst *dummy_use;
5553 /* mostly like handle_stack_args (), but just sets the input args */
5554 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5555 tblock->in_scount = 1;
5556 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5557 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5560 * Add a dummy use for the exvar so its liveness info will be
5564 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5566 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5567 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5568 tblock->flags |= BB_EXCEPTION_HANDLER;
5569 tblock->real_offset = clause->data.filter_offset;
5570 tblock->in_scount = 1;
5571 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5572 /* The filter block shares the exvar with the handler block */
5573 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5574 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5575 MONO_ADD_INS (tblock, ins);
5579 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5580 clause->data.catch_class &&
5581 cfg->generic_sharing_context &&
5582 mono_class_check_context_used (clause->data.catch_class)) {
5584 * In shared generic code with catch
5585 * clauses containing type variables
5586 * the exception handling code has to
5587 * be able to get to the rgctx.
5588 * Therefore we have to make sure that
5589 * the vtable/mrgctx argument (for
5590 * static or generic methods) or the
5591 * "this" argument (for non-static
5592 * methods) are live.
5594 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5595 mini_method_get_context (method)->method_inst ||
5596 method->klass->valuetype) {
5597 mono_get_vtable_var (cfg);
5599 MonoInst *dummy_use;
5601 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5606 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5607 cfg->cbb = start_bblock;
5608 cfg->args = arg_array;
5609 mono_save_args (cfg, sig, inline_args);
5612 /* FIRST CODE BLOCK */
5613 NEW_BBLOCK (cfg, bblock);
5614 bblock->cil_code = ip;
5618 ADD_BBLOCK (cfg, bblock);
5620 if (cfg->method == method) {
5621 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5622 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5623 MONO_INST_NEW (cfg, ins, OP_BREAK);
5624 MONO_ADD_INS (bblock, ins);
5628 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5629 secman = mono_security_manager_get_methods ();
5631 security = (secman && mono_method_has_declsec (method));
5632 /* at this point having security doesn't mean we have any code to generate */
5633 if (security && (cfg->method == method)) {
5634 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5635 * And we do not want to enter the next section (with allocation) if we
5636 * have nothing to generate */
5637 security = mono_declsec_get_demands (method, &actions);
5640 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5641 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5643 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5644 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5645 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5647 /* unless the method or its class has the [SuppressUnmanagedCodeSecurity] attribute */
5648 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5652 mono_custom_attrs_free (custom);
5655 custom = mono_custom_attrs_from_class (wrapped->klass);
5656 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5660 mono_custom_attrs_free (custom);
5663 /* not a P/Invoke after all */
5668 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5669 /* we use a separate basic block for the initialization code */
5670 NEW_BBLOCK (cfg, init_localsbb);
5671 cfg->bb_init = init_localsbb;
5672 init_localsbb->real_offset = cfg->real_offset;
5673 start_bblock->next_bb = init_localsbb;
5674 init_localsbb->next_bb = bblock;
5675 link_bblock (cfg, start_bblock, init_localsbb);
5676 link_bblock (cfg, init_localsbb, bblock);
5678 cfg->cbb = init_localsbb;
5680 start_bblock->next_bb = bblock;
5681 link_bblock (cfg, start_bblock, bblock);
5684 /* at this point we know, if security is TRUE, that some code needs to be generated */
5685 if (security && (cfg->method == method)) {
5688 mono_jit_stats.cas_demand_generation++;
5690 if (actions.demand.blob) {
5691 /* Add code for SecurityAction.Demand */
5692 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5693 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5694 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5695 mono_emit_method_call (cfg, secman->demand, args, NULL);
5697 if (actions.noncasdemand.blob) {
5698 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5699 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5700 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5701 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5702 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5703 mono_emit_method_call (cfg, secman->demand, args, NULL);
5705 if (actions.demandchoice.blob) {
5706 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5707 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5708 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5709 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5710 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5714 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5716 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5719 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5720 /* check if this is native code, e.g. an icall or a p/invoke */
5721 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5722 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5724 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
5725 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
5727 /* if this is a native call then it can only be JITted from platform code */
5728 if ((icall || pinvk) && method->klass && method->klass->image) {
5729 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
5730 MonoException *ex = icall ? mono_get_exception_security () :
5731 mono_get_exception_method_access ();
5732 emit_throw_exception (cfg, ex);
5739 if (header->code_size == 0)
5742 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5747 if (cfg->method == method)
5748 mono_debug_init_method (cfg, bblock, breakpoint_id);
5750 for (n = 0; n < header->num_locals; ++n) {
5751 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5756 /* We force the vtable variable here for all shared methods
5757 for the possibility that they might show up in a stack
5758 trace where their exact instantiation is needed. */
5759 if (cfg->generic_sharing_context && method == cfg->method) {
5760 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5761 mini_method_get_context (method)->method_inst ||
5762 method->klass->valuetype) {
5763 mono_get_vtable_var (cfg);
5765 /* FIXME: Is there a better way to do this?
5766 We need the variable live for the duration
5767 of the whole method. */
5768 cfg->args [0]->flags |= MONO_INST_INDIRECT;
5772 /* add a check for this != NULL to inlined methods */
5773 if (is_virtual_call) {
5776 NEW_ARGLOAD (cfg, arg_ins, 0);
5777 MONO_ADD_INS (cfg->cbb, arg_ins);
5778 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
5781 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5782 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5785 start_new_bblock = 0;
5789 if (cfg->method == method)
5790 cfg->real_offset = ip - header->code;
5792 cfg->real_offset = inline_offset;
5797 if (start_new_bblock) {
5798 bblock->cil_length = ip - bblock->cil_code;
5799 if (start_new_bblock == 2) {
5800 g_assert (ip == tblock->cil_code);
5802 GET_BBLOCK (cfg, tblock, ip);
5804 bblock->next_bb = tblock;
5807 start_new_bblock = 0;
5808 for (i = 0; i < bblock->in_scount; ++i) {
5809 if (cfg->verbose_level > 3)
5810 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5811 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5815 g_slist_free (class_inits);
5818 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5819 link_bblock (cfg, bblock, tblock);
5820 if (sp != stack_start) {
5821 handle_stack_args (cfg, stack_start, sp - stack_start);
5823 CHECK_UNVERIFIABLE (cfg);
5825 bblock->next_bb = tblock;
5828 for (i = 0; i < bblock->in_scount; ++i) {
5829 if (cfg->verbose_level > 3)
5830 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5831 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5834 g_slist_free (class_inits);
5840 * Sequence points are points where the debugger can place a breakpoint.
5841 * Currently, we generate these automatically at points where the IL
5844 if (seq_points && sp == stack_start) {
5845 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
5846 MONO_ADD_INS (cfg->cbb, ins);
5849 bblock->real_offset = cfg->real_offset;
5851 if ((cfg->method == method) && cfg->coverage_info) {
5852 guint32 cil_offset = ip - header->code;
5853 cfg->coverage_info->data [cil_offset].cil_code = ip;
5855 /* TODO: Use an increment here */
5856 #if defined(TARGET_X86)
5857 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5858 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5860 MONO_ADD_INS (cfg->cbb, ins);
5862 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5863 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5867 if (cfg->verbose_level > 3)
5868 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5872 if (cfg->keep_cil_nops)
5873 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
5875 MONO_INST_NEW (cfg, ins, OP_NOP);
5877 MONO_ADD_INS (bblock, ins);
5880 MONO_INST_NEW (cfg, ins, OP_BREAK);
5882 MONO_ADD_INS (bblock, ins);
5888 CHECK_STACK_OVF (1);
5889 n = (*ip)-CEE_LDARG_0;
5891 EMIT_NEW_ARGLOAD (cfg, ins, n);
5899 CHECK_STACK_OVF (1);
5900 n = (*ip)-CEE_LDLOC_0;
5902 EMIT_NEW_LOCLOAD (cfg, ins, n);
5911 n = (*ip)-CEE_STLOC_0;
5914 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5916 emit_stloc_ir (cfg, sp, header, n);
5923 CHECK_STACK_OVF (1);
5926 EMIT_NEW_ARGLOAD (cfg, ins, n);
5932 CHECK_STACK_OVF (1);
5935 NEW_ARGLOADA (cfg, ins, n);
5936 MONO_ADD_INS (cfg->cbb, ins);
5946 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5948 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5953 CHECK_STACK_OVF (1);
5956 EMIT_NEW_LOCLOAD (cfg, ins, n);
5960 case CEE_LDLOCA_S: {
5961 unsigned char *tmp_ip;
5963 CHECK_STACK_OVF (1);
5964 CHECK_LOCAL (ip [1]);
5966 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
5972 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5981 CHECK_LOCAL (ip [1]);
5982 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5984 emit_stloc_ir (cfg, sp, header, ip [1]);
5989 CHECK_STACK_OVF (1);
5990 EMIT_NEW_PCONST (cfg, ins, NULL);
5991 ins->type = STACK_OBJ;
5996 CHECK_STACK_OVF (1);
5997 EMIT_NEW_ICONST (cfg, ins, -1);
6010 CHECK_STACK_OVF (1);
6011 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
6017 CHECK_STACK_OVF (1);
6019 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
6025 CHECK_STACK_OVF (1);
6026 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
6032 CHECK_STACK_OVF (1);
6033 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6034 ins->type = STACK_I8;
6035 ins->dreg = alloc_dreg (cfg, STACK_I8);
6037 ins->inst_l = (gint64)read64 (ip);
6038 MONO_ADD_INS (bblock, ins);
6044 gboolean use_aotconst = FALSE;
6046 #ifdef TARGET_POWERPC
6047 /* FIXME: Clean this up */
6048 if (cfg->compile_aot)
6049 use_aotconst = TRUE;
6052 /* FIXME: we should really allocate this only late in the compilation process */
6053 f = mono_domain_alloc (cfg->domain, sizeof (float));
6055 CHECK_STACK_OVF (1);
6061 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
6063 dreg = alloc_freg (cfg);
6064 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
6065 ins->type = STACK_R8;
6067 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6068 ins->type = STACK_R8;
6069 ins->dreg = alloc_dreg (cfg, STACK_R8);
6071 MONO_ADD_INS (bblock, ins);
6081 gboolean use_aotconst = FALSE;
6083 #ifdef TARGET_POWERPC
6084 /* FIXME: Clean this up */
6085 if (cfg->compile_aot)
6086 use_aotconst = TRUE;
6089 /* FIXME: we should really allocate this only late in the compilation process */
6090 d = mono_domain_alloc (cfg->domain, sizeof (double));
6092 CHECK_STACK_OVF (1);
6098 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
6100 dreg = alloc_freg (cfg);
6101 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
6102 ins->type = STACK_R8;
6104 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6105 ins->type = STACK_R8;
6106 ins->dreg = alloc_dreg (cfg, STACK_R8);
6108 MONO_ADD_INS (bblock, ins);
6117 MonoInst *temp, *store;
6119 CHECK_STACK_OVF (1);
6123 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
6124 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
6126 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6129 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6142 if (sp [0]->type == STACK_R8)
6143 /* we need to pop the value from the x86 FP stack */
6144 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
6153 if (stack_start != sp)
6155 token = read32 (ip + 1);
6156 /* FIXME: check the signature matches */
6157 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6162 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
6163 GENERIC_SHARING_FAILURE (CEE_JMP);
6165 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
6166 CHECK_CFG_EXCEPTION;
6168 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6170 MonoMethodSignature *fsig = mono_method_signature (cmethod);
6173 /* Handle tail calls similarly to calls */
6174 n = fsig->param_count + fsig->hasthis;
6176 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
6177 call->method = cmethod;
6178 call->tail_call = TRUE;
6179 call->signature = mono_method_signature (cmethod);
6180 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6181 call->inst.inst_p0 = cmethod;
6182 for (i = 0; i < n; ++i)
6183 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
6185 mono_arch_emit_call (cfg, call);
6186 MONO_ADD_INS (bblock, (MonoInst*)call);
6189 for (i = 0; i < num_args; ++i)
6190 /* Prevent arguments from being optimized away */
6191 arg_array [i]->flags |= MONO_INST_VOLATILE;
6193 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6194 ins = (MonoInst*)call;
6195 ins->inst_p0 = cmethod;
6196 MONO_ADD_INS (bblock, ins);
6200 start_new_bblock = 1;
6205 case CEE_CALLVIRT: {
6206 MonoInst *addr = NULL;
6207 MonoMethodSignature *fsig = NULL;
6209 int virtual = *ip == CEE_CALLVIRT;
6210 int calli = *ip == CEE_CALLI;
6211 gboolean pass_imt_from_rgctx = FALSE;
6212 MonoInst *imt_arg = NULL;
6213 gboolean pass_vtable = FALSE;
6214 gboolean pass_mrgctx = FALSE;
6215 MonoInst *vtable_arg = NULL;
6216 gboolean check_this = FALSE;
6217 gboolean supported_tail_call = FALSE;
6220 token = read32 (ip + 1);
6227 if (method->wrapper_type != MONO_WRAPPER_NONE)
6228 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6230 fsig = mono_metadata_parse_signature (image, token);
6232 n = fsig->param_count + fsig->hasthis;
6234 MonoMethod *cil_method;
6236 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6237 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6238 cil_method = cmethod;
6239 } else if (constrained_call) {
6240 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6242 * This is needed since get_method_constrained can't find
6243 * the method in klass representing a type var.
6244 * The type var is guaranteed to be a reference type in this
6247 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6248 cil_method = cmethod;
6249 g_assert (!cmethod->klass->valuetype);
6251 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6254 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6255 cil_method = cmethod;
6260 if (!dont_verify && !cfg->skip_visibility) {
6261 MonoMethod *target_method = cil_method;
6262 if (method->is_inflated) {
6263 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6265 if (!mono_method_can_access_method (method_definition, target_method) &&
6266 !mono_method_can_access_method (method, cil_method))
6267 METHOD_ACCESS_FAILURE;
6270 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6271 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6273 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6274 /* MS.NET seems to silently convert this to a callvirt */
6277 if (!cmethod->klass->inited)
6278 if (!mono_class_init (cmethod->klass))
6281 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6282 mini_class_is_system_array (cmethod->klass)) {
6283 array_rank = cmethod->klass->rank;
6284 fsig = mono_method_signature (cmethod);
6286 if (mono_method_signature (cmethod)->pinvoke) {
6287 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6288 check_for_pending_exc, FALSE);
6289 fsig = mono_method_signature (wrapper);
6290 } else if (constrained_call) {
6291 fsig = mono_method_signature (cmethod);
6293 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6297 mono_save_token_info (cfg, image, token, cil_method);
6299 n = fsig->param_count + fsig->hasthis;
6301 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6302 if (check_linkdemand (cfg, method, cmethod))
6304 CHECK_CFG_EXCEPTION;
6307 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6308 g_assert_not_reached ();
6311 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6314 if (!cfg->generic_sharing_context && cmethod)
6315 g_assert (!mono_method_check_context_used (cmethod));
6319 //g_assert (!virtual || fsig->hasthis);
6323 if (constrained_call) {
6325 * We have the `constrained.' prefix opcode.
6327 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6329 * The type parameter is instantiated as a valuetype,
6330 * but that type doesn't override the method we're
6331 * calling, so we need to box `this'.
6333 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6334 ins->klass = constrained_call;
6335 sp [0] = handle_box (cfg, ins, constrained_call);
6336 CHECK_CFG_EXCEPTION;
6337 } else if (!constrained_call->valuetype) {
6338 int dreg = alloc_preg (cfg);
6341 * The type parameter is instantiated as a reference
6342 * type. We have a managed pointer on the stack, so
6343 * we need to dereference it here.
6345 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6346 ins->type = STACK_OBJ;
6348 } else if (cmethod->klass->valuetype)
6350 constrained_call = NULL;
6353 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6357 * If the callee is a shared method, then its static cctor
6358 * might not get called after the call was patched.
6360 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6361 emit_generic_class_init (cfg, cmethod->klass);
6362 CHECK_TYPELOAD (cmethod->klass);
6365 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6366 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6367 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6368 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6369 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6372 * Pass vtable iff target method might
6373 * be shared, which means that sharing
6374 * is enabled for its class and its
6375 * context is sharable (and it's not a
6378 if (sharing_enabled && context_sharable &&
6379 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6383 if (cmethod && mini_method_get_context (cmethod) &&
6384 mini_method_get_context (cmethod)->method_inst) {
6385 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6386 MonoGenericContext *context = mini_method_get_context (cmethod);
6387 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6389 g_assert (!pass_vtable);
6391 if (sharing_enabled && context_sharable)
6395 if (cfg->generic_sharing_context && cmethod) {
6396 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6398 context_used = mono_method_check_context_used (cmethod);
6400 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6401 /* Generic method interface
6402 calls are resolved via a
6403 helper function and don't
6405 if (!cmethod_context || !cmethod_context->method_inst)
6406 pass_imt_from_rgctx = TRUE;
6410 * If a shared method calls another
6411 * shared method then the caller must
6412 * have a generic sharing context
6413 * because the magic trampoline
6414 * requires it. FIXME: We shouldn't
6415 * have to force the vtable/mrgctx
6416 * variable here. Instead there
6417 * should be a flag in the cfg to
6418 * request a generic sharing context.
6421 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6422 mono_get_vtable_var (cfg);
6427 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6429 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6431 CHECK_TYPELOAD (cmethod->klass);
6432 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6437 g_assert (!vtable_arg);
6439 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6441 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6442 MONO_METHOD_IS_FINAL (cmethod)) {
6449 if (pass_imt_from_rgctx) {
6450 g_assert (!pass_vtable);
6453 imt_arg = emit_get_rgctx_method (cfg, context_used,
6454 cmethod, MONO_RGCTX_INFO_METHOD);
6458 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
6460 /* Calling virtual generic methods */
6461 if (cmethod && virtual &&
6462 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6463 !(MONO_METHOD_IS_FINAL (cmethod) &&
6464 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6465 mono_method_signature (cmethod)->generic_param_count) {
6466 MonoInst *this_temp, *this_arg_temp, *store;
6467 MonoInst *iargs [4];
6469 g_assert (mono_method_signature (cmethod)->is_inflated);
6471 /* Prevent inlining of methods that contain indirect calls */
6474 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK
6475 /* The llvm vcall trampolines don't support generic virtual calls yet */
6476 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt && !mono_use_llvm) {
6477 g_assert (!imt_arg);
6479 g_assert (cmethod->is_inflated);
6480 imt_arg = emit_get_rgctx_method (cfg, context_used,
6481 cmethod, MONO_RGCTX_INFO_METHOD);
6482 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6486 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6487 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6488 MONO_ADD_INS (bblock, store);
6490 /* FIXME: This should be a managed pointer */
6491 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6493 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6494 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6495 cmethod, MONO_RGCTX_INFO_METHOD);
6496 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6497 addr = mono_emit_jit_icall (cfg,
6498 mono_helper_compile_generic_method, iargs);
6500 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6502 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6505 if (!MONO_TYPE_IS_VOID (fsig->ret))
6506 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6513 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6514 supported_tail_call = cmethod && MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
6516 supported_tail_call = cmethod && mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6520 /* FIXME: runtime generic context pointer for jumps? */
6521 /* FIXME: handle this for generic sharing eventually */
6522 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) && supported_tail_call) {
6525 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6528 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6529 /* Handle tail calls similarly to calls */
6530 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE);
6532 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6533 call->tail_call = TRUE;
6534 call->method = cmethod;
6535 call->signature = mono_method_signature (cmethod);
6538 * We implement tail calls by storing the actual arguments into the
6539 * argument variables, then emitting a CEE_JMP.
6541 for (i = 0; i < n; ++i) {
6542 /* Prevent argument from being register allocated */
6543 arg_array [i]->flags |= MONO_INST_VOLATILE;
6544 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6548 ins = (MonoInst*)call;
6549 ins->inst_p0 = cmethod;
6550 ins->inst_p1 = arg_array [0];
6551 MONO_ADD_INS (bblock, ins);
6552 link_bblock (cfg, bblock, end_bblock);
6553 start_new_bblock = 1;
6554 /* skip CEE_RET as well */
6560 /* Conversion to a JIT intrinsic */
6561 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6562 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6563 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6574 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6575 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6576 mono_method_check_inlining (cfg, cmethod) &&
6577 !g_list_find (dont_inline, cmethod)) {
6579 gboolean allways = FALSE;
6581 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6582 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6583 /* Prevent inlining of methods that call wrappers */
6585 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6589 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6591 cfg->real_offset += 5;
6594 if (!MONO_TYPE_IS_VOID (fsig->ret))
6595 /* *sp is already set by inline_method */
6598 inline_costs += costs;
6604 inline_costs += 10 * num_calls++;
6606 /* Tail recursion elimination */
6607 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6608 gboolean has_vtargs = FALSE;
6611 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6614 /* keep it simple */
6615 for (i = fsig->param_count - 1; i >= 0; i--) {
6616 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6621 for (i = 0; i < n; ++i)
6622 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6623 MONO_INST_NEW (cfg, ins, OP_BR);
6624 MONO_ADD_INS (bblock, ins);
6625 tblock = start_bblock->out_bb [0];
6626 link_bblock (cfg, bblock, tblock);
6627 ins->inst_target_bb = tblock;
6628 start_new_bblock = 1;
6630 /* skip the CEE_RET, too */
6631 if (ip_in_bb (cfg, bblock, ip + 5))
6641 /* Generic sharing */
6642 /* FIXME: only do this for generic methods if
6643 they are not shared! */
6644 if (context_used && !imt_arg && !array_rank &&
6645 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6646 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6647 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6648 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6651 g_assert (cfg->generic_sharing_context && cmethod);
6655 * We are compiling a call to a
6656 * generic method from shared code,
6657 * which means that we have to look up
6658 * the method in the rgctx and do an
6661 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6664 /* Indirect calls */
6666 g_assert (!imt_arg);
6668 if (*ip == CEE_CALL)
6669 g_assert (context_used);
6670 else if (*ip == CEE_CALLI)
6671 g_assert (!vtable_arg);
6673 /* FIXME: what the hell is this??? */
6674 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6675 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6677 /* Prevent inlining of methods with indirect calls */
6681 #ifdef MONO_ARCH_RGCTX_REG
6683 int rgctx_reg = mono_alloc_preg (cfg);
6685 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6686 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6687 call = (MonoCallInst*)ins;
6688 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6689 cfg->uses_rgctx_reg = TRUE;
6690 call->rgctx_reg = TRUE;
6695 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6697 * Instead of emitting an indirect call, emit a direct call
6698 * with the contents of the aotconst as the patch info.
6700 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6702 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6703 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
6706 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6709 if (!MONO_TYPE_IS_VOID (fsig->ret))
6710 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6721 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6722 if (sp [fsig->param_count]->type == STACK_OBJ) {
6723 MonoInst *iargs [2];
6726 iargs [1] = sp [fsig->param_count];
6728 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6731 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6732 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6733 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6734 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6736 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6739 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6740 if (!cmethod->klass->element_class->valuetype && !readonly)
6741 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6742 CHECK_TYPELOAD (cmethod->klass);
6745 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6748 g_assert_not_reached ();
6756 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6758 if (!MONO_TYPE_IS_VOID (fsig->ret))
6759 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6769 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6771 } else if (imt_arg) {
6772 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6774 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6777 if (!MONO_TYPE_IS_VOID (fsig->ret))
6778 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6785 if (cfg->method != method) {
6786 /* return from inlined method */
6788 * If in_count == 0, that means the ret is unreachable due to
6789 * being preceded by a throw. In that case, inline_method () will
6790 * handle setting the return value
6791 * (test case: test_0_inline_throw ()).
6793 if (return_var && cfg->cbb->in_count) {
6797 //g_assert (returnvar != -1);
6798 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6799 cfg->ret_var_set = TRUE;
6803 MonoType *ret_type = mono_method_signature (method)->ret;
6805 g_assert (!return_var);
6808 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6811 if (!cfg->vret_addr) {
6814 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6816 EMIT_NEW_RETLOADA (cfg, ret_addr);
6818 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6819 ins->klass = mono_class_from_mono_type (ret_type);
6822 #ifdef MONO_ARCH_SOFT_FLOAT
6823 if (!ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6824 MonoInst *iargs [1];
6828 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6829 mono_arch_emit_setret (cfg, method, conv);
6831 mono_arch_emit_setret (cfg, method, *sp);
6834 mono_arch_emit_setret (cfg, method, *sp);
6839 if (sp != stack_start)
6841 MONO_INST_NEW (cfg, ins, OP_BR);
6843 ins->inst_target_bb = end_bblock;
6844 MONO_ADD_INS (bblock, ins);
6845 link_bblock (cfg, bblock, end_bblock);
6846 start_new_bblock = 1;
6850 MONO_INST_NEW (cfg, ins, OP_BR);
6852 target = ip + 1 + (signed char)(*ip);
6854 GET_BBLOCK (cfg, tblock, target);
6855 link_bblock (cfg, bblock, tblock);
6856 ins->inst_target_bb = tblock;
6857 if (sp != stack_start) {
6858 handle_stack_args (cfg, stack_start, sp - stack_start);
6860 CHECK_UNVERIFIABLE (cfg);
6862 MONO_ADD_INS (bblock, ins);
6863 start_new_bblock = 1;
6864 inline_costs += BRANCH_COST;
6878 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6880 target = ip + 1 + *(signed char*)ip;
6886 inline_costs += BRANCH_COST;
6890 MONO_INST_NEW (cfg, ins, OP_BR);
6893 target = ip + 4 + (gint32)read32(ip);
6895 GET_BBLOCK (cfg, tblock, target);
6896 link_bblock (cfg, bblock, tblock);
6897 ins->inst_target_bb = tblock;
6898 if (sp != stack_start) {
6899 handle_stack_args (cfg, stack_start, sp - stack_start);
6901 CHECK_UNVERIFIABLE (cfg);
6904 MONO_ADD_INS (bblock, ins);
6906 start_new_bblock = 1;
6907 inline_costs += BRANCH_COST;
6914 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6915 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6916 guint32 opsize = is_short ? 1 : 4;
6918 CHECK_OPSIZE (opsize);
6920 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6923 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6928 GET_BBLOCK (cfg, tblock, target);
6929 link_bblock (cfg, bblock, tblock);
6930 GET_BBLOCK (cfg, tblock, ip);
6931 link_bblock (cfg, bblock, tblock);
6933 if (sp != stack_start) {
6934 handle_stack_args (cfg, stack_start, sp - stack_start);
6935 CHECK_UNVERIFIABLE (cfg);
6938 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6939 cmp->sreg1 = sp [0]->dreg;
6940 type_from_op (cmp, sp [0], NULL);
6943 #if SIZEOF_REGISTER == 4
6944 if (cmp->opcode == OP_LCOMPARE_IMM) {
6945 /* Convert it to OP_LCOMPARE */
6946 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6947 ins->type = STACK_I8;
6948 ins->dreg = alloc_dreg (cfg, STACK_I8);
6950 MONO_ADD_INS (bblock, ins);
6951 cmp->opcode = OP_LCOMPARE;
6952 cmp->sreg2 = ins->dreg;
6955 MONO_ADD_INS (bblock, cmp);
6957 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6958 type_from_op (ins, sp [0], NULL);
6959 MONO_ADD_INS (bblock, ins);
6960 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
6961 GET_BBLOCK (cfg, tblock, target);
6962 ins->inst_true_bb = tblock;
6963 GET_BBLOCK (cfg, tblock, ip);
6964 ins->inst_false_bb = tblock;
6965 start_new_bblock = 2;
6968 inline_costs += BRANCH_COST;
6983 MONO_INST_NEW (cfg, ins, *ip);
6985 target = ip + 4 + (gint32)read32(ip);
6991 inline_costs += BRANCH_COST;
6995 MonoBasicBlock **targets;
6996 MonoBasicBlock *default_bblock;
6997 MonoJumpInfoBBTable *table;
6998 int offset_reg = alloc_preg (cfg);
6999 int target_reg = alloc_preg (cfg);
7000 int table_reg = alloc_preg (cfg);
7001 int sum_reg = alloc_preg (cfg);
7002 gboolean use_op_switch;
7006 n = read32 (ip + 1);
7009 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
7013 CHECK_OPSIZE (n * sizeof (guint32));
7014 target = ip + n * sizeof (guint32);
7016 GET_BBLOCK (cfg, default_bblock, target);
7018 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
7019 for (i = 0; i < n; ++i) {
7020 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
7021 targets [i] = tblock;
7025 if (sp != stack_start) {
7027 * Link the current bb with the targets as well, so handle_stack_args
7028 * will set their in_stack correctly.
7030 link_bblock (cfg, bblock, default_bblock);
7031 for (i = 0; i < n; ++i)
7032 link_bblock (cfg, bblock, targets [i]);
7034 handle_stack_args (cfg, stack_start, sp - stack_start);
7036 CHECK_UNVERIFIABLE (cfg);
7039 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
7040 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
7043 for (i = 0; i < n; ++i)
7044 link_bblock (cfg, bblock, targets [i]);
7046 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
7047 table->table = targets;
7048 table->table_size = n;
7050 use_op_switch = FALSE;
7052 /* ARM implements SWITCH statements differently */
7053 /* FIXME: Make it use the generic implementation */
7054 if (!cfg->compile_aot)
7055 use_op_switch = TRUE;
7058 if (COMPILE_LLVM (cfg))
7059 use_op_switch = TRUE;
7061 cfg->cbb->has_jump_table = 1;
7063 if (use_op_switch) {
7064 MONO_INST_NEW (cfg, ins, OP_SWITCH);
7065 ins->sreg1 = src1->dreg;
7066 ins->inst_p0 = table;
7067 ins->inst_many_bb = targets;
7068 ins->klass = GUINT_TO_POINTER (n);
7069 MONO_ADD_INS (cfg->cbb, ins);
7071 if (sizeof (gpointer) == 8)
7072 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
7074 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
7076 #if SIZEOF_REGISTER == 8
7077 /* The upper word might not be zero, and we add it to a 64 bit address later */
7078 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
7081 if (cfg->compile_aot) {
7082 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
7084 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
7085 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
7086 ins->inst_p0 = table;
7087 ins->dreg = table_reg;
7088 MONO_ADD_INS (cfg->cbb, ins);
7091 /* FIXME: Use load_memindex */
7092 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
7093 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
7094 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
7096 start_new_bblock = 1;
7097 inline_costs += (BRANCH_COST * 2);
7117 dreg = alloc_freg (cfg);
7120 dreg = alloc_lreg (cfg);
7123 dreg = alloc_preg (cfg);
7126 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
7127 ins->type = ldind_type [*ip - CEE_LDIND_I1];
7128 ins->flags |= ins_flag;
7130 MONO_ADD_INS (bblock, ins);
7145 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
7146 ins->flags |= ins_flag;
7148 MONO_ADD_INS (bblock, ins);
7150 #if HAVE_WRITE_BARRIERS
7151 if (*ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0))) {
7152 /* insert call to write barrier */
7153 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7154 mono_emit_method_call (cfg, write_barrier, sp, NULL);
7165 MONO_INST_NEW (cfg, ins, (*ip));
7167 ins->sreg1 = sp [0]->dreg;
7168 ins->sreg2 = sp [1]->dreg;
7169 type_from_op (ins, sp [0], sp [1]);
7171 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7173 /* Use the immediate opcodes if possible */
7174 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
7175 int imm_opcode = mono_op_to_op_imm (ins->opcode);
7176 if (imm_opcode != -1) {
7177 ins->opcode = imm_opcode;
7178 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7181 sp [1]->opcode = OP_NOP;
7185 MONO_ADD_INS ((cfg)->cbb, (ins));
7187 *sp++ = mono_decompose_opcode (cfg, ins);
7204 MONO_INST_NEW (cfg, ins, (*ip));
7206 ins->sreg1 = sp [0]->dreg;
7207 ins->sreg2 = sp [1]->dreg;
7208 type_from_op (ins, sp [0], sp [1]);
7210 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7211 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7213 /* FIXME: Pass opcode to is_inst_imm */
7215 /* Use the immediate opcodes if possible */
7216 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7219 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7220 if (imm_opcode != -1) {
7221 ins->opcode = imm_opcode;
7222 if (sp [1]->opcode == OP_I8CONST) {
7223 #if SIZEOF_REGISTER == 8
7224 ins->inst_imm = sp [1]->inst_l;
7226 ins->inst_ls_word = sp [1]->inst_ls_word;
7227 ins->inst_ms_word = sp [1]->inst_ms_word;
7231 ins->inst_imm = (gssize)(sp [1]->inst_c0);
7234 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7235 if (sp [1]->next == NULL)
7236 sp [1]->opcode = OP_NOP;
7239 MONO_ADD_INS ((cfg)->cbb, (ins));
7241 *sp++ = mono_decompose_opcode (cfg, ins);
7254 case CEE_CONV_OVF_I8:
7255 case CEE_CONV_OVF_U8:
7259 /* Special case this earlier so we have long constants in the IR */
7260 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7261 int data = sp [-1]->inst_c0;
7262 sp [-1]->opcode = OP_I8CONST;
7263 sp [-1]->type = STACK_I8;
7264 #if SIZEOF_REGISTER == 8
7265 if ((*ip) == CEE_CONV_U8)
7266 sp [-1]->inst_c0 = (guint32)data;
7268 sp [-1]->inst_c0 = data;
7270 sp [-1]->inst_ls_word = data;
7271 if ((*ip) == CEE_CONV_U8)
7272 sp [-1]->inst_ms_word = 0;
7274 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7276 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7283 case CEE_CONV_OVF_I4:
7284 case CEE_CONV_OVF_I1:
7285 case CEE_CONV_OVF_I2:
7286 case CEE_CONV_OVF_I:
7287 case CEE_CONV_OVF_U:
7290 if (sp [-1]->type == STACK_R8) {
7291 ADD_UNOP (CEE_CONV_OVF_I8);
7298 case CEE_CONV_OVF_U1:
7299 case CEE_CONV_OVF_U2:
7300 case CEE_CONV_OVF_U4:
7303 if (sp [-1]->type == STACK_R8) {
7304 ADD_UNOP (CEE_CONV_OVF_U8);
7311 case CEE_CONV_OVF_I1_UN:
7312 case CEE_CONV_OVF_I2_UN:
7313 case CEE_CONV_OVF_I4_UN:
7314 case CEE_CONV_OVF_I8_UN:
7315 case CEE_CONV_OVF_U1_UN:
7316 case CEE_CONV_OVF_U2_UN:
7317 case CEE_CONV_OVF_U4_UN:
7318 case CEE_CONV_OVF_U8_UN:
7319 case CEE_CONV_OVF_I_UN:
7320 case CEE_CONV_OVF_U_UN:
7330 case CEE_ADD_OVF_UN:
7332 case CEE_MUL_OVF_UN:
7334 case CEE_SUB_OVF_UN:
7342 token = read32 (ip + 1);
7343 klass = mini_get_class (method, token, generic_context);
7344 CHECK_TYPELOAD (klass);
7346 if (generic_class_is_reference_type (cfg, klass)) {
7347 MonoInst *store, *load;
7348 int dreg = alloc_preg (cfg);
7350 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7351 load->flags |= ins_flag;
7352 MONO_ADD_INS (cfg->cbb, load);
7354 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7355 store->flags |= ins_flag;
7356 MONO_ADD_INS (cfg->cbb, store);
7358 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7370 token = read32 (ip + 1);
7371 klass = mini_get_class (method, token, generic_context);
7372 CHECK_TYPELOAD (klass);
7374 /* Optimize the common ldobj+stloc combination */
7384 loc_index = ip [5] - CEE_STLOC_0;
7391 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7392 CHECK_LOCAL (loc_index);
7394 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7395 ins->dreg = cfg->locals [loc_index]->dreg;
7401 /* Optimize the ldobj+stobj combination */
7402 /* The reference case ends up being a load+store anyway */
7403 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7408 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7415 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7424 CHECK_STACK_OVF (1);
7426 n = read32 (ip + 1);
7428 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7429 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7430 ins->type = STACK_OBJ;
7433 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7434 MonoInst *iargs [1];
7436 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7437 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7439 if (cfg->opt & MONO_OPT_SHARED) {
7440 MonoInst *iargs [3];
7442 if (cfg->compile_aot) {
7443 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7445 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7446 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7447 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7448 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7449 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7451 if (bblock->out_of_line) {
7452 MonoInst *iargs [2];
7454 if (image == mono_defaults.corlib) {
7456 * Avoid relocations in AOT and save some space by using a
7457 * version of helper_ldstr specialized to mscorlib.
7459 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7460 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7462 /* Avoid creating the string object */
7463 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7464 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7465 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7469 if (cfg->compile_aot) {
7470 NEW_LDSTRCONST (cfg, ins, image, n);
7472 MONO_ADD_INS (bblock, ins);
7475 NEW_PCONST (cfg, ins, NULL);
7476 ins->type = STACK_OBJ;
7477 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7479 MONO_ADD_INS (bblock, ins);
7488 MonoInst *iargs [2];
7489 MonoMethodSignature *fsig;
7492 MonoInst *vtable_arg = NULL;
7495 token = read32 (ip + 1);
7496 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7499 fsig = mono_method_get_signature (cmethod, image, token);
7501 mono_save_token_info (cfg, image, token, cmethod);
7503 if (!mono_class_init (cmethod->klass))
7506 if (cfg->generic_sharing_context)
7507 context_used = mono_method_check_context_used (cmethod);
7509 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7510 if (check_linkdemand (cfg, method, cmethod))
7512 CHECK_CFG_EXCEPTION;
7513 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7514 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7517 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7518 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7519 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7520 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7521 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7524 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7525 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7527 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7529 CHECK_TYPELOAD (cmethod->klass);
7530 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7535 n = fsig->param_count;
7539 * Generate smaller code for the common newobj <exception> instruction in
7540 * argument checking code.
7542 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7543 is_exception_class (cmethod->klass) && n <= 2 &&
7544 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7545 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7546 MonoInst *iargs [3];
7548 g_assert (!vtable_arg);
7552 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7555 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7559 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7564 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7567 g_assert_not_reached ();
7575 /* move the args to allow room for 'this' in the first position */
7581 /* check_call_signature () requires sp[0] to be set */
7582 this_ins.type = STACK_OBJ;
7584 if (check_call_signature (cfg, fsig, sp))
7589 if (mini_class_is_system_array (cmethod->klass)) {
7590 g_assert (!vtable_arg);
7592 *sp = emit_get_rgctx_method (cfg, context_used,
7593 cmethod, MONO_RGCTX_INFO_METHOD);
7595 /* Avoid varargs in the common case */
7596 if (fsig->param_count == 1)
7597 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7598 else if (fsig->param_count == 2)
7599 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7601 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7602 } else if (cmethod->string_ctor) {
7603 g_assert (!context_used);
7604 g_assert (!vtable_arg);
7605 /* we simply pass a null pointer */
7606 EMIT_NEW_PCONST (cfg, *sp, NULL);
7607 /* now call the string ctor */
7608 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7610 MonoInst* callvirt_this_arg = NULL;
7612 if (cmethod->klass->valuetype) {
7613 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7614 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7615 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7620 * The code generated by mini_emit_virtual_call () expects
7621 * iargs [0] to be a boxed instance, but luckily the vcall
7622 * will be transformed into a normal call there.
7624 } else if (context_used) {
7628 if (cfg->opt & MONO_OPT_SHARED)
7629 rgctx_info = MONO_RGCTX_INFO_KLASS;
7631 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7632 data = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, rgctx_info);
7634 alloc = handle_alloc_from_inst (cfg, cmethod->klass, data, FALSE);
7637 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7639 CHECK_TYPELOAD (cmethod->klass);
7642 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7643 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7644 * As a workaround, we call class cctors before allocating objects.
7646 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7647 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7648 if (cfg->verbose_level > 2)
7649 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7650 class_inits = g_slist_prepend (class_inits, vtable);
7653 alloc = handle_alloc (cfg, cmethod->klass, FALSE);
7656 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
7659 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7661 /* Now call the actual ctor */
7662 /* Avoid virtual calls to ctors if possible */
7663 if (cmethod->klass->marshalbyref)
7664 callvirt_this_arg = sp [0];
7666 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7667 mono_method_check_inlining (cfg, cmethod) &&
7668 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7669 !g_list_find (dont_inline, cmethod)) {
7672 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7673 cfg->real_offset += 5;
7676 inline_costs += costs - 5;
7679 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7681 } else if (context_used &&
7682 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7683 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7684 MonoInst *cmethod_addr;
7686 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7687 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7689 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7692 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7693 callvirt_this_arg, NULL, vtable_arg);
7697 if (alloc == NULL) {
7699 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7700 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7714 token = read32 (ip + 1);
7715 klass = mini_get_class (method, token, generic_context);
7716 CHECK_TYPELOAD (klass);
7717 if (sp [0]->type != STACK_OBJ)
7720 if (cfg->generic_sharing_context)
7721 context_used = mono_class_check_context_used (klass);
7730 args [1] = emit_get_rgctx_klass (cfg, context_used,
7731 klass, MONO_RGCTX_INFO_KLASS);
7733 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7737 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7738 MonoMethod *mono_castclass;
7739 MonoInst *iargs [1];
7742 mono_castclass = mono_marshal_get_castclass (klass);
7745 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7746 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7747 g_assert (costs > 0);
7750 cfg->real_offset += 5;
7755 inline_costs += costs;
7758 ins = handle_castclass (cfg, klass, *sp);
7759 CHECK_CFG_EXCEPTION;
7769 token = read32 (ip + 1);
7770 klass = mini_get_class (method, token, generic_context);
7771 CHECK_TYPELOAD (klass);
7772 if (sp [0]->type != STACK_OBJ)
7775 if (cfg->generic_sharing_context)
7776 context_used = mono_class_check_context_used (klass);
7785 args [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7787 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7791 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7792 MonoMethod *mono_isinst;
7793 MonoInst *iargs [1];
7796 mono_isinst = mono_marshal_get_isinst (klass);
7799 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7800 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7801 g_assert (costs > 0);
7804 cfg->real_offset += 5;
7809 inline_costs += costs;
7812 ins = handle_isinst (cfg, klass, *sp);
7813 CHECK_CFG_EXCEPTION;
7820 case CEE_UNBOX_ANY: {
7824 token = read32 (ip + 1);
7825 klass = mini_get_class (method, token, generic_context);
7826 CHECK_TYPELOAD (klass);
7828 mono_save_token_info (cfg, image, token, klass);
7830 if (cfg->generic_sharing_context)
7831 context_used = mono_class_check_context_used (klass);
7833 if (generic_class_is_reference_type (cfg, klass)) {
7834 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
7836 MonoInst *iargs [2];
7841 iargs [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7842 ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
7846 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7847 MonoMethod *mono_castclass;
7848 MonoInst *iargs [1];
7851 mono_castclass = mono_marshal_get_castclass (klass);
7854 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7855 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7857 g_assert (costs > 0);
7860 cfg->real_offset += 5;
7864 inline_costs += costs;
7866 ins = handle_castclass (cfg, klass, *sp);
7867 CHECK_CFG_EXCEPTION;
7875 if (mono_class_is_nullable (klass)) {
7876 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
7883 ins = handle_unbox (cfg, klass, sp, context_used);
7889 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7902 token = read32 (ip + 1);
7903 klass = mini_get_class (method, token, generic_context);
7904 CHECK_TYPELOAD (klass);
7906 mono_save_token_info (cfg, image, token, klass);
7908 if (cfg->generic_sharing_context)
7909 context_used = mono_class_check_context_used (klass);
7911 if (generic_class_is_reference_type (cfg, klass)) {
7917 if (klass == mono_defaults.void_class)
7919 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7921 /* frequent check in generic code: box (struct), brtrue */
7922 if (!mono_class_is_nullable (klass) &&
7923 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
7924 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7926 MONO_INST_NEW (cfg, ins, OP_BR);
7927 if (*ip == CEE_BRTRUE_S) {
7930 target = ip + 1 + (signed char)(*ip);
7935 target = ip + 4 + (gint)(read32 (ip));
7938 GET_BBLOCK (cfg, tblock, target);
7939 link_bblock (cfg, bblock, tblock);
7940 ins->inst_target_bb = tblock;
7941 GET_BBLOCK (cfg, tblock, ip);
7943 * This leads to some inconsistency, since the two bblocks are
7944 * not really connected, but it is needed for handling stack
7945 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
7946 * FIXME: This should only be needed if sp != stack_start, but that
7947 * doesn't work for some reason (test failure in mcs/tests on x86).
7949 link_bblock (cfg, bblock, tblock);
7950 if (sp != stack_start) {
7951 handle_stack_args (cfg, stack_start, sp - stack_start);
7953 CHECK_UNVERIFIABLE (cfg);
7955 MONO_ADD_INS (bblock, ins);
7956 start_new_bblock = 1;
7964 if (cfg->opt & MONO_OPT_SHARED)
7965 rgctx_info = MONO_RGCTX_INFO_KLASS;
7967 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7968 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
7969 *sp++ = handle_box_from_inst (cfg, val, klass, context_used, data);
7971 *sp++ = handle_box (cfg, val, klass);
7974 CHECK_CFG_EXCEPTION;
7983 token = read32 (ip + 1);
7984 klass = mini_get_class (method, token, generic_context);
7985 CHECK_TYPELOAD (klass);
7987 mono_save_token_info (cfg, image, token, klass);
7989 if (cfg->generic_sharing_context)
7990 context_used = mono_class_check_context_used (klass);
7992 if (mono_class_is_nullable (klass)) {
7995 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
7996 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
8000 ins = handle_unbox (cfg, klass, sp, context_used);
8010 MonoClassField *field;
8014 if (*ip == CEE_STFLD) {
8021 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
8023 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
8026 token = read32 (ip + 1);
8027 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8028 field = mono_method_get_wrapper_data (method, token);
8029 klass = field->parent;
8032 field = mono_field_from_token (image, token, &klass, generic_context);
8036 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8037 FIELD_ACCESS_FAILURE;
8038 mono_class_init (klass);
8040 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
8041 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
8042 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8043 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8046 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
8047 if (*ip == CEE_STFLD) {
8048 if (target_type_is_incompatible (cfg, field->type, sp [1]))
8050 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8051 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
8052 MonoInst *iargs [5];
8055 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8056 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8057 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
8061 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8062 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
8063 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8064 g_assert (costs > 0);
8066 cfg->real_offset += 5;
8069 inline_costs += costs;
8071 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
8076 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8078 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
8080 #if HAVE_WRITE_BARRIERS
8081 if (mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
8082 /* insert call to write barrier */
8083 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
8084 MonoInst *iargs [2];
8087 dreg = alloc_preg (cfg);
8088 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8090 mono_emit_method_call (cfg, write_barrier, iargs, NULL);
8094 store->flags |= ins_flag;
8101 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8102 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
8103 MonoInst *iargs [4];
8106 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8107 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8108 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
8109 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8110 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
8111 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8113 g_assert (costs > 0);
8115 cfg->real_offset += 5;
8119 inline_costs += costs;
8121 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
8125 if (sp [0]->type == STACK_VTYPE) {
8128 /* Have to compute the address of the variable */
8130 var = get_vreg_to_inst (cfg, sp [0]->dreg);
8132 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
8134 g_assert (var->klass == klass);
8136 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
8140 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8142 if (*ip == CEE_LDFLDA) {
8143 dreg = alloc_preg (cfg);
8145 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8146 ins->klass = mono_class_from_mono_type (field->type);
8147 ins->type = STACK_MP;
8152 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
8153 load->flags |= ins_flag;
8164 MonoClassField *field;
8165 gpointer addr = NULL;
8166 gboolean is_special_static;
8169 token = read32 (ip + 1);
8171 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8172 field = mono_method_get_wrapper_data (method, token);
8173 klass = field->parent;
8176 field = mono_field_from_token (image, token, &klass, generic_context);
8179 mono_class_init (klass);
8180 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8181 FIELD_ACCESS_FAILURE;
8183 /* if the class is Critical then transparent code cannot access it's fields */
8184 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8185 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8188 * We can only support shared generic static
8189 * field access on architectures where the
8190 * trampoline code has been extended to handle
8191 * the generic class init.
8193 #ifndef MONO_ARCH_VTABLE_REG
8194 GENERIC_SHARING_FAILURE (*ip);
8197 if (cfg->generic_sharing_context)
8198 context_used = mono_class_check_context_used (klass);
8200 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
8202 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8203 * to be called here.
8205 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8206 mono_class_vtable (cfg->domain, klass);
8207 CHECK_TYPELOAD (klass);
8209 mono_domain_lock (cfg->domain);
8210 if (cfg->domain->special_static_fields)
8211 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8212 mono_domain_unlock (cfg->domain);
8214 is_special_static = mono_class_field_is_special_static (field);
8216 /* Generate IR to compute the field address */
8218 if ((cfg->opt & MONO_OPT_SHARED) ||
8219 (cfg->compile_aot && is_special_static) ||
8220 (context_used && is_special_static)) {
8221 MonoInst *iargs [2];
8223 g_assert (field->parent);
8224 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8226 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8227 field, MONO_RGCTX_INFO_CLASS_FIELD);
8229 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8231 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8232 } else if (context_used) {
8233 MonoInst *static_data;
8236 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8237 method->klass->name_space, method->klass->name, method->name,
8238 depth, field->offset);
8241 if (mono_class_needs_cctor_run (klass, method)) {
8245 vtable = emit_get_rgctx_klass (cfg, context_used,
8246 klass, MONO_RGCTX_INFO_VTABLE);
8248 // FIXME: This doesn't work since it tries to pass the argument
8249 // in the normal way, instead of using MONO_ARCH_VTABLE_REG
8251 * The vtable pointer is always passed in a register regardless of
8252 * the calling convention, so assign it manually, and make a call
8253 * using a signature without parameters.
8255 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable);
8256 #ifdef MONO_ARCH_VTABLE_REG
8257 mono_call_inst_add_outarg_reg (cfg, call, vtable->dreg, MONO_ARCH_VTABLE_REG, FALSE);
8258 cfg->uses_vtable_reg = TRUE;
8265 * The pointer we're computing here is
8267 * super_info.static_data + field->offset
8269 static_data = emit_get_rgctx_klass (cfg, context_used,
8270 klass, MONO_RGCTX_INFO_STATIC_DATA);
8272 if (field->offset == 0) {
8275 int addr_reg = mono_alloc_preg (cfg);
8276 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8278 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8279 MonoInst *iargs [2];
8281 g_assert (field->parent);
8282 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8283 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8284 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8286 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8288 CHECK_TYPELOAD (klass);
8290 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8291 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8292 if (cfg->verbose_level > 2)
8293 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8294 class_inits = g_slist_prepend (class_inits, vtable);
8296 if (cfg->run_cctors) {
8298 /* This makes so that inline cannot trigger */
8299 /* .cctors: too many apps depend on them */
8300 /* running with a specific order... */
8301 if (! vtable->initialized)
8303 ex = mono_runtime_class_init_full (vtable, FALSE);
8305 set_exception_object (cfg, ex);
8306 goto exception_exit;
8310 addr = (char*)vtable->data + field->offset;
8312 if (cfg->compile_aot)
8313 EMIT_NEW_SFLDACONST (cfg, ins, field);
8315 EMIT_NEW_PCONST (cfg, ins, addr);
8318 * insert call to mono_threads_get_static_data (GPOINTER_TO_UINT (addr))
8319 * This could be later optimized to do just a couple of
8320 * memory dereferences with constant offsets.
8322 MonoInst *iargs [1];
8323 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8324 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8328 /* Generate IR to do the actual load/store operation */
8330 if (*ip == CEE_LDSFLDA) {
8331 ins->klass = mono_class_from_mono_type (field->type);
8332 ins->type = STACK_PTR;
8334 } else if (*ip == CEE_STSFLD) {
8339 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
8340 store->flags |= ins_flag;
8342 gboolean is_const = FALSE;
8343 MonoVTable *vtable = NULL;
8345 if (!context_used) {
8346 vtable = mono_class_vtable (cfg->domain, klass);
8347 CHECK_TYPELOAD (klass);
8349 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8350 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8351 gpointer addr = (char*)vtable->data + field->offset;
8352 int ro_type = field->type->type;
8353 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
8354 ro_type = mono_class_enum_basetype (field->type->data.klass)->type;
8356 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8359 case MONO_TYPE_BOOLEAN:
8361 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8365 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8368 case MONO_TYPE_CHAR:
8370 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8374 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8379 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8383 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8386 #ifndef HAVE_MOVING_COLLECTOR
8389 case MONO_TYPE_STRING:
8390 case MONO_TYPE_OBJECT:
8391 case MONO_TYPE_CLASS:
8392 case MONO_TYPE_SZARRAY:
8394 case MONO_TYPE_FNPTR:
8395 case MONO_TYPE_ARRAY:
8396 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8397 type_to_eval_stack_type ((cfg), field->type, *sp);
8403 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8408 case MONO_TYPE_VALUETYPE:
8418 CHECK_STACK_OVF (1);
8420 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8421 load->flags |= ins_flag;
8434 token = read32 (ip + 1);
8435 klass = mini_get_class (method, token, generic_context);
8436 CHECK_TYPELOAD (klass);
8437 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8438 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8449 const char *data_ptr;
8451 guint32 field_token;
8457 token = read32 (ip + 1);
8459 klass = mini_get_class (method, token, generic_context);
8460 CHECK_TYPELOAD (klass);
8462 if (cfg->generic_sharing_context)
8463 context_used = mono_class_check_context_used (klass);
8465 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
8466 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
8467 ins->sreg1 = sp [0]->dreg;
8468 ins->type = STACK_I4;
8469 ins->dreg = alloc_ireg (cfg);
8470 MONO_ADD_INS (cfg->cbb, ins);
8471 *sp = mono_decompose_opcode (cfg, ins);
8476 MonoClass *array_class = mono_array_class_get (klass, 1);
8477 /* FIXME: we cannot get a managed
8478 allocator because we can't get the
8479 open generic class's vtable. We
8480 have the same problem in
8481 handle_alloc_from_inst(). This
8482 needs to be solved so that we can
8483 have managed allocs of shared
8486 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
8487 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
8489 MonoMethod *managed_alloc = NULL;
8491 /* FIXME: Decompose later to help abcrem */
8494 args [0] = emit_get_rgctx_klass (cfg, context_used,
8495 array_class, MONO_RGCTX_INFO_VTABLE);
8500 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
8502 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8504 if (cfg->opt & MONO_OPT_SHARED) {
8505 /* Decompose now to avoid problems with references to the domainvar */
8506 MonoInst *iargs [3];
8508 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8509 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8512 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8514 /* Decompose later since it is needed by abcrem */
8515 MonoClass *array_type = mono_array_class_get (klass, 1);
8516 mono_class_vtable (cfg->domain, array_type);
8517 CHECK_TYPELOAD (array_type);
8519 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8520 ins->dreg = alloc_preg (cfg);
8521 ins->sreg1 = sp [0]->dreg;
8522 ins->inst_newa_class = klass;
8523 ins->type = STACK_OBJ;
8525 MONO_ADD_INS (cfg->cbb, ins);
8526 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8527 cfg->cbb->has_array_access = TRUE;
8529 /* Needed so mono_emit_load_get_addr () gets called */
8530 mono_get_got_var (cfg);
8540 * we inline/optimize the initialization sequence if possible.
8541 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8542 * for small sizes open code the memcpy
8543 * ensure the rva field is big enough
8545 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8546 MonoMethod *memcpy_method = get_memcpy_method ();
8547 MonoInst *iargs [3];
8548 int add_reg = alloc_preg (cfg);
8550 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8551 if (cfg->compile_aot) {
8552 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8554 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8556 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8557 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8566 if (sp [0]->type != STACK_OBJ)
8569 dreg = alloc_preg (cfg);
8570 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8571 ins->dreg = alloc_preg (cfg);
8572 ins->sreg1 = sp [0]->dreg;
8573 ins->type = STACK_I4;
8574 MONO_ADD_INS (cfg->cbb, ins);
8575 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8576 cfg->cbb->has_array_access = TRUE;
8584 if (sp [0]->type != STACK_OBJ)
8587 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8589 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8590 CHECK_TYPELOAD (klass);
8591 /* we need to make sure that this array is exactly the type it needs
8592 * to be for correctness. the wrappers are lax with their usage
8593 * so we need to ignore them here
8595 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
8596 MonoClass *array_class = mono_array_class_get (klass, 1);
8597 mini_emit_check_array_type (cfg, sp [0], array_class);
8598 CHECK_TYPELOAD (array_class);
8602 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8617 case CEE_LDELEM_REF: {
8623 if (*ip == CEE_LDELEM) {
8625 token = read32 (ip + 1);
8626 klass = mini_get_class (method, token, generic_context);
8627 CHECK_TYPELOAD (klass);
8628 mono_class_init (klass);
8631 klass = array_access_to_klass (*ip);
8633 if (sp [0]->type != STACK_OBJ)
8636 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8638 if (sp [1]->opcode == OP_ICONST) {
8639 int array_reg = sp [0]->dreg;
8640 int index_reg = sp [1]->dreg;
8641 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8643 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8644 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8646 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8647 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8650 if (*ip == CEE_LDELEM)
8663 case CEE_STELEM_REF:
8670 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8672 if (*ip == CEE_STELEM) {
8674 token = read32 (ip + 1);
8675 klass = mini_get_class (method, token, generic_context);
8676 CHECK_TYPELOAD (klass);
8677 mono_class_init (klass);
8680 klass = array_access_to_klass (*ip);
8682 if (sp [0]->type != STACK_OBJ)
8685 /* storing a NULL doesn't need any of the complex checks in stelemref */
8686 if (generic_class_is_reference_type (cfg, klass) &&
8687 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8688 MonoMethod* helper = mono_marshal_get_stelemref ();
8689 MonoInst *iargs [3];
8691 if (sp [0]->type != STACK_OBJ)
8693 if (sp [2]->type != STACK_OBJ)
8700 mono_emit_method_call (cfg, helper, iargs, NULL);
8702 if (sp [1]->opcode == OP_ICONST) {
8703 int array_reg = sp [0]->dreg;
8704 int index_reg = sp [1]->dreg;
8705 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8707 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8708 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8710 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8711 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8715 if (*ip == CEE_STELEM)
8722 case CEE_CKFINITE: {
8726 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8727 ins->sreg1 = sp [0]->dreg;
8728 ins->dreg = alloc_freg (cfg);
8729 ins->type = STACK_R8;
8730 MONO_ADD_INS (bblock, ins);
8732 *sp++ = mono_decompose_opcode (cfg, ins);
8737 case CEE_REFANYVAL: {
8738 MonoInst *src_var, *src;
8740 int klass_reg = alloc_preg (cfg);
8741 int dreg = alloc_preg (cfg);
8744 MONO_INST_NEW (cfg, ins, *ip);
8747 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8748 CHECK_TYPELOAD (klass);
8749 mono_class_init (klass);
8751 if (cfg->generic_sharing_context)
8752 context_used = mono_class_check_context_used (klass);
8755 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8757 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8758 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8759 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8762 MonoInst *klass_ins;
8764 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8765 klass, MONO_RGCTX_INFO_KLASS);
8768 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8769 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8771 mini_emit_class_check (cfg, klass_reg, klass);
8773 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8774 ins->type = STACK_MP;
8779 case CEE_MKREFANY: {
8780 MonoInst *loc, *addr;
8783 MONO_INST_NEW (cfg, ins, *ip);
8786 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8787 CHECK_TYPELOAD (klass);
8788 mono_class_init (klass);
8790 if (cfg->generic_sharing_context)
8791 context_used = mono_class_check_context_used (klass);
8793 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8794 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8797 MonoInst *const_ins;
8798 int type_reg = alloc_preg (cfg);
8800 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8801 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8802 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8803 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8804 } else if (cfg->compile_aot) {
8805 int const_reg = alloc_preg (cfg);
8806 int type_reg = alloc_preg (cfg);
8808 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8809 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8810 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8811 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8813 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8814 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8816 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8818 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8819 ins->type = STACK_VTYPE;
8820 ins->klass = mono_defaults.typed_reference_class;
8827 MonoClass *handle_class;
8829 CHECK_STACK_OVF (1);
8832 n = read32 (ip + 1);
8834 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
8835 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8836 handle = mono_method_get_wrapper_data (method, n);
8837 handle_class = mono_method_get_wrapper_data (method, n + 1);
8838 if (handle_class == mono_defaults.typehandle_class)
8839 handle = &((MonoClass*)handle)->byval_arg;
8842 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8846 mono_class_init (handle_class);
8847 if (cfg->generic_sharing_context) {
8848 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
8849 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
8850 /* This case handles ldtoken
8851 of an open type, like for
8854 } else if (handle_class == mono_defaults.typehandle_class) {
8855 /* If we get a MONO_TYPE_CLASS
8856 then we need to provide the
8858 instantiation of it. */
8859 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8862 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8863 } else if (handle_class == mono_defaults.fieldhandle_class)
8864 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8865 else if (handle_class == mono_defaults.methodhandle_class)
8866 context_used = mono_method_check_context_used (handle);
8868 g_assert_not_reached ();
8871 if ((cfg->opt & MONO_OPT_SHARED) &&
8872 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
8873 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
8874 MonoInst *addr, *vtvar, *iargs [3];
8875 int method_context_used;
8877 if (cfg->generic_sharing_context)
8878 method_context_used = mono_method_check_context_used (method);
8880 method_context_used = 0;
8882 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8884 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8885 EMIT_NEW_ICONST (cfg, iargs [1], n);
8886 if (method_context_used) {
8887 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
8888 method, MONO_RGCTX_INFO_METHOD);
8889 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8891 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
8892 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
8894 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8896 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8898 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8900 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
8901 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
8902 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
8903 (cmethod->klass == mono_defaults.monotype_class->parent) &&
8904 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
8905 MonoClass *tclass = mono_class_from_mono_type (handle);
8907 mono_class_init (tclass);
8909 ins = emit_get_rgctx_klass (cfg, context_used,
8910 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
8911 } else if (cfg->compile_aot) {
8912 if (method->wrapper_type) {
8913 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
8914 /* Special case for static synchronized wrappers */
8915 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
8917 /* FIXME: n is not a normal token */
8918 cfg->disable_aot = TRUE;
8919 EMIT_NEW_PCONST (cfg, ins, NULL);
8922 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
8925 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
8927 ins->type = STACK_OBJ;
8928 ins->klass = cmethod->klass;
8931 MonoInst *addr, *vtvar;
8933 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8936 if (handle_class == mono_defaults.typehandle_class) {
8937 ins = emit_get_rgctx_klass (cfg, context_used,
8938 mono_class_from_mono_type (handle),
8939 MONO_RGCTX_INFO_TYPE);
8940 } else if (handle_class == mono_defaults.methodhandle_class) {
8941 ins = emit_get_rgctx_method (cfg, context_used,
8942 handle, MONO_RGCTX_INFO_METHOD);
8943 } else if (handle_class == mono_defaults.fieldhandle_class) {
8944 ins = emit_get_rgctx_field (cfg, context_used,
8945 handle, MONO_RGCTX_INFO_CLASS_FIELD);
8947 g_assert_not_reached ();
8949 } else if (cfg->compile_aot) {
8950 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
8952 EMIT_NEW_PCONST (cfg, ins, handle);
8954 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8955 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8956 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8966 MONO_INST_NEW (cfg, ins, OP_THROW);
8968 ins->sreg1 = sp [0]->dreg;
8970 bblock->out_of_line = TRUE;
8971 MONO_ADD_INS (bblock, ins);
8972 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
8973 MONO_ADD_INS (bblock, ins);
8976 link_bblock (cfg, bblock, end_bblock);
8977 start_new_bblock = 1;
8979 case CEE_ENDFINALLY:
8980 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
8981 MONO_ADD_INS (bblock, ins);
8983 start_new_bblock = 1;
8986 * Control will leave the method so empty the stack, otherwise
8987 * the next basic block will start with a nonempty stack.
8989 while (sp != stack_start) {
8997 if (*ip == CEE_LEAVE) {
8999 target = ip + 5 + (gint32)read32(ip + 1);
9002 target = ip + 2 + (signed char)(ip [1]);
9005 /* empty the stack */
9006 while (sp != stack_start) {
9011 * If this leave statement is in a catch block, check for a
9012 * pending exception, and rethrow it if necessary.
9013 * We avoid doing this in runtime invoke wrappers, since those are called
9014 * by native code which excepts the wrapper to catch all exceptions.
9016 for (i = 0; i < header->num_clauses; ++i) {
9017 MonoExceptionClause *clause = &header->clauses [i];
9020 * Use <= in the final comparison to handle clauses with multiple
9021 * leave statements, like in bug #78024.
9022 * The ordering of the exception clauses guarantees that we find the
9025 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
9027 MonoBasicBlock *dont_throw;
9032 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
9035 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
9037 NEW_BBLOCK (cfg, dont_throw);
9040 * Currently, we allways rethrow the abort exception, despite the
9041 * fact that this is not correct. See thread6.cs for an example.
9042 * But propagating the abort exception is more important than
9043 * getting the sematics right.
9045 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
9046 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
9047 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
9049 MONO_START_BB (cfg, dont_throw);
9054 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
9056 for (tmp = handlers; tmp; tmp = tmp->next) {
9058 link_bblock (cfg, bblock, tblock);
9059 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
9060 ins->inst_target_bb = tblock;
9061 MONO_ADD_INS (bblock, ins);
9062 bblock->has_call_handler = 1;
9063 if (COMPILE_LLVM (cfg)) {
9064 MonoBasicBlock *target_bb;
9067 * Link the finally bblock with the target, since it will
9068 * conceptually branch there.
9069 * FIXME: Have to link the bblock containing the endfinally.
9071 GET_BBLOCK (cfg, target_bb, target);
9072 link_bblock (cfg, tblock, target_bb);
9075 g_list_free (handlers);
9078 MONO_INST_NEW (cfg, ins, OP_BR);
9079 MONO_ADD_INS (bblock, ins);
9080 GET_BBLOCK (cfg, tblock, target);
9081 link_bblock (cfg, bblock, tblock);
9082 ins->inst_target_bb = tblock;
9083 start_new_bblock = 1;
9085 if (*ip == CEE_LEAVE)
9094 * Mono specific opcodes
9096 case MONO_CUSTOM_PREFIX: {
9098 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
9102 case CEE_MONO_ICALL: {
9104 MonoJitICallInfo *info;
9106 token = read32 (ip + 2);
9107 func = mono_method_get_wrapper_data (method, token);
9108 info = mono_find_jit_icall_by_addr (func);
9111 CHECK_STACK (info->sig->param_count);
9112 sp -= info->sig->param_count;
9114 ins = mono_emit_jit_icall (cfg, info->func, sp);
9115 if (!MONO_TYPE_IS_VOID (info->sig->ret))
9119 inline_costs += 10 * num_calls++;
9123 case CEE_MONO_LDPTR: {
9126 CHECK_STACK_OVF (1);
9128 token = read32 (ip + 2);
9130 ptr = mono_method_get_wrapper_data (method, token);
9131 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
9132 MonoJitICallInfo *callinfo;
9133 const char *icall_name;
9135 icall_name = method->name + strlen ("__icall_wrapper_");
9136 g_assert (icall_name);
9137 callinfo = mono_find_jit_icall_by_name (icall_name);
9138 g_assert (callinfo);
9140 if (ptr == callinfo->func) {
9141 /* Will be transformed into an AOTCONST later */
9142 EMIT_NEW_PCONST (cfg, ins, ptr);
9148 /* FIXME: Generalize this */
9149 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
9150 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
9155 EMIT_NEW_PCONST (cfg, ins, ptr);
9158 inline_costs += 10 * num_calls++;
9159 /* Can't embed random pointers into AOT code */
9160 cfg->disable_aot = 1;
9163 case CEE_MONO_ICALL_ADDR: {
9164 MonoMethod *cmethod;
9167 CHECK_STACK_OVF (1);
9169 token = read32 (ip + 2);
9171 cmethod = mono_method_get_wrapper_data (method, token);
9173 if (cfg->compile_aot) {
9174 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
9176 ptr = mono_lookup_internal_call (cmethod);
9178 EMIT_NEW_PCONST (cfg, ins, ptr);
9184 case CEE_MONO_VTADDR: {
9185 MonoInst *src_var, *src;
9191 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9192 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
9197 case CEE_MONO_NEWOBJ: {
9198 MonoInst *iargs [2];
9200 CHECK_STACK_OVF (1);
9202 token = read32 (ip + 2);
9203 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9204 mono_class_init (klass);
9205 NEW_DOMAINCONST (cfg, iargs [0]);
9206 MONO_ADD_INS (cfg->cbb, iargs [0]);
9207 NEW_CLASSCONST (cfg, iargs [1], klass);
9208 MONO_ADD_INS (cfg->cbb, iargs [1]);
9209 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9211 inline_costs += 10 * num_calls++;
9214 case CEE_MONO_OBJADDR:
9217 MONO_INST_NEW (cfg, ins, OP_MOVE);
9218 ins->dreg = alloc_preg (cfg);
9219 ins->sreg1 = sp [0]->dreg;
9220 ins->type = STACK_MP;
9221 MONO_ADD_INS (cfg->cbb, ins);
9225 case CEE_MONO_LDNATIVEOBJ:
9227 * Similar to LDOBJ, but instead load the unmanaged
9228 * representation of the vtype to the stack.
9233 token = read32 (ip + 2);
9234 klass = mono_method_get_wrapper_data (method, token);
9235 g_assert (klass->valuetype);
9236 mono_class_init (klass);
9239 MonoInst *src, *dest, *temp;
9242 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9243 temp->backend.is_pinvoke = 1;
9244 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9245 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9247 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9248 dest->type = STACK_VTYPE;
9249 dest->klass = klass;
9255 case CEE_MONO_RETOBJ: {
9257 * Same as RET, but return the native representation of a vtype
9260 g_assert (cfg->ret);
9261 g_assert (mono_method_signature (method)->pinvoke);
9266 token = read32 (ip + 2);
9267 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9269 if (!cfg->vret_addr) {
9270 g_assert (cfg->ret_var_is_local);
9272 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9274 EMIT_NEW_RETLOADA (cfg, ins);
9276 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9278 if (sp != stack_start)
9281 MONO_INST_NEW (cfg, ins, OP_BR);
9282 ins->inst_target_bb = end_bblock;
9283 MONO_ADD_INS (bblock, ins);
9284 link_bblock (cfg, bblock, end_bblock);
9285 start_new_bblock = 1;
9289 case CEE_MONO_CISINST:
9290 case CEE_MONO_CCASTCLASS: {
9295 token = read32 (ip + 2);
9296 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9297 if (ip [1] == CEE_MONO_CISINST)
9298 ins = handle_cisinst (cfg, klass, sp [0]);
9300 ins = handle_ccastclass (cfg, klass, sp [0]);
9306 case CEE_MONO_SAVE_LMF:
9307 case CEE_MONO_RESTORE_LMF:
9308 #ifdef MONO_ARCH_HAVE_LMF_OPS
9309 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9310 MONO_ADD_INS (bblock, ins);
9311 cfg->need_lmf_area = TRUE;
9315 case CEE_MONO_CLASSCONST:
9316 CHECK_STACK_OVF (1);
9318 token = read32 (ip + 2);
9319 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9322 inline_costs += 10 * num_calls++;
9324 case CEE_MONO_NOT_TAKEN:
9325 bblock->out_of_line = TRUE;
9329 CHECK_STACK_OVF (1);
9331 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9332 ins->dreg = alloc_preg (cfg);
9333 ins->inst_offset = (gint32)read32 (ip + 2);
9334 ins->type = STACK_PTR;
9335 MONO_ADD_INS (bblock, ins);
9339 case CEE_MONO_DYN_CALL: {
9342 /* It would be easier to call a trampoline, but that would put an
9343 * extra frame on the stack, confusing exception handling. So
9344 * implement it inline using an opcode for now.
9347 if (!cfg->dyn_call_var) {
9348 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9349 /* prevent it from being register allocated */
9350 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
9353 /* Has to use a call inst since it local regalloc expects it */
9354 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
9355 ins = (MonoInst*)call;
9357 ins->sreg1 = sp [0]->dreg;
9358 ins->sreg2 = sp [1]->dreg;
9359 MONO_ADD_INS (bblock, ins);
9361 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
9362 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
9366 inline_costs += 10 * num_calls++;
9371 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9381 /* somewhat similar to LDTOKEN */
9382 MonoInst *addr, *vtvar;
9383 CHECK_STACK_OVF (1);
9384 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9386 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9387 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9389 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9390 ins->type = STACK_VTYPE;
9391 ins->klass = mono_defaults.argumenthandle_class;
9404 * The following transforms:
9405 * CEE_CEQ into OP_CEQ
9406 * CEE_CGT into OP_CGT
9407 * CEE_CGT_UN into OP_CGT_UN
9408 * CEE_CLT into OP_CLT
9409 * CEE_CLT_UN into OP_CLT_UN
9411 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9413 MONO_INST_NEW (cfg, ins, cmp->opcode);
9415 cmp->sreg1 = sp [0]->dreg;
9416 cmp->sreg2 = sp [1]->dreg;
9417 type_from_op (cmp, sp [0], sp [1]);
9419 if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9420 cmp->opcode = OP_LCOMPARE;
9421 else if (sp [0]->type == STACK_R8)
9422 cmp->opcode = OP_FCOMPARE;
9424 cmp->opcode = OP_ICOMPARE;
9425 MONO_ADD_INS (bblock, cmp);
9426 ins->type = STACK_I4;
9427 ins->dreg = alloc_dreg (cfg, ins->type);
9428 type_from_op (ins, sp [0], sp [1]);
9430 if (cmp->opcode == OP_FCOMPARE) {
9432 * The backends expect the fceq opcodes to do the
9435 cmp->opcode = OP_NOP;
9436 ins->sreg1 = cmp->sreg1;
9437 ins->sreg2 = cmp->sreg2;
9439 MONO_ADD_INS (bblock, ins);
9446 MonoMethod *cil_method;
9447 gboolean needs_static_rgctx_invoke;
9448 int invoke_context_used = 0;
9450 CHECK_STACK_OVF (1);
9452 n = read32 (ip + 2);
9453 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9456 mono_class_init (cmethod->klass);
9458 mono_save_token_info (cfg, image, n, cmethod);
9460 if (cfg->generic_sharing_context)
9461 context_used = mono_method_check_context_used (cmethod);
9463 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9465 cil_method = cmethod;
9466 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9467 METHOD_ACCESS_FAILURE;
9469 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9470 if (check_linkdemand (cfg, method, cmethod))
9472 CHECK_CFG_EXCEPTION;
9473 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9474 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9478 * Optimize the common case of ldftn+delegate creation
9480 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
9481 /* FIXME: SGEN support */
9482 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9483 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9484 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9485 MonoInst *target_ins;
9488 invoke = mono_get_delegate_invoke (ctor_method->klass);
9489 if (!invoke || !mono_method_signature (invoke))
9492 if (cfg->generic_sharing_context)
9493 invoke_context_used = mono_method_check_context_used (invoke);
9495 if (invoke_context_used == 0) {
9497 if (cfg->verbose_level > 3)
9498 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9499 target_ins = sp [-1];
9501 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
9502 CHECK_CFG_EXCEPTION;
9511 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9512 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9516 inline_costs += 10 * num_calls++;
9519 case CEE_LDVIRTFTN: {
9524 n = read32 (ip + 2);
9525 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9528 mono_class_init (cmethod->klass);
9530 if (cfg->generic_sharing_context)
9531 context_used = mono_method_check_context_used (cmethod);
9533 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9534 if (check_linkdemand (cfg, method, cmethod))
9536 CHECK_CFG_EXCEPTION;
9537 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9538 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9544 args [1] = emit_get_rgctx_method (cfg, context_used,
9545 cmethod, MONO_RGCTX_INFO_METHOD);
9548 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9550 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9553 inline_costs += 10 * num_calls++;
9557 CHECK_STACK_OVF (1);
9559 n = read16 (ip + 2);
9561 EMIT_NEW_ARGLOAD (cfg, ins, n);
9566 CHECK_STACK_OVF (1);
9568 n = read16 (ip + 2);
9570 NEW_ARGLOADA (cfg, ins, n);
9571 MONO_ADD_INS (cfg->cbb, ins);
9579 n = read16 (ip + 2);
9581 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9583 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9587 CHECK_STACK_OVF (1);
9589 n = read16 (ip + 2);
9591 EMIT_NEW_LOCLOAD (cfg, ins, n);
9596 unsigned char *tmp_ip;
9597 CHECK_STACK_OVF (1);
9599 n = read16 (ip + 2);
9602 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9608 EMIT_NEW_LOCLOADA (cfg, ins, n);
9617 n = read16 (ip + 2);
9619 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9621 emit_stloc_ir (cfg, sp, header, n);
9628 if (sp != stack_start)
9630 if (cfg->method != method)
9632 * Inlining this into a loop in a parent could lead to
9633 * stack overflows which is different behavior than the
9634 * non-inlined case, thus disable inlining in this case.
9636 goto inline_failure;
9638 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9639 ins->dreg = alloc_preg (cfg);
9640 ins->sreg1 = sp [0]->dreg;
9641 ins->type = STACK_PTR;
9642 MONO_ADD_INS (cfg->cbb, ins);
9644 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9646 ins->flags |= MONO_INST_INIT;
9651 case CEE_ENDFILTER: {
9652 MonoExceptionClause *clause, *nearest;
9653 int cc, nearest_num;
9657 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9659 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9660 ins->sreg1 = (*sp)->dreg;
9661 MONO_ADD_INS (bblock, ins);
9662 start_new_bblock = 1;
9667 for (cc = 0; cc < header->num_clauses; ++cc) {
9668 clause = &header->clauses [cc];
9669 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9670 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9671 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9677 if ((ip - header->code) != nearest->handler_offset)
9682 case CEE_UNALIGNED_:
9683 ins_flag |= MONO_INST_UNALIGNED;
9684 /* FIXME: record alignment? we can assume 1 for now */
9689 ins_flag |= MONO_INST_VOLATILE;
9693 ins_flag |= MONO_INST_TAILCALL;
9694 cfg->flags |= MONO_CFG_HAS_TAIL;
9695 /* Can't inline tail calls at this time */
9696 inline_costs += 100000;
9703 token = read32 (ip + 2);
9704 klass = mini_get_class (method, token, generic_context);
9705 CHECK_TYPELOAD (klass);
9706 if (generic_class_is_reference_type (cfg, klass))
9707 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9709 mini_emit_initobj (cfg, *sp, NULL, klass);
9713 case CEE_CONSTRAINED_:
9715 token = read32 (ip + 2);
9716 if (method->wrapper_type != MONO_WRAPPER_NONE)
9717 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
9719 constrained_call = mono_class_get_full (image, token, generic_context);
9720 CHECK_TYPELOAD (constrained_call);
9725 MonoInst *iargs [3];
9729 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9730 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9731 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9732 /* emit_memset only works when val == 0 */
9733 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9738 if (ip [1] == CEE_CPBLK) {
9739 MonoMethod *memcpy_method = get_memcpy_method ();
9740 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9742 MonoMethod *memset_method = get_memset_method ();
9743 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9753 ins_flag |= MONO_INST_NOTYPECHECK;
9755 ins_flag |= MONO_INST_NORANGECHECK;
9756 /* we ignore the no-nullcheck for now since we
9757 * really do it explicitly only when doing callvirt->call
9763 int handler_offset = -1;
9765 for (i = 0; i < header->num_clauses; ++i) {
9766 MonoExceptionClause *clause = &header->clauses [i];
9767 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9768 handler_offset = clause->handler_offset;
9773 bblock->flags |= BB_EXCEPTION_UNSAFE;
9775 g_assert (handler_offset != -1);
9777 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9778 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9779 ins->sreg1 = load->dreg;
9780 MONO_ADD_INS (bblock, ins);
9782 link_bblock (cfg, bblock, end_bblock);
9783 start_new_bblock = 1;
9791 CHECK_STACK_OVF (1);
9793 token = read32 (ip + 2);
9794 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC) {
9795 MonoType *type = mono_type_create_from_typespec (image, token);
9796 token = mono_type_size (type, &ialign);
9798 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9799 CHECK_TYPELOAD (klass);
9800 mono_class_init (klass);
9801 token = mono_class_value_size (klass, &align);
9803 EMIT_NEW_ICONST (cfg, ins, token);
9808 case CEE_REFANYTYPE: {
9809 MonoInst *src_var, *src;
9815 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9817 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9818 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9819 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9829 g_error ("opcode 0xfe 0x%02x not handled", ip [1]);
9834 g_error ("opcode 0x%02x not handled", *ip);
9837 if (start_new_bblock != 1)
9840 bblock->cil_length = ip - bblock->cil_code;
9841 bblock->next_bb = end_bblock;
9843 if (cfg->method == method && cfg->domainvar) {
9845 MonoInst *get_domain;
9847 cfg->cbb = init_localsbb;
9849 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9850 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9853 get_domain->dreg = alloc_preg (cfg);
9854 MONO_ADD_INS (cfg->cbb, get_domain);
9856 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9857 MONO_ADD_INS (cfg->cbb, store);
9860 #ifdef TARGET_POWERPC
9861 if (cfg->compile_aot)
9862 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
9863 mono_get_got_var (cfg);
9866 if (cfg->method == method && cfg->got_var)
9867 mono_emit_load_got_addr (cfg);
9872 cfg->cbb = init_localsbb;
9874 for (i = 0; i < header->num_locals; ++i) {
9875 MonoType *ptype = header->locals [i];
9876 int t = ptype->type;
9877 dreg = cfg->locals [i]->dreg;
9879 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
9880 t = mono_class_enum_basetype (ptype->data.klass)->type;
9882 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9883 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
9884 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
9885 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
9886 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
9887 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
9888 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9889 ins->type = STACK_R8;
9890 ins->inst_p0 = (void*)&r8_0;
9891 ins->dreg = alloc_dreg (cfg, STACK_R8);
9892 MONO_ADD_INS (init_localsbb, ins);
9893 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
9894 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
9895 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
9896 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
9898 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9903 /* Add a sequence point for method entry/exit events */
9905 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
9906 MONO_ADD_INS (init_localsbb, ins);
9907 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
9908 MONO_ADD_INS (cfg->bb_exit, ins);
9913 if (cfg->method == method) {
9915 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9916 bb->region = mono_find_block_region (cfg, bb->real_offset);
9918 mono_create_spvar_for_region (cfg, bb->region);
9919 if (cfg->verbose_level > 2)
9920 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
9924 g_slist_free (class_inits);
9925 dont_inline = g_list_remove (dont_inline, method);
9927 if (inline_costs < 0) {
9930 /* Method is too large */
9931 mname = mono_method_full_name (method, TRUE);
9932 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
9933 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
9938 if ((cfg->verbose_level > 2) && (cfg->method == method))
9939 mono_print_code (cfg, "AFTER METHOD-TO-IR");
9941 return inline_costs;
9944 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
9945 g_slist_free (class_inits);
9946 dont_inline = g_list_remove (dont_inline, method);
9950 g_slist_free (class_inits);
9951 dont_inline = g_list_remove (dont_inline, method);
9955 g_slist_free (class_inits);
9956 dont_inline = g_list_remove (dont_inline, method);
9957 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
9961 g_slist_free (class_inits);
9962 dont_inline = g_list_remove (dont_inline, method);
9963 set_exception_type_from_invalid_il (cfg, method, ip);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map an OP_STORE*_MEMBASE_REG opcode to its OP_STORE*_MEMBASE_IMM
 * counterpart: same store width, but the value comes from an immediate
 * (inst_imm) instead of a source register. Asserts if OPCODE has no
 * immediate form.
 */
9968 store_membase_reg_to_store_membase_imm (int opcode)
9971 case OP_STORE_MEMBASE_REG:
9972 return OP_STORE_MEMBASE_IMM;
9973 case OP_STOREI1_MEMBASE_REG:
9974 return OP_STOREI1_MEMBASE_IMM;
9975 case OP_STOREI2_MEMBASE_REG:
9976 return OP_STOREI2_MEMBASE_IMM;
9977 case OP_STOREI4_MEMBASE_REG:
9978 return OP_STOREI4_MEMBASE_IMM;
9979 case OP_STOREI8_MEMBASE_REG:
9980 return OP_STOREI8_MEMBASE_IMM;
/* No immediate variant exists for any other store opcode */
9982 g_assert_not_reached ();
9988 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 *
 *   Return the immediate variant of OPCODE, i.e. the same operation with
 * its register operand replaced by an inst_imm constant (e.g. integer and
 * long ALU/shift ops, compares, membase stores, and some x86/amd64-only
 * opcodes under the TARGET_ ifdefs below). The matching case labels and
 * the fallback return are elided in this view — presumably -1 is returned
 * when no immediate form exists; TODO confirm against the full file.
 */
9991 mono_op_to_op_imm (int opcode)
10001 return OP_IDIV_UN_IMM;
10003 return OP_IREM_IMM;
10005 return OP_IREM_UN_IMM;
10007 return OP_IMUL_IMM;
10009 return OP_IAND_IMM;
10013 return OP_IXOR_IMM;
10015 return OP_ISHL_IMM;
10017 return OP_ISHR_IMM;
10019 return OP_ISHR_UN_IMM;
10022 return OP_LADD_IMM;
10024 return OP_LSUB_IMM;
10026 return OP_LAND_IMM;
10030 return OP_LXOR_IMM;
10032 return OP_LSHL_IMM;
10034 return OP_LSHR_IMM;
10036 return OP_LSHR_UN_IMM;
10039 return OP_COMPARE_IMM;
10041 return OP_ICOMPARE_IMM;
10043 return OP_LCOMPARE_IMM;
10045 case OP_STORE_MEMBASE_REG:
10046 return OP_STORE_MEMBASE_IMM;
10047 case OP_STOREI1_MEMBASE_REG:
10048 return OP_STOREI1_MEMBASE_IMM;
10049 case OP_STOREI2_MEMBASE_REG:
10050 return OP_STOREI2_MEMBASE_IMM;
10051 case OP_STOREI4_MEMBASE_REG:
10052 return OP_STOREI4_MEMBASE_IMM;
/* x86/amd64-specific opcodes with immediate forms */
10054 #if defined(TARGET_X86) || defined (TARGET_AMD64)
10056 return OP_X86_PUSH_IMM;
10057 case OP_X86_COMPARE_MEMBASE_REG:
10058 return OP_X86_COMPARE_MEMBASE_IMM;
10060 #if defined(TARGET_AMD64)
10061 case OP_AMD64_ICOMPARE_MEMBASE_REG:
10062 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10064 case OP_VOIDCALL_REG:
10065 return OP_VOIDCALL;
10073 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CEE_LDIND_* CIL indirect-load opcode to the corresponding
 * OP_LOAD*_MEMBASE IR opcode. Note that CEE_LDIND_REF (and, per the
 * elided labels, presumably the native-int load as well) maps to the
 * pointer-sized OP_LOAD_MEMBASE. Asserts on any other opcode.
 */
10080 ldind_to_load_membase (int opcode)
10084 return OP_LOADI1_MEMBASE;
10086 return OP_LOADU1_MEMBASE;
10088 return OP_LOADI2_MEMBASE;
10090 return OP_LOADU2_MEMBASE;
10092 return OP_LOADI4_MEMBASE;
10094 return OP_LOADU4_MEMBASE;
10096 return OP_LOAD_MEMBASE;
10097 case CEE_LDIND_REF:
10098 return OP_LOAD_MEMBASE;
10100 return OP_LOADI8_MEMBASE;
10102 return OP_LOADR4_MEMBASE;
10104 return OP_LOADR8_MEMBASE;
10106 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CEE_STIND_* CIL indirect-store opcode to the corresponding
 * OP_STORE*_MEMBASE_REG IR opcode. CEE_STIND_REF maps to the
 * pointer-sized OP_STORE_MEMBASE_REG. Asserts on any other opcode.
 */
10113 stind_to_store_membase (int opcode)
10117 return OP_STOREI1_MEMBASE_REG;
10119 return OP_STOREI2_MEMBASE_REG;
10121 return OP_STOREI4_MEMBASE_REG;
10123 case CEE_STIND_REF:
10124 return OP_STORE_MEMBASE_REG;
10126 return OP_STOREI8_MEMBASE_REG;
10128 return OP_STORER4_MEMBASE_REG;
10130 return OP_STORER8_MEMBASE_REG;
10132 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map an OP_LOAD*_MEMBASE opcode (load from base register + offset) to
 * the corresponding absolute-address OP_LOAD*_MEM form. Only implemented
 * on x86/amd64 (see the FIXME below); the 8-byte variant only when
 * registers are 64 bit. The fallback return for other opcodes/targets is
 * elided in this view.
 */
10139 mono_load_membase_to_load_mem (int opcode)
10141 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
10142 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10144 case OP_LOAD_MEMBASE:
10145 return OP_LOAD_MEM;
10146 case OP_LOADU1_MEMBASE:
10147 return OP_LOADU1_MEM;
10148 case OP_LOADU2_MEMBASE:
10149 return OP_LOADU2_MEM;
10150 case OP_LOADI4_MEMBASE:
10151 return OP_LOADI4_MEM;
10152 case OP_LOADU4_MEMBASE:
10153 return OP_LOADU4_MEM;
10154 #if SIZEOF_REGISTER == 8
10155 case OP_LOADI8_MEMBASE:
10156 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Try to fuse a "load var; ALU op; store var" sequence into a single
 * x86/amd64 read-modify-write *_MEMBASE opcode whose destination is a
 * memory location. STORE_OPCODE is the store that would write the result
 * back: only plain pointer/int32 stores qualify on x86, plus int64 stores
 * on amd64. Callers compare the result against -1 to detect "not
 * fusable" (the fallback return is elided in this view).
 */
10165 op_to_op_dest_membase (int store_opcode, int opcode)
10167 #if defined(TARGET_X86)
10168 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
/* register-source RMW forms */
10173 return OP_X86_ADD_MEMBASE_REG;
10175 return OP_X86_SUB_MEMBASE_REG;
10177 return OP_X86_AND_MEMBASE_REG;
10179 return OP_X86_OR_MEMBASE_REG;
10181 return OP_X86_XOR_MEMBASE_REG;
/* immediate-source RMW forms */
10184 return OP_X86_ADD_MEMBASE_IMM;
10187 return OP_X86_SUB_MEMBASE_IMM;
10190 return OP_X86_AND_MEMBASE_IMM;
10193 return OP_X86_OR_MEMBASE_IMM;
10196 return OP_X86_XOR_MEMBASE_IMM;
10202 #if defined(TARGET_AMD64)
10203 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32-bit RMW forms (X86_ prefix) and 64-bit forms (AMD64_ prefix) below;
 * the case labels selecting between them are elided in this view */
10208 return OP_X86_ADD_MEMBASE_REG;
10210 return OP_X86_SUB_MEMBASE_REG;
10212 return OP_X86_AND_MEMBASE_REG;
10214 return OP_X86_OR_MEMBASE_REG;
10216 return OP_X86_XOR_MEMBASE_REG;
10218 return OP_X86_ADD_MEMBASE_IMM;
10220 return OP_X86_SUB_MEMBASE_IMM;
10222 return OP_X86_AND_MEMBASE_IMM;
10224 return OP_X86_OR_MEMBASE_IMM;
10226 return OP_X86_XOR_MEMBASE_IMM;
10228 return OP_AMD64_ADD_MEMBASE_REG;
10230 return OP_AMD64_SUB_MEMBASE_REG;
10232 return OP_AMD64_AND_MEMBASE_REG;
10234 return OP_AMD64_OR_MEMBASE_REG;
10236 return OP_AMD64_XOR_MEMBASE_REG;
10239 return OP_AMD64_ADD_MEMBASE_IMM;
10242 return OP_AMD64_SUB_MEMBASE_IMM;
10245 return OP_AMD64_AND_MEMBASE_IMM;
10248 return OP_AMD64_OR_MEMBASE_IMM;
10251 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Try to fuse a compare-result store into a single x86/amd64 setcc
 * instruction writing straight to memory (OP_X86_SETEQ/SETNE_MEMBASE).
 * Only 1-byte stores (OP_STOREI1_MEMBASE_REG) qualify. Callers compare
 * the result against -1 to detect "not fusable" (the fallback return is
 * elided in this view).
 */
10261 op_to_op_store_membase (int store_opcode, int opcode)
10263 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10266 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10267 return OP_X86_SETEQ_MEMBASE;
10269 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10270 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Try to fuse a load feeding OPCODE's first source register (sreg1)
 * into a memory-operand form of the instruction, so the separate load
 * can be dropped (e.g. compare/push directly from memory on x86/amd64).
 * LOAD_OPCODE is the load that produced sreg1. Callers compare the
 * result against -1 to detect "not fusable" (the fallback return is
 * elided in this view).
 */
10278 op_to_op_src1_membase (int load_opcode, int opcode)
10281 /* FIXME: This has sign extension issues */
10283 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10284 return OP_X86_COMPARE_MEMBASE8_IMM;
/* only pointer-sized / 32-bit loads can be fused below */
10287 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10292 return OP_X86_PUSH_MEMBASE;
10293 case OP_COMPARE_IMM:
10294 case OP_ICOMPARE_IMM:
10295 return OP_X86_COMPARE_MEMBASE_IMM;
10298 return OP_X86_COMPARE_MEMBASE_REG;
10302 #ifdef TARGET_AMD64
10303 /* FIXME: This has sign extension issues */
10305 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10306 return OP_X86_COMPARE_MEMBASE8_IMM;
10311 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10312 return OP_X86_PUSH_MEMBASE;
10314 /* FIXME: This only works for 32 bit immediates
10315 case OP_COMPARE_IMM:
10316 case OP_LCOMPARE_IMM:
10317 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10318 return OP_AMD64_COMPARE_MEMBASE_IMM;
10320 case OP_ICOMPARE_IMM:
10321 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10322 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10326 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10327 return OP_AMD64_COMPARE_MEMBASE_REG;
10330 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10331 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Try to fuse a load feeding OPCODE's second source register (sreg2)
 * into a reg,mem form of the instruction on x86/amd64 (e.g.
 * OP_X86_ADD_REG_MEMBASE reads its second operand from memory). The
 * load width must match the operation width: 32-bit loads feed the
 * X86_ forms, pointer/64-bit loads feed the AMD64_ forms. Callers
 * compare the result against -1 to detect "not fusable" (the fallback
 * return is elided in this view).
 */
10340 op_to_op_src2_membase (int load_opcode, int opcode)
10343 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10349 return OP_X86_COMPARE_REG_MEMBASE;
10351 return OP_X86_ADD_REG_MEMBASE;
10353 return OP_X86_SUB_REG_MEMBASE;
10355 return OP_X86_AND_REG_MEMBASE;
10357 return OP_X86_OR_REG_MEMBASE;
10359 return OP_X86_XOR_REG_MEMBASE;
10363 #ifdef TARGET_AMD64
10366 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10367 return OP_AMD64_ICOMPARE_REG_MEMBASE;
10371 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10372 return OP_AMD64_COMPARE_REG_MEMBASE;
10375 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10376 return OP_X86_ADD_REG_MEMBASE;
10378 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10379 return OP_X86_SUB_REG_MEMBASE;
10381 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10382 return OP_X86_AND_REG_MEMBASE;
10384 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10385 return OP_X86_OR_REG_MEMBASE;
10387 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10388 return OP_X86_XOR_REG_MEMBASE;
10390 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10391 return OP_AMD64_ADD_REG_MEMBASE;
10393 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10394 return OP_AMD64_SUB_REG_MEMBASE;
10396 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10397 return OP_AMD64_AND_REG_MEMBASE;
10399 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10400 return OP_AMD64_OR_REG_MEMBASE;
10402 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10403 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuse the conversion (the opcode
 * cases are elided in this view) for operations this architecture
 * emulates in software — long shifts on 32-bit-register targets without
 * MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS, and mul/div under
 * MONO_ARCH_EMULATE_MUL_DIV / MONO_ARCH_EMULATE_DIV — since the
 * emulation helpers have no immediate form.
 */
10411 mono_op_to_op_imm_noemul (int opcode)
10414 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
10419 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10427 return mono_op_to_op_imm (opcode);
10431 #ifndef DISABLE_JIT
10434 * mono_handle_global_vregs:
10436 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/*
 * Pass structure (from the visible code): (1) scan every instruction in
 * every bblock and record, per vreg, the single bblock using it or -1 if
 * used in several; vregs used across bblocks get a MonoInst variable
 * created for them. (2) Convert variables used in only one bblock back
 * into local vregs. (3) Compact cfg->varinfo / cfg->vars, dropping
 * entries flagged MONO_INST_IS_DEAD.
 */
10440 mono_handle_global_vregs (MonoCompile *cfg)
10442 gint32 *vreg_to_bb;
10443 MonoBasicBlock *bb;
/* NOTE(review): element size is sizeof (gint32*) although the array is
 * gint32[], and the "+ 1" binds outside the multiplication — looks like
 * it should be sizeof (gint32) * (cfg->next_vreg + 1). Over-allocates
 * (pointer >= gint32 on supported targets) so it is benign, but confirm
 * against upstream. */
10446 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
10448 #ifdef MONO_ARCH_SIMD_INTRINSICS
10449 if (cfg->uses_simd_intrinsics)
10450 mono_simd_simplify_indirection (cfg);
10453 /* Find local vregs used in more than one bb */
10454 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10455 MonoInst *ins = bb->code;
10456 int block_num = bb->block_num;
10458 if (cfg->verbose_level > 2)
10459 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
10462 for (; ins; ins = ins->next) {
10463 const char *spec = INS_INFO (ins->opcode);
10464 int regtype = 0, regindex;
10467 if (G_UNLIKELY (cfg->verbose_level > 2))
10468 mono_print_ins (ins);
/* all CIL opcodes must have been lowered to IR by this point */
10470 g_assert (ins->opcode >= MONO_CEE_LAST);
/* visit dreg, sreg1, sreg2, sreg3 in turn */
10472 for (regindex = 0; regindex < 4; regindex ++) {
10475 if (regindex == 0) {
10476 regtype = spec [MONO_INST_DEST];
10477 if (regtype == ' ')
10480 } else if (regindex == 1) {
10481 regtype = spec [MONO_INST_SRC1];
10482 if (regtype == ' ')
10485 } else if (regindex == 2) {
10486 regtype = spec [MONO_INST_SRC2];
10487 if (regtype == ' ')
10490 } else if (regindex == 3) {
10491 regtype = spec [MONO_INST_SRC3];
10492 if (regtype == ' ')
10497 #if SIZEOF_REGISTER == 4
10498 /* In the LLVM case, the long opcodes are not decomposed */
10499 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
10501 * Since some instructions reference the original long vreg,
10502 * and some reference the two component vregs, it is quite hard
10503 * to determine when it needs to be global. So be conservative.
10505 if (!get_vreg_to_inst (cfg, vreg)) {
10506 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10508 if (cfg->verbose_level > 2)
10509 printf ("LONG VREG R%d made global.\n", vreg);
10513 * Make the component vregs volatile since the optimizations can
10514 * get confused otherwise.
10516 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
10517 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
10521 g_assert (vreg != -1);
10523 prev_bb = vreg_to_bb [vreg];
10524 if (prev_bb == 0) {
10525 /* 0 is a valid block num */
10526 vreg_to_bb [vreg] = block_num + 1;
10527 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* hardware registers are never made global */
10528 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
10531 if (!get_vreg_to_inst (cfg, vreg)) {
10532 if (G_UNLIKELY (cfg->verbose_level > 2))
10533 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* variable type chosen by regtype: int, long, double, or the
 * instruction's klass (the selecting case labels are elided here) */
10537 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
10540 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10543 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
10546 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
10549 g_assert_not_reached ();
10553 /* Flag as having been used in more than one bb */
10554 vreg_to_bb [vreg] = -1;
10560 /* If a variable is used in only one bblock, convert it into a local vreg */
10561 for (i = 0; i < cfg->num_varinfo; i++) {
10562 MonoInst *var = cfg->varinfo [i];
10563 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
10565 switch (var->type) {
10571 #if SIZEOF_REGISTER == 8
10574 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
10575 /* Enabling this screws up the fp stack on x86 */
10578 /* Arguments are implicitly global */
10579 /* Putting R4 vars into registers doesn't work currently */
10580 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
10582 * Make that the variable's liveness interval doesn't contain a call, since
10583 * that would cause the lvreg to be spilled, making the whole optimization
10586 /* This is too slow for JIT compilation */
10588 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
10590 int def_index, call_index, ins_index;
10591 gboolean spilled = FALSE;
10596 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
10597 const char *spec = INS_INFO (ins->opcode);
10599 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
10600 def_index = ins_index;
/* NOTE(review): both arms of this || test SRC1/sreg1 — the second arm
 * was almost certainly meant to test SRC2/sreg2; confirm against
 * upstream Mono, where this was fixed. */
10602 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
10603 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
10604 if (call_index > def_index) {
10610 if (MONO_IS_CALL (ins))
10611 call_index = ins_index;
10621 if (G_UNLIKELY (cfg->verbose_level > 2))
10622 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
10623 var->flags |= MONO_INST_IS_DEAD;
10624 cfg->vreg_to_inst [var->dreg] = NULL;
10631 * Compress the varinfo and vars tables so the liveness computation is faster and
10632 * takes up less space.
10635 for (i = 0; i < cfg->num_varinfo; ++i) {
10636 MonoInst *var = cfg->varinfo [i];
10637 if (pos < i && cfg->locals_start == i)
10638 cfg->locals_start = pos;
10639 if (!(var->flags & MONO_INST_IS_DEAD)) {
10641 cfg->varinfo [pos] = cfg->varinfo [i];
10642 cfg->varinfo [pos]->inst_c0 = pos;
10643 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
10644 cfg->vars [pos].idx = pos;
10645 #if SIZEOF_REGISTER == 4
10646 if (cfg->varinfo [pos]->type == STACK_I8) {
10647 /* Modify the two component vars too */
10650 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
10651 var1->inst_c0 = pos;
10652 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
10653 var1->inst_c0 = pos;
10660 cfg->num_varinfo = pos;
10661 if (cfg->locals_start > cfg->num_varinfo)
10662 cfg->locals_start = cfg->num_varinfo;
10666 * mono_spill_global_vars:
10668 * Generate spill code for variables which are not allocated to registers,
10669 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10670 * code is generated which could be optimized by the local optimization passes.
10673 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
10675 MonoBasicBlock *bb;
10677 int orig_next_vreg;
10678 guint32 *vreg_to_lvreg;
10680 guint32 i, lvregs_len;
10681 gboolean dest_has_lvreg = FALSE;
10682 guint32 stacktypes [128];
10683 MonoInst **live_range_start, **live_range_end;
10684 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
10686 *need_local_opts = FALSE;
10688 memset (spec2, 0, sizeof (spec2));
10690 /* FIXME: Move this function to mini.c */
10691 stacktypes ['i'] = STACK_PTR;
10692 stacktypes ['l'] = STACK_I8;
10693 stacktypes ['f'] = STACK_R8;
10694 #ifdef MONO_ARCH_SIMD_INTRINSICS
10695 stacktypes ['x'] = STACK_VTYPE;
10698 #if SIZEOF_REGISTER == 4
10699 /* Create MonoInsts for longs */
10700 for (i = 0; i < cfg->num_varinfo; i++) {
10701 MonoInst *ins = cfg->varinfo [i];
10703 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
10704 switch (ins->type) {
10705 #ifdef MONO_ARCH_SOFT_FLOAT
10711 g_assert (ins->opcode == OP_REGOFFSET);
10713 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10715 tree->opcode = OP_REGOFFSET;
10716 tree->inst_basereg = ins->inst_basereg;
10717 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10719 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10721 tree->opcode = OP_REGOFFSET;
10722 tree->inst_basereg = ins->inst_basereg;
10723 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10733 /* FIXME: widening and truncation */
10736 * As an optimization, when a variable allocated to the stack is first loaded into
10737 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10738 * the variable again.
10740 orig_next_vreg = cfg->next_vreg;
10741 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
10742 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10746 * These arrays contain the first and last instructions accessing a given
10748 * Since we emit bblocks in the same order we process them here, and we
10749 * don't split live ranges, these will precisely describe the live range of
10750 * the variable, i.e. the instruction range where a valid value can be found
10751 * in the variables location.
10753 /* FIXME: Only do this if debugging info is requested */
10754 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
10755 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
10756 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10757 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10759 /* Add spill loads/stores */
10760 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10763 if (cfg->verbose_level > 2)
10764 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
10766 /* Clear vreg_to_lvreg array */
10767 for (i = 0; i < lvregs_len; i++)
10768 vreg_to_lvreg [lvregs [i]] = 0;
10772 MONO_BB_FOR_EACH_INS (bb, ins) {
10773 const char *spec = INS_INFO (ins->opcode);
10774 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
10775 gboolean store, no_lvreg;
10776 int sregs [MONO_MAX_SRC_REGS];
10778 if (G_UNLIKELY (cfg->verbose_level > 2))
10779 mono_print_ins (ins);
10781 if (ins->opcode == OP_NOP)
10785 * We handle LDADDR here as well, since it can only be decomposed
10786 * when variable addresses are known.
10788 if (ins->opcode == OP_LDADDR) {
10789 MonoInst *var = ins->inst_p0;
10791 if (var->opcode == OP_VTARG_ADDR) {
10792 /* Happens on SPARC/S390 where vtypes are passed by reference */
10793 MonoInst *vtaddr = var->inst_left;
10794 if (vtaddr->opcode == OP_REGVAR) {
10795 ins->opcode = OP_MOVE;
10796 ins->sreg1 = vtaddr->dreg;
10798 else if (var->inst_left->opcode == OP_REGOFFSET) {
10799 ins->opcode = OP_LOAD_MEMBASE;
10800 ins->inst_basereg = vtaddr->inst_basereg;
10801 ins->inst_offset = vtaddr->inst_offset;
10805 g_assert (var->opcode == OP_REGOFFSET);
10807 ins->opcode = OP_ADD_IMM;
10808 ins->sreg1 = var->inst_basereg;
10809 ins->inst_imm = var->inst_offset;
10812 *need_local_opts = TRUE;
10813 spec = INS_INFO (ins->opcode);
10816 if (ins->opcode < MONO_CEE_LAST) {
10817 mono_print_ins (ins);
10818 g_assert_not_reached ();
10822 * Store opcodes have destbasereg in the dreg, but in reality, it is an
10826 if (MONO_IS_STORE_MEMBASE (ins)) {
10827 tmp_reg = ins->dreg;
10828 ins->dreg = ins->sreg2;
10829 ins->sreg2 = tmp_reg;
10832 spec2 [MONO_INST_DEST] = ' ';
10833 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10834 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10835 spec2 [MONO_INST_SRC3] = ' ';
10837 } else if (MONO_IS_STORE_MEMINDEX (ins))
10838 g_assert_not_reached ();
10843 if (G_UNLIKELY (cfg->verbose_level > 2)) {
10844 printf ("\t %.3s %d", spec, ins->dreg);
10845 num_sregs = mono_inst_get_src_registers (ins, sregs);
10846 for (srcindex = 0; srcindex < 3; ++srcindex)
10847 printf (" %d", sregs [srcindex]);
10854 regtype = spec [MONO_INST_DEST];
10855 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
10858 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
10859 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
10860 MonoInst *store_ins;
10862 MonoInst *def_ins = ins;
10863 int dreg = ins->dreg; /* The original vreg */
10865 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
10867 if (var->opcode == OP_REGVAR) {
10868 ins->dreg = var->dreg;
10869 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
10871 * Instead of emitting a load+store, use a _membase opcode.
10873 g_assert (var->opcode == OP_REGOFFSET);
10874 if (ins->opcode == OP_MOVE) {
10878 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
10879 ins->inst_basereg = var->inst_basereg;
10880 ins->inst_offset = var->inst_offset;
10883 spec = INS_INFO (ins->opcode);
10887 g_assert (var->opcode == OP_REGOFFSET);
10889 prev_dreg = ins->dreg;
10891 /* Invalidate any previous lvreg for this vreg */
10892 vreg_to_lvreg [ins->dreg] = 0;
10896 #ifdef MONO_ARCH_SOFT_FLOAT
10897 if (store_opcode == OP_STORER8_MEMBASE_REG) {
10899 store_opcode = OP_STOREI8_MEMBASE_REG;
10903 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
10905 if (regtype == 'l') {
10906 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
10907 mono_bblock_insert_after_ins (bb, ins, store_ins);
10908 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
10909 mono_bblock_insert_after_ins (bb, ins, store_ins);
10910 def_ins = store_ins;
10913 g_assert (store_opcode != OP_STOREV_MEMBASE);
10915 /* Try to fuse the store into the instruction itself */
10916 /* FIXME: Add more instructions */
10917 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
10918 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
10919 ins->inst_imm = ins->inst_c0;
10920 ins->inst_destbasereg = var->inst_basereg;
10921 ins->inst_offset = var->inst_offset;
10922 spec = INS_INFO (ins->opcode);
10923 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
10924 ins->opcode = store_opcode;
10925 ins->inst_destbasereg = var->inst_basereg;
10926 ins->inst_offset = var->inst_offset;
10930 tmp_reg = ins->dreg;
10931 ins->dreg = ins->sreg2;
10932 ins->sreg2 = tmp_reg;
10935 spec2 [MONO_INST_DEST] = ' ';
10936 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10937 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10938 spec2 [MONO_INST_SRC3] = ' ';
10940 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
10941 // FIXME: The backends expect the base reg to be in inst_basereg
10942 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
10944 ins->inst_basereg = var->inst_basereg;
10945 ins->inst_offset = var->inst_offset;
10946 spec = INS_INFO (ins->opcode);
10948 /* printf ("INS: "); mono_print_ins (ins); */
10949 /* Create a store instruction */
10950 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
10952 /* Insert it after the instruction */
10953 mono_bblock_insert_after_ins (bb, ins, store_ins);
10955 def_ins = store_ins;
10958 * We can't assign ins->dreg to var->dreg here, since the
10959 * sregs could use it. So set a flag, and do it after
10962 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
10963 dest_has_lvreg = TRUE;
10968 if (def_ins && !live_range_start [dreg]) {
10969 live_range_start [dreg] = def_ins;
10970 live_range_start_bb [dreg] = bb;
10977 num_sregs = mono_inst_get_src_registers (ins, sregs);
10978 for (srcindex = 0; srcindex < 3; ++srcindex) {
10979 regtype = spec [MONO_INST_SRC1 + srcindex];
10980 sreg = sregs [srcindex];
10982 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
10983 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
10984 MonoInst *var = get_vreg_to_inst (cfg, sreg);
10985 MonoInst *use_ins = ins;
10986 MonoInst *load_ins;
10987 guint32 load_opcode;
10989 if (var->opcode == OP_REGVAR) {
10990 sregs [srcindex] = var->dreg;
10991 //mono_inst_set_src_registers (ins, sregs);
10992 live_range_end [sreg] = use_ins;
10993 live_range_end_bb [sreg] = bb;
10997 g_assert (var->opcode == OP_REGOFFSET);
10999 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
11001 g_assert (load_opcode != OP_LOADV_MEMBASE);
11003 if (vreg_to_lvreg [sreg]) {
11004 g_assert (vreg_to_lvreg [sreg] != -1);
11006 /* The variable is already loaded to an lvreg */
11007 if (G_UNLIKELY (cfg->verbose_level > 2))
11008 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
11009 sregs [srcindex] = vreg_to_lvreg [sreg];
11010 //mono_inst_set_src_registers (ins, sregs);
11014 /* Try to fuse the load into the instruction */
11015 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
11016 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
11017 sregs [0] = var->inst_basereg;
11018 //mono_inst_set_src_registers (ins, sregs);
11019 ins->inst_offset = var->inst_offset;
11020 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
11021 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
11022 sregs [1] = var->inst_basereg;
11023 //mono_inst_set_src_registers (ins, sregs);
11024 ins->inst_offset = var->inst_offset;
11026 if (MONO_IS_REAL_MOVE (ins)) {
11027 ins->opcode = OP_NOP;
11030 //printf ("%d ", srcindex); mono_print_ins (ins);
11032 sreg = alloc_dreg (cfg, stacktypes [regtype]);
11034 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
11035 if (var->dreg == prev_dreg) {
11037 * sreg refers to the value loaded by the load
11038 * emitted below, but we need to use ins->dreg
11039 * since it refers to the store emitted earlier.
11043 g_assert (sreg != -1);
11044 vreg_to_lvreg [var->dreg] = sreg;
11045 g_assert (lvregs_len < 1024);
11046 lvregs [lvregs_len ++] = var->dreg;
11050 sregs [srcindex] = sreg;
11051 //mono_inst_set_src_registers (ins, sregs);
11053 if (regtype == 'l') {
11054 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
11055 mono_bblock_insert_before_ins (bb, ins, load_ins);
11056 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
11057 mono_bblock_insert_before_ins (bb, ins, load_ins);
11058 use_ins = load_ins;
11061 #if SIZEOF_REGISTER == 4
11062 g_assert (load_opcode != OP_LOADI8_MEMBASE);
11064 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
11065 mono_bblock_insert_before_ins (bb, ins, load_ins);
11066 use_ins = load_ins;
11070 if (var->dreg < orig_next_vreg) {
11071 live_range_end [var->dreg] = use_ins;
11072 live_range_end_bb [var->dreg] = bb;
11076 mono_inst_set_src_registers (ins, sregs);
11078 if (dest_has_lvreg) {
11079 g_assert (ins->dreg != -1);
11080 vreg_to_lvreg [prev_dreg] = ins->dreg;
11081 g_assert (lvregs_len < 1024);
11082 lvregs [lvregs_len ++] = prev_dreg;
11083 dest_has_lvreg = FALSE;
11087 tmp_reg = ins->dreg;
11088 ins->dreg = ins->sreg2;
11089 ins->sreg2 = tmp_reg;
11092 if (MONO_IS_CALL (ins)) {
11093 /* Clear vreg_to_lvreg array */
11094 for (i = 0; i < lvregs_len; i++)
11095 vreg_to_lvreg [lvregs [i]] = 0;
11097 } else if (ins->opcode == OP_NOP) {
11099 MONO_INST_NULLIFY_SREGS (ins);
11102 if (cfg->verbose_level > 2)
11103 mono_print_ins_index (1, ins);
11107 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
11109 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
11110 * by storing the current native offset into MonoMethodVar->live_range_start/end.
11112 for (i = 0; i < cfg->num_varinfo; ++i) {
11113 int vreg = MONO_VARINFO (cfg, i)->vreg;
11116 if (live_range_start [vreg]) {
11117 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
11119 ins->inst_c1 = vreg;
11120 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
11122 if (live_range_end [vreg]) {
11123 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
11125 ins->inst_c1 = vreg;
11126 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
11131 g_free (live_range_start);
11132 g_free (live_range_end);
11133 g_free (live_range_start_bb);
11134 g_free (live_range_end_bb);
11139 * - use 'iadd' instead of 'int_add'
11140 * - handling ovf opcodes: decompose in method_to_ir.
11141 * - unify iregs/fregs
11142 * -> partly done, the missing parts are:
11143 * - a more complete unification would involve unifying the hregs as well, so
11144 * code wouldn't need if (fp) all over the place. but that would mean the hregs
11145 * would no longer map to the machine hregs, so the code generators would need to
11146 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
11147 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
11148 * fp/non-fp branches speeds it up by about 15%.
11149 * - use sext/zext opcodes instead of shifts
11151 * - get rid of TEMPLOADs if possible and use vregs instead
11152 * - clean up usage of OP_P/OP_ opcodes
11153 * - cleanup usage of DUMMY_USE
11154 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
11156 * - set the stack type and allocate a dreg in the EMIT_NEW macros
11157 * - get rid of all the <foo>2 stuff when the new JIT is ready.
11158 * - make sure handle_stack_args () is called before the branch is emitted
11159 * - when the new IR is done, get rid of all unused stuff
11160 * - COMPARE/BEQ as separate instructions or unify them ?
11161 * - keeping them separate allows specialized compare instructions like
11162 * compare_imm, compare_membase
11163 * - most back ends unify fp compare+branch, fp compare+ceq
11164 * - integrate mono_save_args into inline_method
11165 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
11166 * - handle long shift opts on 32 bit platforms somehow: they require
11167 * 3 sregs (2 for arg1 and 1 for arg2)
11168 * - make byref a 'normal' type.
11169 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
11170 * variable if needed.
11171 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11172 * like inline_method.
11173 * - remove inlining restrictions
11174 * - fix LNEG and enable cfold of INEG
11175 * - generalize x86 optimizations like ldelema as a peephole optimization
11176 * - add store_mem_imm for amd64
11177 * - optimize the loading of the interruption flag in the managed->native wrappers
11178 * - avoid special handling of OP_NOP in passes
11179 * - move code inserting instructions into one function/macro.
11180 * - try a coalescing phase after liveness analysis
11181 * - add float -> vreg conversion + local optimizations on !x86
11182 * - figure out how to handle decomposed branches during optimizations, ie.
11183 * compare+branch, op_jump_table+op_br etc.
11184 * - promote RuntimeXHandles to vregs
11185 * - vtype cleanups:
11186 * - add a NEW_VARLOADA_VREG macro
11187 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11188 * accessing vtype fields.
11189 * - get rid of I8CONST on 64 bit platforms
11190 * - dealing with the increase in code size due to branches created during opcode
11192 * - use extended basic blocks
11193 * - all parts of the JIT
11194 * - handle_global_vregs () && local regalloc
11195 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11196 * - sources of increase in code size:
11199 * - isinst and castclass
11200 * - lvregs not allocated to global registers even if used multiple times
11201 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11203 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11204 * - add all micro optimizations from the old JIT
11205 * - put tree optimizations into the deadce pass
11206 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11207 * specific function.
11208 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11209 * fcompare + branchCC.
11210 * - create a helper function for allocating a stack slot, taking into account
11211 * MONO_CFG_HAS_SPILLUP.
11213 * - merge the ia64 switch changes.
11214 * - optimize mono_regstate2_alloc_int/float.
11215 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11216 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11217 * parts of the tree could be separated by other instructions, killing the tree
11218 * arguments, or stores killing loads etc. Also, should we fold loads into other
11219 * instructions if the result of the load is used multiple times ?
11220 * - make the REM_IMM optimization in mini-x86.c arch-independent.
11221 * - LAST MERGE: 108395.
11222 * - when returning vtypes in registers, generate IR and append it to the end of the
11223 * last bb instead of doing it in the epilog.
11224 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
11232 - When to decompose opcodes:
11233 - earlier: this makes some optimizations hard to implement, since the low level IR
11234 no longer contains the necessary information. But it is easier to do.
11235 - later: harder to implement, enables more optimizations.
11236 - Branches inside bblocks:
11237 - created when decomposing complex opcodes.
11238 - branches to another bblock: harmless, but not tracked by the branch
11239 optimizations, so need to branch to a label at the start of the bblock.
11240 - branches to inside the same bblock: very problematic, trips up the local
11241 reg allocator. Can be fixed by splitting the current bblock, but that is a
11242 complex operation, since some local vregs can become global vregs etc.
11243 - Local/global vregs:
11244 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11245 local register allocator.
11246 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11247 structure, created by mono_create_var (). Assigned to hregs or the stack by
11248 the global register allocator.
11249 - When to do optimizations like alu->alu_imm:
11250 - earlier -> saves work later on since the IR will be smaller/simpler
11251 - later -> can work on more instructions
11252 - Handling of valuetypes:
11253 - When a vtype is pushed on the stack, a new temporary is created, an
11254 instruction computing its address (LDADDR) is emitted and pushed on
11255 the stack. Need to optimize cases when the vtype is used immediately as in
11256 argument passing, stloc etc.
11257 - Instead of the to_end stuff in the old JIT, simply call the function handling
11258 the values on the stack before emitting the last instruction of the bb.
11261 #endif /* DISABLE_JIT */