/*
 * method-to-ir.c: Convert CIL to the JIT internal representation
 *
 * Authors:
 *   Paolo Molaro (lupus@ximian.com)
 *   Dietmar Maurer (dietmar@ximian.com)
 *
 * (C) 2002 Ximian, Inc.
 */
22 #ifdef HAVE_SYS_TIME_H
30 #include <mono/utils/memcheck.h>
32 #include <mono/metadata/assembly.h>
33 #include <mono/metadata/loader.h>
34 #include <mono/metadata/tabledefs.h>
35 #include <mono/metadata/class.h>
36 #include <mono/metadata/object.h>
37 #include <mono/metadata/exception.h>
38 #include <mono/metadata/opcodes.h>
39 #include <mono/metadata/mono-endian.h>
40 #include <mono/metadata/tokentype.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/marshal.h>
43 #include <mono/metadata/debug-helpers.h>
44 #include <mono/metadata/mono-debug.h>
45 #include <mono/metadata/gc-internal.h>
46 #include <mono/metadata/security-manager.h>
47 #include <mono/metadata/threads-types.h>
48 #include <mono/metadata/security-core-clr.h>
49 #include <mono/metadata/monitor.h>
50 #include <mono/metadata/profiler-private.h>
51 #include <mono/metadata/profiler.h>
52 #include <mono/utils/mono-compiler.h>
59 #include "jit-icalls.h"
61 #include "debugger-agent.h"
63 #define BRANCH_COST 100
64 #define INLINE_LENGTH_LIMIT 20
65 #define INLINE_FAILURE do {\
66 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
69 #define CHECK_CFG_EXCEPTION do {\
70 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
/*
 * METHOD_ACCESS_FAILURE:
 * Record a method-access compile failure on 'cfg' (callee 'cil_method' is not
 * accessible from the caller 'method'), build the human-readable message from
 * the two full method names, free the temporary name strings, and jump to the
 * enclosing function's exception_exit label.
 * NOTE(review): the macro's closing "} while (0)" is not visible in this chunk.
 */
73 #define METHOD_ACCESS_FAILURE do { \
74 char *method_fname = mono_method_full_name (method, TRUE); \
75 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
76 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
77 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
78 g_free (method_fname); \
79 g_free (cil_method_fname); \
80 goto exception_exit; \
/*
 * FIELD_ACCESS_FAILURE:
 * Same pattern as METHOD_ACCESS_FAILURE, but for an inaccessible field:
 * set MONO_EXCEPTION_FIELD_ACCESS on 'cfg' with a formatted message, release
 * the temporary name strings, and bail out to exception_exit.
 * NOTE(review): the macro's closing "} while (0)" is not visible in this chunk.
 */
82 #define FIELD_ACCESS_FAILURE do { \
83 char *method_fname = mono_method_full_name (method, TRUE); \
84 char *field_fname = mono_field_full_name (field); \
85 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
86 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
87 g_free (method_fname); \
88 g_free (field_fname); \
89 goto exception_exit; \
/*
 * GENERIC_SHARING_FAILURE:
 * Abort compilation of a generic-shared method when 'opcode' cannot be
 * handled under generic sharing: optionally log the failing method/opcode
 * (verbose_level > 2), mark the cfg with
 * MONO_EXCEPTION_GENERIC_SHARING_FAILED and jump to exception_exit.
 * Only acts when cfg->generic_sharing_context is set.
 */
91 #define GENERIC_SHARING_FAILURE(opcode) do { \
92 if (cfg->generic_sharing_context) { \
93 if (cfg->verbose_level > 2) \
94 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
95 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
96 goto exception_exit; \
100 /* Determine whether 'ins' represents a load of the 'this' argument: an OP_MOVE whose source is the first argument's vreg, in a method whose signature has an implicit this. */
101 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
103 static int ldind_to_load_membase (int opcode);
104 static int stind_to_store_membase (int opcode);
106 int mono_op_to_op_imm (int opcode);
107 int mono_op_to_op_imm_noemul (int opcode);
109 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
110 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
111 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
113 /* helper methods signature */
114 extern MonoMethodSignature *helper_sig_class_init_trampoline;
115 extern MonoMethodSignature *helper_sig_domain_get;
116 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
117 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
118 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
119 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
122 * Instruction metadata
130 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
131 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
137 #if SIZEOF_REGISTER == 8
142 /* keep in sync with the enum in mini.h */
145 #include "mini-ops.h"
150 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
151 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
153 * This should contain the index of the last sreg + 1. This is not the same
154 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
156 const gint8 ins_sreg_counts[] = {
157 #include "mini-ops.h"
162 #define MONO_INIT_VARINFO(vi,id) do { \
163 (vi)->range.first_use.pos.bid = 0xffff; \
169 mono_inst_set_src_registers (MonoInst *ins, int *regs)
171 ins->sreg1 = regs [0];
172 ins->sreg2 = regs [1];
173 ins->sreg3 = regs [2];
/*
 * Public wrappers over the file-local register allocators: each one
 * allocates a fresh virtual register of the given kind on 'cfg'.
 */
/* Allocate a new integer vreg. */
177 mono_alloc_ireg (MonoCompile *cfg)
179 return alloc_ireg (cfg);
/* Allocate a new floating-point vreg. */
183 mono_alloc_freg (MonoCompile *cfg)
185 return alloc_freg (cfg);
/* Allocate a new pointer-sized vreg. */
189 mono_alloc_preg (MonoCompile *cfg)
191 return alloc_preg (cfg);
/* Allocate a destination vreg appropriate for the given eval-stack type. */
195 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
197 return alloc_dreg (cfg, stack_type);
201 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
207 switch (type->type) {
210 case MONO_TYPE_BOOLEAN:
222 case MONO_TYPE_FNPTR:
224 case MONO_TYPE_CLASS:
225 case MONO_TYPE_STRING:
226 case MONO_TYPE_OBJECT:
227 case MONO_TYPE_SZARRAY:
228 case MONO_TYPE_ARRAY:
232 #if SIZEOF_REGISTER == 8
241 case MONO_TYPE_VALUETYPE:
242 if (type->data.klass->enumtype) {
243 type = mono_class_enum_basetype (type->data.klass);
246 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
249 case MONO_TYPE_TYPEDBYREF:
251 case MONO_TYPE_GENERICINST:
252 type = &type->data.generic_class->container_class->byval_arg;
256 g_assert (cfg->generic_sharing_context);
259 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
265 mono_print_bb (MonoBasicBlock *bb, const char *msg)
270 printf ("\n%s %d: [IN: ", msg, bb->block_num);
271 for (i = 0; i < bb->in_count; ++i)
272 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
274 for (i = 0; i < bb->out_count; ++i)
275 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
277 for (tree = bb->code; tree; tree = tree->next)
278 mono_print_ins_index (-1, tree);
282 * Can't put this at the beginning, since other files reference stuff from this
/* On unverified IL: break into the debugger when the 'break_on_unverified' debug option is set, otherwise jump to the enclosing function's 'unverified' label. */
287 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
289 #define GET_BBLOCK(cfg,tblock,ip) do { \
290 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
292 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
293 NEW_BBLOCK (cfg, (tblock)); \
294 (tblock)->cil_code = (ip); \
295 ADD_BBLOCK (cfg, (tblock)); \
299 #if defined(TARGET_X86) || defined(TARGET_AMD64)
300 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
301 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
302 (dest)->dreg = alloc_preg ((cfg)); \
303 (dest)->sreg1 = (sr1); \
304 (dest)->sreg2 = (sr2); \
305 (dest)->inst_imm = (imm); \
306 (dest)->backend.shift_amount = (shift); \
307 MONO_ADD_INS ((cfg)->cbb, (dest)); \
311 #if SIZEOF_REGISTER == 8
312 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
313 /* FIXME: Need to add many more cases */ \
314 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
316 int dr = alloc_preg (cfg); \
317 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
318 (ins)->sreg2 = widen->dreg; \
322 #define ADD_WIDEN_OP(ins, arg1, arg2)
325 #define ADD_BINOP(op) do { \
326 MONO_INST_NEW (cfg, ins, (op)); \
328 ins->sreg1 = sp [0]->dreg; \
329 ins->sreg2 = sp [1]->dreg; \
330 type_from_op (ins, sp [0], sp [1]); \
332 /* Have to insert a widening op */ \
333 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
334 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
335 MONO_ADD_INS ((cfg)->cbb, (ins)); \
336 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
339 #define ADD_UNOP(op) do { \
340 MONO_INST_NEW (cfg, ins, (op)); \
342 ins->sreg1 = sp [0]->dreg; \
343 type_from_op (ins, sp [0], NULL); \
345 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
346 MONO_ADD_INS ((cfg)->cbb, (ins)); \
347 *sp++ = mono_decompose_opcode (cfg, ins); \
350 #define ADD_BINCOND(next_block) do { \
353 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
354 cmp->sreg1 = sp [0]->dreg; \
355 cmp->sreg2 = sp [1]->dreg; \
356 type_from_op (cmp, sp [0], sp [1]); \
358 type_from_op (ins, sp [0], sp [1]); \
359 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
360 GET_BBLOCK (cfg, tblock, target); \
361 link_bblock (cfg, bblock, tblock); \
362 ins->inst_true_bb = tblock; \
363 if ((next_block)) { \
364 link_bblock (cfg, bblock, (next_block)); \
365 ins->inst_false_bb = (next_block); \
366 start_new_bblock = 1; \
368 GET_BBLOCK (cfg, tblock, ip); \
369 link_bblock (cfg, bblock, tblock); \
370 ins->inst_false_bb = tblock; \
371 start_new_bblock = 2; \
373 if (sp != stack_start) { \
374 handle_stack_args (cfg, stack_start, sp - stack_start); \
375 CHECK_UNVERIFIABLE (cfg); \
377 MONO_ADD_INS (bblock, cmp); \
378 MONO_ADD_INS (bblock, ins); \
382 * link_bblock: Links two basic blocks
384 * links two basic blocks in the control flow graph, the 'from'
385 * argument is the starting block and the 'to' argument is the block
386 * the control flow ends to after 'from'.
389 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
391 MonoBasicBlock **newa;
/* Verbose tracing of the edge being added (entry/exit are the synthetic bblocks). */
395 if (from->cil_code) {
397 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
399 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
402 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
404 printf ("edge from entry to exit\n");
/* Skip adding the out-edge if it already exists. */
409 for (i = 0; i < from->out_count; ++i) {
410 if (to == from->out_bb [i]) {
/* Grow from->out_bb by one slot; arrays live in the cfg mempool, so the old array is simply abandoned rather than freed. */
416 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
417 for (i = 0; i < from->out_count; ++i) {
418 newa [i] = from->out_bb [i];
/* Mirror the edge on the target: skip if the in-edge already exists. */
426 for (i = 0; i < to->in_count; ++i) {
427 if (from == to->in_bb [i]) {
/* Grow to->in_bb the same way. */
433 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
434 for (i = 0; i < to->in_count; ++i) {
435 newa [i] = to->in_bb [i];
444 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
446 link_bblock (cfg, from, to);
450 * mono_find_block_region:
452 * We mark each basic block with a region ID. We use that to avoid BB
453 * optimizations when blocks are in different regions.
456 * A region token that encodes where this region is, and information
457 * about the clause owner for this block.
459 * The region encodes the try/catch/filter clause that owns this block
460 * as well as the type. -1 is a special value that represents a block
461 * that is in none of try/catch/filter.
464 mono_find_block_region (MonoCompile *cfg, int offset)
466 MonoMethod *method = cfg->method;
467 MonoMethodHeader *header = mono_method_get_header (method);
468 MonoExceptionClause *clause;
/* Walk the method's EH clauses; the first clause containing 'offset' wins. */
471 for (i = 0; i < header->num_clauses; ++i) {
472 clause = &header->clauses [i];
/* Inside a filter block: filters span [filter_offset, handler_offset). */
473 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
474 (offset < (clause->handler_offset)))
475 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
/* Inside a handler block: distinguish finally/fault/catch in the region tag. */
477 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
478 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
479 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
480 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
481 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
483 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Inside the protected (try) range of the clause. */
486 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
487 return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block:
 * Collect the handler bblocks of every EH clause of kind 'type' that
 * contains 'ip' but not 'target' — i.e. the handlers that must run when
 * control leaves their protected region on the way to 'target'.
 * Returns the accumulated GList (appended via g_list_append).
 */
494 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
496 MonoMethod *method = cfg->method;
497 MonoMethodHeader *header = mono_method_get_header (method);
498 MonoExceptionClause *clause;
499 MonoBasicBlock *handler;
503 for (i = 0; i < header->num_clauses; ++i) {
504 clause = &header->clauses [i];
/* The branch exits this clause: 'ip' is inside the try range, 'target' is not. */
505 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
506 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
507 if (clause->flags == type) {
508 handler = cfg->cil_offset_to_bb [clause->handler_offset];
510 res = g_list_append (res, handler);
/*
 * mono_create_spvar_for_region:
 * Return the stack-pointer save variable for the given EH 'region',
 * creating and caching it in cfg->spvars on first use. The variable is
 * marked MONO_INST_INDIRECT so the register allocator leaves it on the stack.
 */
518 mono_create_spvar_for_region (MonoCompile *cfg, int region)
522 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
526 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
527 /* prevent it from being register allocated */
528 var->flags |= MONO_INST_INDIRECT;
530 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up (without creating) the exception variable for a handler offset. */
534 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
536 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/*
 * mono_create_exvar_for_offset:
 * Like the spvar helper above, but for the object-typed variable that holds
 * the in-flight exception for the handler at 'offset'; cached in cfg->exvars.
 */
540 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
544 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
548 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
549 /* prevent it from being register allocated */
550 var->flags |= MONO_INST_INDIRECT;
552 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
558 * Returns the type used in the eval stack when @type is loaded.
559 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
562 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
566 inst->klass = klass = mono_class_from_mono_type (type);
568 inst->type = STACK_MP;
573 switch (type->type) {
575 inst->type = STACK_INV;
579 case MONO_TYPE_BOOLEAN:
585 inst->type = STACK_I4;
590 case MONO_TYPE_FNPTR:
591 inst->type = STACK_PTR;
593 case MONO_TYPE_CLASS:
594 case MONO_TYPE_STRING:
595 case MONO_TYPE_OBJECT:
596 case MONO_TYPE_SZARRAY:
597 case MONO_TYPE_ARRAY:
598 inst->type = STACK_OBJ;
602 inst->type = STACK_I8;
606 inst->type = STACK_R8;
608 case MONO_TYPE_VALUETYPE:
609 if (type->data.klass->enumtype) {
610 type = mono_class_enum_basetype (type->data.klass);
614 inst->type = STACK_VTYPE;
617 case MONO_TYPE_TYPEDBYREF:
618 inst->klass = mono_defaults.typed_reference_class;
619 inst->type = STACK_VTYPE;
621 case MONO_TYPE_GENERICINST:
622 type = &type->data.generic_class->container_class->byval_arg;
625 case MONO_TYPE_MVAR :
626 /* FIXME: all the arguments must be references for now,
627 * later look inside cfg and see if the arg num is
630 g_assert (cfg->generic_sharing_context);
631 inst->type = STACK_OBJ;
634 g_error ("unknown type 0x%02x in eval stack type", type->type);
639 * The following tables are used to quickly validate the IL code in type_from_op ().
642 bin_num_table [STACK_MAX] [STACK_MAX] = {
643 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
644 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
645 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
646 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
647 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
648 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
649 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
650 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
655 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
658 /* reduce the size of this table */
660 bin_int_table [STACK_MAX] [STACK_MAX] = {
661 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
662 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
663 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
664 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
665 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
666 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
667 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
668 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
672 bin_comp_table [STACK_MAX] [STACK_MAX] = {
673 /* Inv i L p F & O vt */
675 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
676 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
677 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
678 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
679 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
680 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
681 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
684 /* reduce the size of this table */
686 shift_table [STACK_MAX] [STACK_MAX] = {
687 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
688 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
689 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
690 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
691 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
692 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
693 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
694 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
698 * Tables to map from the non-specific opcode to the matching
699 * type-specific opcode.
701 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
703 binops_op_map [STACK_MAX] = {
704 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
707 /* handles from CEE_NEG to CEE_CONV_U8 */
709 unops_op_map [STACK_MAX] = {
710 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
713 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
715 ovfops_op_map [STACK_MAX] = {
716 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
719 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
721 ovf2ops_op_map [STACK_MAX] = {
722 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
725 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
727 ovf3ops_op_map [STACK_MAX] = {
728 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
731 /* handles from CEE_BEQ to CEE_BLT_UN */
733 beqops_op_map [STACK_MAX] = {
734 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
737 /* handles from CEE_CEQ to CEE_CLT_UN */
739 ceqops_op_map [STACK_MAX] = {
740 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
744 * Sets ins->type (the type on the eval stack) according to the
745 * type of the opcode and the arguments to it.
746 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
748 * FIXME: this function sets ins->type unconditionally in some cases, but
749 * it should set it to invalid for some types (a conv.x on an object)
752 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
754 switch (ins->opcode) {
761 /* FIXME: check unverifiable args for STACK_MP */
762 ins->type = bin_num_table [src1->type] [src2->type];
763 ins->opcode += binops_op_map [ins->type];
770 ins->type = bin_int_table [src1->type] [src2->type];
771 ins->opcode += binops_op_map [ins->type];
776 ins->type = shift_table [src1->type] [src2->type];
777 ins->opcode += binops_op_map [ins->type];
782 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
783 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
784 ins->opcode = OP_LCOMPARE;
785 else if (src1->type == STACK_R8)
786 ins->opcode = OP_FCOMPARE;
788 ins->opcode = OP_ICOMPARE;
790 case OP_ICOMPARE_IMM:
791 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
792 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
793 ins->opcode = OP_LCOMPARE_IMM;
805 ins->opcode += beqops_op_map [src1->type];
808 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
809 ins->opcode += ceqops_op_map [src1->type];
815 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
816 ins->opcode += ceqops_op_map [src1->type];
820 ins->type = neg_table [src1->type];
821 ins->opcode += unops_op_map [ins->type];
824 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
825 ins->type = src1->type;
827 ins->type = STACK_INV;
828 ins->opcode += unops_op_map [ins->type];
834 ins->type = STACK_I4;
835 ins->opcode += unops_op_map [src1->type];
838 ins->type = STACK_R8;
839 switch (src1->type) {
842 ins->opcode = OP_ICONV_TO_R_UN;
845 ins->opcode = OP_LCONV_TO_R_UN;
849 case CEE_CONV_OVF_I1:
850 case CEE_CONV_OVF_U1:
851 case CEE_CONV_OVF_I2:
852 case CEE_CONV_OVF_U2:
853 case CEE_CONV_OVF_I4:
854 case CEE_CONV_OVF_U4:
855 ins->type = STACK_I4;
856 ins->opcode += ovf3ops_op_map [src1->type];
858 case CEE_CONV_OVF_I_UN:
859 case CEE_CONV_OVF_U_UN:
860 ins->type = STACK_PTR;
861 ins->opcode += ovf2ops_op_map [src1->type];
863 case CEE_CONV_OVF_I1_UN:
864 case CEE_CONV_OVF_I2_UN:
865 case CEE_CONV_OVF_I4_UN:
866 case CEE_CONV_OVF_U1_UN:
867 case CEE_CONV_OVF_U2_UN:
868 case CEE_CONV_OVF_U4_UN:
869 ins->type = STACK_I4;
870 ins->opcode += ovf2ops_op_map [src1->type];
873 ins->type = STACK_PTR;
874 switch (src1->type) {
876 ins->opcode = OP_ICONV_TO_U;
880 #if SIZEOF_REGISTER == 8
881 ins->opcode = OP_LCONV_TO_U;
883 ins->opcode = OP_MOVE;
887 ins->opcode = OP_LCONV_TO_U;
890 ins->opcode = OP_FCONV_TO_U;
896 ins->type = STACK_I8;
897 ins->opcode += unops_op_map [src1->type];
899 case CEE_CONV_OVF_I8:
900 case CEE_CONV_OVF_U8:
901 ins->type = STACK_I8;
902 ins->opcode += ovf3ops_op_map [src1->type];
904 case CEE_CONV_OVF_U8_UN:
905 case CEE_CONV_OVF_I8_UN:
906 ins->type = STACK_I8;
907 ins->opcode += ovf2ops_op_map [src1->type];
911 ins->type = STACK_R8;
912 ins->opcode += unops_op_map [src1->type];
915 ins->type = STACK_R8;
919 ins->type = STACK_I4;
920 ins->opcode += ovfops_op_map [src1->type];
925 ins->type = STACK_PTR;
926 ins->opcode += ovfops_op_map [src1->type];
934 ins->type = bin_num_table [src1->type] [src2->type];
935 ins->opcode += ovfops_op_map [src1->type];
936 if (ins->type == STACK_R8)
937 ins->type = STACK_INV;
939 case OP_LOAD_MEMBASE:
940 ins->type = STACK_PTR;
942 case OP_LOADI1_MEMBASE:
943 case OP_LOADU1_MEMBASE:
944 case OP_LOADI2_MEMBASE:
945 case OP_LOADU2_MEMBASE:
946 case OP_LOADI4_MEMBASE:
947 case OP_LOADU4_MEMBASE:
948 ins->type = STACK_PTR;
950 case OP_LOADI8_MEMBASE:
951 ins->type = STACK_I8;
953 case OP_LOADR4_MEMBASE:
954 case OP_LOADR8_MEMBASE:
955 ins->type = STACK_R8;
958 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
962 if (ins->type == STACK_MP)
963 ins->klass = mono_defaults.object_class;
968 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
974 param_table [STACK_MAX] [STACK_MAX] = {
979 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
983 switch (args->type) {
993 for (i = 0; i < sig->param_count; ++i) {
994 switch (args [i].type) {
998 if (!sig->params [i]->byref)
1002 if (sig->params [i]->byref)
1004 switch (sig->params [i]->type) {
1005 case MONO_TYPE_CLASS:
1006 case MONO_TYPE_STRING:
1007 case MONO_TYPE_OBJECT:
1008 case MONO_TYPE_SZARRAY:
1009 case MONO_TYPE_ARRAY:
1016 if (sig->params [i]->byref)
1018 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1027 /*if (!param_table [args [i].type] [sig->params [i]->type])
1035 * When we need a pointer to the current domain many times in a method, we
1036 * call mono_domain_get() once and we store the result in a local variable.
1037 * This function returns the variable that represents the MonoDomain*.
1039 inline static MonoInst *
1040 mono_get_domainvar (MonoCompile *cfg)
1042 if (!cfg->domainvar)
1043 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1044 return cfg->domainvar;
1048 * The got_var contains the address of the Global Offset Table when AOT
1052 mono_get_got_var (MonoCompile *cfg)
1054 #ifdef MONO_ARCH_NEED_GOT_VAR
1055 if (!cfg->compile_aot)
1057 if (!cfg->got_var) {
1058 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1060 return cfg->got_var;
1067 mono_get_vtable_var (MonoCompile *cfg)
1069 g_assert (cfg->generic_sharing_context);
1071 if (!cfg->rgctx_var) {
1072 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1073 /* force the var to be stack allocated */
1074 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1077 return cfg->rgctx_var;
1081 type_from_stack_type (MonoInst *ins) {
1082 switch (ins->type) {
1083 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1084 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1085 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1086 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1088 return &ins->klass->this_arg;
1089 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1090 case STACK_VTYPE: return &ins->klass->byval_arg;
1092 g_error ("stack type %d to monotype not handled\n", ins->type);
1097 static G_GNUC_UNUSED int
1098 type_to_stack_type (MonoType *t)
1100 t = mono_type_get_underlying_type (t);
1104 case MONO_TYPE_BOOLEAN:
1107 case MONO_TYPE_CHAR:
1114 case MONO_TYPE_FNPTR:
1116 case MONO_TYPE_CLASS:
1117 case MONO_TYPE_STRING:
1118 case MONO_TYPE_OBJECT:
1119 case MONO_TYPE_SZARRAY:
1120 case MONO_TYPE_ARRAY:
1128 case MONO_TYPE_VALUETYPE:
1129 case MONO_TYPE_TYPEDBYREF:
1131 case MONO_TYPE_GENERICINST:
1132 if (mono_type_generic_inst_is_valuetype (t))
1138 g_assert_not_reached ();
1145 array_access_to_klass (int opcode)
1149 return mono_defaults.byte_class;
1151 return mono_defaults.uint16_class;
1154 return mono_defaults.int_class;
1157 return mono_defaults.sbyte_class;
1160 return mono_defaults.int16_class;
1163 return mono_defaults.int32_class;
1165 return mono_defaults.uint32_class;
1168 return mono_defaults.int64_class;
1171 return mono_defaults.single_class;
1174 return mono_defaults.double_class;
1175 case CEE_LDELEM_REF:
1176 case CEE_STELEM_REF:
1177 return mono_defaults.object_class;
1179 g_assert_not_reached ();
1185 * We try to share variables when possible
1188 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1193 /* inlining can result in deeper stacks */
1194 if (slot >= mono_method_get_header (cfg->method)->max_stack)
1195 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1197 pos = ins->type - 1 + slot * STACK_MAX;
1199 switch (ins->type) {
1206 if ((vnum = cfg->intvars [pos]))
1207 return cfg->varinfo [vnum];
1208 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1209 cfg->intvars [pos] = res->inst_c0;
1212 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 * When compiling AOT, remember the (image, token) pair that produced 'key'
 * in cfg->token_info_hash so the AOT compiler can later re-resolve it.
 * Mempool-allocated, so the entry lives as long as the compilation.
 */
1218 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1221 * Don't use this if a generic_context is set, since that means AOT can't
1222 * look up the method using just the image+token.
1223 * table == 0 means this is a reference made from a wrapper.
1225 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1226 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1227 jump_info_token->image = image;
1228 jump_info_token->token = token;
1229 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1234 * This function is called to handle items that are left on the evaluation stack
1235 * at basic block boundaries. What happens is that we save the values to local variables
1236 * and we reload them later when first entering the target basic block (with the
1237 * handle_loaded_temps () function).
1238 * A single joint point will use the same variables (stored in the array bb->out_stack or
1239 * bb->in_stack, if the basic block is before or after the joint point).
1241 * This function needs to be called _before_ emitting the last instruction of
1242 * the bb (i.e. before emitting a branch).
1243 * If the stack merge fails at a join point, cfg->unverifiable is set.
1246 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1249 MonoBasicBlock *bb = cfg->cbb;
1250 MonoBasicBlock *outb;
1251 MonoInst *inst, **locals;
1256 if (cfg->verbose_level > 3)
1257 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
1258 if (!bb->out_scount) {
1259 bb->out_scount = count;
1260 //printf ("bblock %d has out:", bb->block_num);
1262 for (i = 0; i < bb->out_count; ++i) {
1263 outb = bb->out_bb [i];
1264 /* exception handlers are linked, but they should not be considered for stack args */
1265 if (outb->flags & BB_EXCEPTION_HANDLER)
1267 //printf (" %d", outb->block_num);
1268 if (outb->in_stack) {
1270 bb->out_stack = outb->in_stack;
1276 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1277 for (i = 0; i < count; ++i) {
1279 * try to reuse temps already allocated for this purpouse, if they occupy the same
1280 * stack slot and if they are of the same type.
1281 * This won't cause conflicts since if 'local' is used to
1282 * store one of the values in the in_stack of a bblock, then
1283 * the same variable will be used for the same outgoing stack
1285 * This doesn't work when inlining methods, since the bblocks
1286 * in the inlined methods do not inherit their in_stack from
1287 * the bblock they are inlined to. See bug #58863 for an
1290 if (cfg->inlined_method)
1291 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1293 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
1298 for (i = 0; i < bb->out_count; ++i) {
1299 outb = bb->out_bb [i];
1300 /* exception handlers are linked, but they should not be considered for stack args */
1301 if (outb->flags & BB_EXCEPTION_HANDLER)
1303 if (outb->in_scount) {
1304 if (outb->in_scount != bb->out_scount) {
1305 cfg->unverifiable = TRUE;
1308 continue; /* check they are the same locals */
1310 outb->in_scount = count;
1311 outb->in_stack = bb->out_stack;
1314 locals = bb->out_stack;
1316 for (i = 0; i < count; ++i) {
1317 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1318 inst->cil_code = sp [i]->cil_code;
1319 sp [i] = locals [i];
1320 if (cfg->verbose_level > 3)
1321 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1325 * It is possible that the out bblocks already have in_stack assigned, and
1326 * the in_stacks differ. In this case, we will store to all the different
1333 /* Find a bblock which has a different in_stack */
1335 while (bindex < bb->out_count) {
1336 outb = bb->out_bb [bindex];
1337 /* exception handlers are linked, but they should not be considered for stack args */
1338 if (outb->flags & BB_EXCEPTION_HANDLER) {
1342 if (outb->in_stack != locals) {
1343 for (i = 0; i < count; ++i) {
1344 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1345 inst->cil_code = sp [i]->cil_code;
1346 sp [i] = locals [i];
1347 if (cfg->verbose_level > 3)
1348 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1350 locals = outb->in_stack;
1359 /* Emit code which loads interface_offsets [klass->interface_id]
1360 * The array is stored in memory before vtable.
/*
 * mini_emit_load_intf_reg_vtable:
 *   Load into INTF_REG the interface-offsets entry for KLASS; the table lives
 *   at negative offsets from the vtable pointer held in VTABLE_REG.
 *   NOTE(review): this listing is missing intermediate lines (e.g. the else
 *   header between the two paths); code kept verbatim.
 */
1363 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1365 if (cfg->compile_aot) {
/* AOT: the (adjusted) interface id is only known at load time, so fetch it
 * through a patchable constant and add it to the vtable pointer. */
1366 int ioffset_reg = alloc_preg (cfg);
1367 int iid_reg = alloc_preg (cfg);
1369 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1370 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1371 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT: interface_id is a compile-time constant, so index the table directly
 * at a fixed negative offset from the vtable. */
1374 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
1379 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1380 * stored in "klass_reg" implements the interface "klass".
/*
 * mini_emit_load_intf_bit_reg_class:
 *   Tests the per-class interface bitmap: loads the byte containing bit
 *   klass->interface_id and masks it into INTF_BIT_REG.
 */
1383 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1385 int ibitmap_reg = alloc_preg (cfg);
1386 int ibitmap_byte_reg = alloc_preg (cfg);
1388 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap));
1390 if (cfg->compile_aot) {
/* AOT: interface id is a patchable constant, so compute the byte index
 * (iid >> 3) and bit mask (1 << (iid & 7)) at run time. */
1391 int iid_reg = alloc_preg (cfg);
1392 int shifted_iid_reg = alloc_preg (cfg);
1393 int ibitmap_byte_address_reg = alloc_preg (cfg);
1394 int masked_iid_reg = alloc_preg (cfg);
1395 int iid_one_bit_reg = alloc_preg (cfg);
1396 int iid_bit_reg = alloc_preg (cfg);
1397 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1398 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1399 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1400 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1401 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1402 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1403 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1404 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: byte offset and bit mask are compile-time constants. */
1406 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1407 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1412 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1413 * stored in "vtable_reg" implements the interface "klass".
/*
 * mini_emit_load_intf_bit_reg_vtable:
 *   Same bitmap test as mini_emit_load_intf_bit_reg_class, but reads the
 *   interface_bitmap from a MonoVTable instead of a MonoClass.
 */
1416 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1418 int ibitmap_reg = alloc_preg (cfg);
1419 int ibitmap_byte_reg = alloc_preg (cfg);
1421 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap));
1423 if (cfg->compile_aot) {
/* AOT: compute byte index and bit mask from the patchable interface id. */
1424 int iid_reg = alloc_preg (cfg);
1425 int shifted_iid_reg = alloc_preg (cfg);
1426 int ibitmap_byte_address_reg = alloc_preg (cfg);
1427 int masked_iid_reg = alloc_preg (cfg);
1428 int iid_one_bit_reg = alloc_preg (cfg);
1429 int iid_bit_reg = alloc_preg (cfg);
1430 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1431 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, shifted_iid_reg, iid_reg, 3);
1432 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1433 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1434 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, masked_iid_reg, iid_reg, 7);
1435 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1436 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1437 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: constant byte offset and bit mask. */
1439 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1440 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1445 * Emit code which checks whether the interface id of @klass is smaller than
1446 * the value given by max_iid_reg.
/*
 * mini_emit_max_iid_check:
 *   Range guard on the vtable/class interface table. On failure, either
 *   branches to FALSE_TARGET or raises InvalidCastException — presumably
 *   selected by whether FALSE_TARGET is NULL (the selecting if/else lines
 *   are missing from this listing — confirm against the full file).
 */
1449 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1450 MonoBasicBlock *false_target)
1452 if (cfg->compile_aot) {
1453 int iid_reg = alloc_preg (cfg);
1454 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1455 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1458 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1460 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1462 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1465 /* Same as above, but obtains max_iid from a vtable */
/* Loads MonoVTable.max_interface_id (16-bit unsigned) and delegates to
 * mini_emit_max_iid_check. */
1467 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1468 MonoBasicBlock *false_target)
1470 int max_iid_reg = alloc_preg (cfg);
1472 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1473 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1476 /* Same as above, but obtains max_iid from a klass */
/* Loads MonoClass.max_interface_id and delegates to mini_emit_max_iid_check. */
1478 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1479 MonoBasicBlock *false_target)
1481 int max_iid_reg = alloc_preg (cfg);
1483 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1484 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast:
 *   "isinst"-style check using the supertypes array: the class in KLASS_REG is
 *   a subtype of KLASS iff supertypes[klass->idepth - 1] == klass. Branches to
 *   TRUE_TARGET on match; the idepth guard branches to FALSE_TARGET when the
 *   candidate's inheritance depth is too small (guard only needed when idepth
 *   exceeds the statically-allocated supertable).
 */
1488 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1490 int idepth_reg = alloc_preg (cfg);
1491 int stypes_reg = alloc_preg (cfg);
1492 int stype = alloc_preg (cfg);
1494 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1495 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1496 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1497 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1499 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1500 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1501 if (cfg->compile_aot) {
/* AOT: compare against a patchable class constant. */
1502 int const_reg = alloc_preg (cfg);
1503 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1504 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
/* JIT: the MonoClass* pointer itself is embedded as an immediate. */
1506 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1508 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/*
 * mini_emit_iface_cast:
 *   Interface cast check against a vtable: guard the interface id range, then
 *   test the interface bitmap bit. Nonzero bit => implements. On success
 *   branches to TRUE_TARGET; on failure presumably branches to FALSE_TARGET
 *   or throws InvalidCastException (the selecting lines are missing from this
 *   listing — confirm against the full file).
 */
1512 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1514 int intf_reg = alloc_preg (cfg);
1516 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1517 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1518 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1520 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1522 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1526 * Variant of the above that takes a register to the class, not the vtable.
/* Same interface bitmap test as mini_emit_iface_cast, reading from a
 * MonoClass* in KLASS_REG rather than a vtable. */
1529 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1531 int intf_bit_reg = alloc_preg (cfg);
1533 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1534 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1535 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1537 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1539 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check:
 *   Emit an exact-class equality check: throws InvalidCastException unless
 *   the MonoClass* in KLASS_REG equals KLASS.
 */
1543 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1545 if (cfg->compile_aot) {
/* AOT: load the class through a patchable constant before comparing. */
1546 int const_reg = alloc_preg (cfg);
1547 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1548 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1550 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1552 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/*
 * mini_emit_class_check_branch:
 *   Like mini_emit_class_check, but instead of throwing, emits BRANCH_OP
 *   (e.g. OP_PBEQ / OP_PBNE_UN) to TARGET based on the comparison result.
 */
1556 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1558 if (cfg->compile_aot) {
1559 int const_reg = alloc_preg (cfg);
1560 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1561 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1563 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1565 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/*
 * mini_emit_castclass:
 *   Emit a castclass check for KLASS against the runtime class in KLASS_REG.
 *   OBJ_REG holds the object (or -1 to skip the array-bounds/vector check —
 *   used by the recursive call for arrays of arrays). OBJECT_IS_NULL is the
 *   bblock to branch to when the cast trivially succeeds.
 *   NOTE(review): this listing is missing structural lines (the enclosing
 *   if (klass->rank) / else split and several closing braces); code kept
 *   verbatim — do not edit structure without the full file.
 */
1569 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1572 int rank_reg = alloc_preg (cfg);
1573 int eclass_reg = alloc_preg (cfg);
/* Array path: ranks must match exactly, then the element class is checked. */
1575 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1576 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1577 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1578 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1579 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
1580 if (klass->cast_class == mono_defaults.object_class) {
/* object[] also accepts arrays of any enum whose base type is object-sized;
 * the special cases below mirror the runtime's array covariance rules. */
1581 int parent_reg = alloc_preg (cfg);
1582 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1583 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1584 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1585 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1586 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1587 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1588 } else if (klass->cast_class == mono_defaults.enum_class) {
1589 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1590 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1591 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1593 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1594 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1597 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1598 /* Check that the object is a vector too */
1599 int bounds_reg = alloc_preg (cfg);
1600 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1601 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1602 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: supertypes-array subtype check, throwing on mismatch. */
1605 int idepth_reg = alloc_preg (cfg);
1606 int stypes_reg = alloc_preg (cfg);
1607 int stype = alloc_preg (cfg);
1609 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1610 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1611 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1612 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1614 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1615 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1616 mini_emit_class_check (cfg, stype, klass);
/*
 * mini_emit_memset:
 *   Emit inline code zeroing SIZE bytes at DESTREG+OFFSET (only VAL == 0 is
 *   supported, see the assert). Small aligned sizes use a single immediate
 *   store; otherwise a register holding VAL is stored in word-sized chunks.
 *   NOTE(review): the surrounding switch/loop structure and closing braces
 *   are missing from this listing; code kept verbatim.
 */
1621 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1625 g_assert (val == 0);
/* Fast path: size fits a single store and alignment permits it. */
1630 if ((size <= 4) && (size <= align)) {
1633 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1636 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1639 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1641 #if SIZEOF_REGISTER == 8
1643 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General path: materialize the value in a register sized for the target. */
1649 val_reg = alloc_preg (cfg);
1651 if (SIZEOF_REGISTER == 8)
1652 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1654 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* This could be optimized further if necessary */
1659 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1666 #if !NO_UNALIGNED_ACCESS
1667 if (SIZEOF_REGISTER == 8) {
1669 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1674 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* Descending store widths cover the remaining tail bytes. */
1682 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1687 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1692 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1698 #endif /* DISABLE_JIT */
/*
 * mini_emit_memcpy:
 *   Emit an inline, unrolled copy of SIZE bytes from SRCREG+SOFFSET to
 *   DESTREG+DOFFSET, using the widest load/store pairs the alignment allows.
 *   NOTE(review): loop headers and closing braces are missing from this
 *   listing; code kept verbatim.
 */
1701 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1708 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1709 g_assert (size < 10000);
/* This could be optimized further if necessary */
/* Unaligned fallback: byte-by-byte copy. */
1714 cur_reg = alloc_preg (cfg);
1715 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1716 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1723 #if !NO_UNALIGNED_ACCESS
1724 if (SIZEOF_REGISTER == 8) {
/* 64-bit chunks first where the register width allows. */
1726 cur_reg = alloc_preg (cfg);
1727 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1728 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Then 4-, 2- and 1-byte chunks for the remaining tail. */
1737 cur_reg = alloc_preg (cfg);
1738 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1739 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1745 cur_reg = alloc_preg (cfg);
1746 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1747 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1753 cur_reg = alloc_preg (cfg);
1754 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1755 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * ret_type_to_call_opcode:
 *   Map a method's return TYPE to the appropriate call opcode family
 *   (CALL / LCALL / FCALL / VCALL / VOIDCALL), further selected by CALLI
 *   (indirect, *_REG) and VIRT (*CALLVIRT). Enum/generic types are resolved
 *   to their underlying representation first.
 *   NOTE(review): several case labels and break/goto lines are missing from
 *   this listing; code kept verbatim.
 */
1765 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
1768 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1771 type = mini_get_basic_type_from_generic (gsctx, type);
1772 switch (type->type) {
1773 case MONO_TYPE_VOID:
1774 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1777 case MONO_TYPE_BOOLEAN:
1780 case MONO_TYPE_CHAR:
1783 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1787 case MONO_TYPE_FNPTR:
1788 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1789 case MONO_TYPE_CLASS:
1790 case MONO_TYPE_STRING:
1791 case MONO_TYPE_OBJECT:
1792 case MONO_TYPE_SZARRAY:
1793 case MONO_TYPE_ARRAY:
1794 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1797 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1800 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1801 case MONO_TYPE_VALUETYPE:
/* Enums use the opcode of their underlying integer type. */
1802 if (type->data.klass->enumtype) {
1803 type = mono_class_enum_basetype (type->data.klass);
1806 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1807 case MONO_TYPE_TYPEDBYREF:
1808 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1809 case MONO_TYPE_GENERICINST:
1810 type = &type->data.generic_class->container_class->byval_arg;
1813 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1819 * target_type_is_incompatible:
1820 * @cfg: MonoCompile context
1822 * Check that the item @arg on the evaluation stack can be stored
1823 * in the target type (can be a local, or field, etc).
1824 * The cfg arg can be used to check if we need verification or just
1827 * Returns: non-0 value if arg can't be stored on a target.
/* NOTE(review): return statements and several case labels are missing from
 * this listing; code kept verbatim. Dispatches on the underlying target type
 * and compares against the evaluation-stack type (STACK_I4/I8/R8/OBJ/...) of
 * ARG, additionally comparing klass pointers for value types. */
1830 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1832 MonoType *simple_type;
1835 if (target->byref) {
1836 /* FIXME: check that the pointed to types match */
1837 if (arg->type == STACK_MP)
1838 return arg->klass != mono_class_from_mono_type (target);
1839 if (arg->type == STACK_PTR)
1844 simple_type = mono_type_get_underlying_type (target);
1845 switch (simple_type->type) {
1846 case MONO_TYPE_VOID:
1850 case MONO_TYPE_BOOLEAN:
1853 case MONO_TYPE_CHAR:
1856 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1860 /* STACK_MP is needed when setting pinned locals */
1861 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1866 case MONO_TYPE_FNPTR:
1867 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1870 case MONO_TYPE_CLASS:
1871 case MONO_TYPE_STRING:
1872 case MONO_TYPE_OBJECT:
1873 case MONO_TYPE_SZARRAY:
1874 case MONO_TYPE_ARRAY:
1875 if (arg->type != STACK_OBJ)
1877 /* FIXME: check type compatibility */
1881 if (arg->type != STACK_I8)
1886 if (arg->type != STACK_R8)
1889 case MONO_TYPE_VALUETYPE:
1890 if (arg->type != STACK_VTYPE)
1892 klass = mono_class_from_mono_type (simple_type);
1893 if (klass != arg->klass)
1896 case MONO_TYPE_TYPEDBYREF:
1897 if (arg->type != STACK_VTYPE)
1899 klass = mono_class_from_mono_type (simple_type);
1900 if (klass != arg->klass)
1903 case MONO_TYPE_GENERICINST:
1904 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1905 if (arg->type != STACK_VTYPE)
1907 klass = mono_class_from_mono_type (simple_type);
1908 if (klass != arg->klass)
1912 if (arg->type != STACK_OBJ)
1914 /* FIXME: check type compatibility */
1918 case MONO_TYPE_MVAR:
1919 /* FIXME: all the arguments must be references for now,
1920 * later look inside cfg and see if the arg num is
1921 * really a reference
1923 g_assert (cfg->generic_sharing_context);
1924 if (arg->type != STACK_OBJ)
1928 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1934 * Prepare arguments for passing to a function call.
1935 * Return a non-zero value if the arguments can't be passed to the given
1937 * The type checks are not yet complete and some conversions may need
1938 * casts on 32 or 64 bit architectures.
1940 * FIXME: implement this using target_type_is_incompatible ()
/* NOTE(review): return statements, break lines and some case labels are
 * missing from this listing; code kept verbatim. Validates each stack item
 * in ARGS against the corresponding parameter type in SIG (plus an implicit
 * 'this' check via args[0]). */
1943 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1945 MonoType *simple_type;
1949 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1953 for (i = 0; i < sig->param_count; ++i) {
1954 if (sig->params [i]->byref) {
1955 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1959 simple_type = sig->params [i];
1960 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1962 switch (simple_type->type) {
1963 case MONO_TYPE_VOID:
1968 case MONO_TYPE_BOOLEAN:
1971 case MONO_TYPE_CHAR:
1974 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
1980 case MONO_TYPE_FNPTR:
1981 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
1984 case MONO_TYPE_CLASS:
1985 case MONO_TYPE_STRING:
1986 case MONO_TYPE_OBJECT:
1987 case MONO_TYPE_SZARRAY:
1988 case MONO_TYPE_ARRAY:
1989 if (args [i]->type != STACK_OBJ)
1994 if (args [i]->type != STACK_I8)
1999 if (args [i]->type != STACK_R8)
2002 case MONO_TYPE_VALUETYPE:
/* Enums are checked as their underlying basic type. */
2003 if (simple_type->data.klass->enumtype) {
2004 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2007 if (args [i]->type != STACK_VTYPE)
2010 case MONO_TYPE_TYPEDBYREF:
2011 if (args [i]->type != STACK_VTYPE)
2014 case MONO_TYPE_GENERICINST:
2015 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2019 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 *   Map a *CALLVIRT opcode to its direct-call counterpart. NOTE(review):
 *   the switch header, remaining case labels and return statements are
 *   missing from this listing; code kept verbatim.
 */
2027 callvirt_to_call (int opcode)
2032 case OP_VOIDCALLVIRT:
2041 g_assert_not_reached ();
/*
 * callvirt_to_call_membase:
 *   Map a *CALLVIRT opcode to the *CALL_MEMBASE variant used for calls
 *   through a vtable/memory slot. Aborts on unknown opcodes.
 *   NOTE(review): switch header and some case labels are missing from this
 *   listing; code kept verbatim.
 */
2048 callvirt_to_call_membase (int opcode)
2052 return OP_CALL_MEMBASE;
2053 case OP_VOIDCALLVIRT:
2054 return OP_VOIDCALL_MEMBASE;
2056 return OP_FCALL_MEMBASE;
2058 return OP_LCALL_MEMBASE;
2060 return OP_VCALL_MEMBASE;
2062 g_assert_not_reached ();
2068 #ifdef MONO_ARCH_HAVE_IMT
/*
 * emit_imt_argument:
 *   Set up the hidden IMT (interface method table) argument for CALL.
 *   When the architecture dedicates a register (MONO_ARCH_IMT_REG) the
 *   method token is moved/materialized into it; otherwise the arch-specific
 *   hook is invoked. IMT_ARG may already hold the token (generic sharing).
 */
2070 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2072 #ifdef MONO_ARCH_IMT_REG
2073 int method_reg = alloc_preg (cfg);
2076 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2077 } else if (cfg->compile_aot) {
2078 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
/* JIT: embed the MonoMethod* directly as a pointer constant. */
2081 MONO_INST_NEW (cfg, ins, OP_PCONST);
2082 ins->inst_p0 = call->method;
2083 ins->dreg = method_reg;
2084 MONO_ADD_INS (cfg->cbb, ins);
2087 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* No IMT register on this architecture: let the backend decide. */
2089 mono_arch_emit_imt_argument (cfg, call, imt_arg);
2094 static MonoJumpInfo *
/*
 * mono_patch_info_new:
 *   Allocate a MonoJumpInfo from MP and fill it in. NOTE(review): the lines
 *   assigning ip/type and the return are missing from this listing; code
 *   kept verbatim.
 */
2095 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2097 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2101 ji->data.target = target;
2106 inline static MonoCallInst *
/*
 * mono_emit_call_args:
 *   Build a MonoCallInst for a call with signature SIG and arguments ARGS.
 *   CALLI selects an indirect call, VIRTUAL a callvirt opcode, TAIL a
 *   tailcall. Handles valuetype returns (via vret_addr or a VTRETADDR
 *   temporary) and, on soft-float targets, converts r4 arguments before the
 *   call sequence. NOTE(review): several structural lines (if/else headers,
 *   closing braces, the return) are missing from this listing; code kept
 *   verbatim.
 */
2107 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2108 MonoInst **args, int calli, int virtual, int tail)
2111 #ifdef MONO_ARCH_SOFT_FLOAT
2116 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2118 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2121 call->signature = sig;
2123 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
2126 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
/* Valuetype return through the caller-provided return buffer. */
2127 call->vret_var = cfg->vret_addr;
2128 //g_assert_not_reached ();
2130 } else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2131 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2134 temp->backend.is_pinvoke = sig->pinvoke;
2137 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2138 * address of return value to increase optimization opportunities.
2139 * Before vtype decomposition, the dreg of the call ins itself represents the
2140 * fact the call modifies the return value. After decomposition, the call will
2141 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2142 * will be transformed into an LDADDR.
2144 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2145 loada->dreg = alloc_preg (cfg);
2146 loada->inst_p0 = temp;
2147 /* We reference the call too since call->dreg could change during optimization */
2148 loada->inst_p1 = call;
2149 MONO_ADD_INS (cfg->cbb, loada);
2151 call->inst.dreg = temp->dreg;
2153 call->vret_var = loada;
2154 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2155 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2157 #ifdef MONO_ARCH_SOFT_FLOAT
2158 if (COMPILE_SOFT_FLOAT (cfg)) {
2160 * If the call has a float argument, we would need to do an r8->r4 conversion using
2161 * an icall, but that cannot be done during the call sequence since it would clobber
2162 * the call registers + the stack. So we do it before emitting the call.
2164 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2166 MonoInst *in = call->args [i];
2168 if (i >= sig->hasthis)
2169 t = sig->params [i - sig->hasthis];
2171 t = &mono_defaults.int_class->byval_arg;
2172 t = mono_type_get_underlying_type (t);
2174 if (!t->byref && t->type == MONO_TYPE_R4) {
2175 MonoInst *iargs [1];
2179 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2181 /* The result will be in an int vreg */
2182 call->args [i] = conv;
2189 if (COMPILE_LLVM (cfg))
2190 mono_llvm_emit_call (cfg, call);
2192 mono_arch_emit_call (cfg, call);
2194 mono_arch_emit_call (cfg, call);
2197 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2198 cfg->flags |= MONO_CFG_HAS_CALLS;
2203 inline static MonoInst*
/*
 * mono_emit_calli:
 *   Emit an indirect call (calli) through the function pointer in ADDR,
 *   with signature SIG and arguments ARGS. Returns the call instruction.
 */
2204 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2206 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE);
2208 call->inst.sreg1 = addr->dreg;
2210 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2212 return (MonoInst*)call;
2215 inline static MonoInst*
/*
 * mono_emit_rgctx_calli:
 *   Like mono_emit_calli, but additionally passes RGCTX_ARG (the runtime
 *   generic context) in the architecture's dedicated RGCTX register. Only
 *   available when MONO_ARCH_RGCTX_REG is defined; otherwise aborts.
 */
2216 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2218 #ifdef MONO_ARCH_RGCTX_REG
2223 rgctx_reg = mono_alloc_preg (cfg);
2224 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2226 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2228 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2229 cfg->uses_rgctx_reg = TRUE;
2230 call->rgctx_reg = TRUE;
2232 return (MonoInst*)call;
2234 g_assert_not_reached ();
2240 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
2242 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, int rgctx_type);
/*
 * mono_emit_method_call_full:
 *   Central helper for emitting a managed method call. Decides between:
 *   remoting wrappers (transparent proxies), direct calls (non-virtual or
 *   sealed/final methods), delegate Invoke fast path, IMT-based interface
 *   dispatch, and plain vtable dispatch. THIS selects virtual dispatch;
 *   IMT_ARG optionally carries the interface-method token.
 *   NOTE(review): many structural lines (braces, else headers, declarations)
 *   are missing from this listing; code kept verbatim.
 */
2245 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2246 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2248 gboolean might_be_remote;
2249 gboolean virtual = this != NULL;
2250 gboolean enable_for_aot = TRUE;
2254 if (method->string_ctor) {
2255 /* Create the real signature */
2256 /* FIXME: Cache these */
2257 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2258 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
/* Calls on marshal-by-ref (or object) receivers may hit a transparent
 * proxy and need the remoting-invoke-with-check wrapper. */
2263 might_be_remote = this && sig->hasthis &&
2264 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2265 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this);
2267 context_used = mono_method_check_context_used (method);
2268 if (might_be_remote && context_used) {
2271 g_assert (cfg->generic_sharing_context);
/* Shared generic code: fetch the wrapper address from the rgctx and call
 * indirectly, since gshared methods can't have wrappers. */
2273 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2275 return mono_emit_calli (cfg, sig, args, addr);
2278 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE);
2280 if (might_be_remote)
2281 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2283 call->method = method;
2284 call->inst.flags |= MONO_INST_HAS_METHOD;
2285 call->inst.inst_left = this;
2288 int vtable_reg, slot_reg, this_reg;
2290 this_reg = this->dreg;
2292 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2293 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2294 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2296 /* Make a call to delegate->invoke_impl */
2297 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2298 call->inst.inst_basereg = this_reg;
2299 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2300 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2302 return (MonoInst*)call;
/* Direct-call path: non-virtual methods (and some final ones) just need a
 * null check on 'this'. */
2306 if ((!cfg->compile_aot || enable_for_aot) &&
2307 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2308 (MONO_METHOD_IS_FINAL (method) &&
2309 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2310 !(method->klass->marshalbyref && context_used)) {
2312 * the method is not virtual, we just need to ensure this is not null
2313 * and then we can call the method directly.
2315 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2317 * The check above ensures method is not gshared, this is needed since
2318 * gshared methods can't have wrappers.
2320 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2323 if (!method->string_ctor)
2324 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2326 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2328 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2330 return (MonoInst*)call;
2333 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2335 * the method is virtual, but we can statically dispatch since either
2336 * it's class or the method itself are sealed.
2337 * But first we need to ensure it's not a null reference.
2339 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2341 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2342 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2344 return (MonoInst*)call;
/* Virtual dispatch: load the vtable (with implicit null check), then pick
 * the slot — via IMT for interfaces, via vtable index otherwise. */
2347 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2349 vtable_reg = alloc_preg (cfg);
2350 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2351 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2353 #ifdef MONO_ARCH_HAVE_IMT
2355 guint32 imt_slot = mono_method_get_imt_slot (method);
2356 emit_imt_argument (cfg, call, imt_arg);
2357 slot_reg = vtable_reg;
2358 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2361 if (slot_reg == -1) {
/* No IMT: go through the interface-offsets table before the vtable. */
2362 slot_reg = alloc_preg (cfg);
2363 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2364 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2367 slot_reg = vtable_reg;
2368 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2369 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2370 #ifdef MONO_ARCH_HAVE_IMT
2372 g_assert (mono_method_signature (method)->generic_param_count);
2373 emit_imt_argument (cfg, call, imt_arg);
2378 call->inst.sreg1 = slot_reg;
2379 call->virtual = TRUE;
2382 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2384 return (MonoInst*)call;
/*
 * mono_emit_rgctx_method_call_full:
 *   Wrapper around mono_emit_method_call_full that additionally passes
 *   VTABLE_ARG (runtime generic context / vtable) in the dedicated RGCTX
 *   register, when the architecture provides one.
 */
2388 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2389 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
2391 #ifdef MONO_ARCH_RGCTX_REG
2398 #ifdef MONO_ARCH_RGCTX_REG
2399 rgctx_reg = mono_alloc_preg (cfg);
2400 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2405 ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2407 call = (MonoCallInst*)ins;
2409 #ifdef MONO_ARCH_RGCTX_REG
2410 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2411 cfg->uses_rgctx_reg = TRUE;
2412 call->rgctx_reg = TRUE;
/* Convenience wrapper: call METHOD with its own signature and no IMT arg. */
2422 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2424 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
/*
 * mono_emit_native_call:
 *   Emit a direct call to the native address FUNC with signature SIG.
 *   NOTE(review): the line storing FUNC into the call (fptr) is missing from
 *   this listing; code kept verbatim.
 */
2428 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2435 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE);
2438 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2440 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *   Call a registered JIT icall identified by its native address FUNC,
 *   going through the icall's wrapper and registered signature.
 */
2444 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2446 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2450 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2454 * mono_emit_abs_call:
2456 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2458 inline static MonoInst*
2459 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2460 MonoMethodSignature *sig, MonoInst **args)
2462 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2466 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Register JI in the per-cfg abs_patches table so the backend can resolve
 * the fake address back to the patch info. */
2469 if (cfg->abs_patches == NULL)
2470 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2471 g_hash_table_insert (cfg->abs_patches, ji, ji);
2472 ins = mono_emit_native_call (cfg, ji, sig, args);
2473 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *   After a pinvoke (or LLVM) call, sign/zero-extend small integer return
 *   values, since native code may leave the upper register bits
 *   uninitialized. Presumably returns the (possibly widened) result
 *   instruction — the return lines are missing from this listing.
 */
2478 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2480 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2481 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2485 * Native code might return non register sized integers
2486 * without initializing the upper bits.
2488 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2489 case OP_LOADI1_MEMBASE:
2490 widen_op = OP_ICONV_TO_I1;
2492 case OP_LOADU1_MEMBASE:
2493 widen_op = OP_ICONV_TO_U1;
2495 case OP_LOADI2_MEMBASE:
2496 widen_op = OP_ICONV_TO_I2;
2498 case OP_LOADU2_MEMBASE:
2499 widen_op = OP_ICONV_TO_U2;
2505 if (widen_op != -1) {
2506 int dreg = alloc_preg (cfg);
2509 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2510 widen->type = ins->type;
/*
 * get_memcpy_method:
 *
 *   Return (and lazily cache) the corlib String.memcpy helper with 3 arguments.
 * Aborts if the helper is missing, which indicates an outdated corlib.
 */
2520 get_memcpy_method (void)
2522 static MonoMethod *memcpy_method = NULL;
2523 if (!memcpy_method) {
2524 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2526 g_error ("Old corlib found. Install a new one");
2528 return memcpy_method;
2532 * Emit code to copy a valuetype of type @klass whose address is stored in
2533 * @src->dreg to memory whose address is stored at @dest->dreg.
/*
 * mini_emit_stobj:
 *
 *   Emit code to copy the valuetype of type @klass whose address is in
 * @src->dreg to the memory whose address is in @dest->dreg. If @native is
 * TRUE the native (marshalled) size is used. Depending on the situation this
 * emits a GC-aware mono_value_copy icall, an inline memcpy, or a call to the
 * corlib memcpy helper.
 */
2536 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2538 MonoInst *iargs [3];
2541 MonoMethod *memcpy_method;
2545 * This check breaks with spilled vars... need to handle it during verification anyway.
2546 * g_assert (klass && klass == src->klass && klass == dest->klass);
2550 n = mono_class_native_size (klass, &align);
2552 n = mono_class_value_size (klass, &align);
2554 #if HAVE_WRITE_BARRIERS
2555 /* if native is true there should be no references in the struct */
2556 if (klass->has_references && !native) {
2557 /* Avoid barriers when storing to the stack */
2558 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2559 (dest->opcode == OP_LDADDR))) {
2560 int context_used = 0;
2565 if (cfg->generic_sharing_context)
2566 context_used = mono_class_check_context_used (klass);
/* The icall needs the class as its third argument; fetch it from the
 * rgctx under generic sharing, otherwise embed it as a constant. */
2568 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2570 if (cfg->compile_aot) {
2571 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2573 EMIT_NEW_PCONST (cfg, iargs [2], klass);
2574 mono_class_compute_gc_descriptor (klass);
/* mono_value_copy performs the copy with the required write barriers. */
2578 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* Small copies are inlined when intrinsics are enabled. */
2583 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2584 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2585 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2589 EMIT_NEW_ICONST (cfg, iargs [2], n);
2591 memcpy_method = get_memcpy_method ();
2592 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return (and lazily cache) the corlib String.memset helper with 3 arguments.
 * Aborts if the helper is missing, which indicates an outdated corlib.
 */
2597 get_memset_method (void)
2599 static MonoMethod *memset_method = NULL;
2600 if (!memset_method) {
2601 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2603 g_error ("Old corlib found. Install a new one");
2605 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code to zero-initialize the valuetype of type @klass at the address in
 * @dest->dreg: inlined memset for small sizes, otherwise a call to the corlib
 * memset helper with (dest, 0, n).
 */
2609 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2611 MonoInst *iargs [3];
2614 MonoMethod *memset_method;
2616 /* FIXME: Optimize this for the case when dest is an LDADDR */
2618 mono_class_init (klass);
2619 n = mono_class_value_size (klass, &align);
2621 if (n <= sizeof (gpointer) * 5) {
2622 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2625 memset_method = get_memset_method ();
2627 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2628 EMIT_NEW_ICONST (cfg, iargs [2], n);
2629 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR producing the runtime generic context for METHOD. Depending on
 * how the context is shared this is: the method RGCTX variable (generic
 * methods), the vtable variable (static or valuetype methods, possibly read
 * out of an mrgctx), or the vtable loaded from the 'this' argument.
 * Only valid under generic sharing (asserted below).
 */
2634 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2636 MonoInst *this = NULL;
2638 g_assert (cfg->generic_sharing_context);
/* Load 'this' only when the method actually receives one and the context
 * does not come from the method itself. */
2640 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2641 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2642 !method->klass->valuetype)
2643 EMIT_NEW_ARGLOAD (cfg, this, 0);
2645 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2646 MonoInst *mrgctx_loc, *mrgctx_var;
2649 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2651 mrgctx_loc = mono_get_vtable_var (cfg);
2652 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2655 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2656 MonoInst *vtable_loc, *vtable_var;
2660 vtable_loc = mono_get_vtable_var (cfg);
2661 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
/* For generic methods the variable holds an mrgctx; dereference it to
 * reach the class vtable. */
2663 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2664 MonoInst *mrgctx_var = vtable_var;
2667 vtable_reg = alloc_preg (cfg);
2668 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2669 vtable_var->type = STACK_PTR;
/* Instance method on a reference type: read the vtable from 'this'. */
2675 int vtable_reg, res_reg;
2677 vtable_reg = alloc_preg (cfg);
2678 res_reg = alloc_preg (cfg);
2679 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate, from mempool MP, a new RGCTX-entry patch descriptor pointing at
 * PATCH_DATA of type PATCH_TYPE, to be fetched with slot kind INFO_TYPE for
 * METHOD (IN_MRGCTX selects method-RGCTX vs. vtable lookup).
 */
2684 static MonoJumpInfoRgctxEntry *
2685 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2687 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2688 res->method = method;
2689 res->in_mrgctx = in_mrgctx;
2690 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2691 res->data->type = patch_type;
2692 res->data->data.target = patch_data;
2693 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *
 *   Emit a call to the lazy RGCTX fetch trampoline which resolves ENTRY using
 * the runtime generic context in RGCTX.
 */
2698 static inline MonoInst*
2699 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2701 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR to fetch the RGCTX_TYPE property of KLASS through the runtime
 * generic context of the current method.
 */
2705 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2706 MonoClass *klass, int rgctx_type)
2708 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2709 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2711 return emit_rgctx_fetch (cfg, rgctx, entry);
2715 * emit_get_rgctx_method:
2717 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2718 * normal constants, else emit a load from the rgctx.
2721 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2722 MonoMethod *cmethod, int rgctx_type)
/* Without generic sharing the requested info can be emitted as a constant. */
2724 if (!context_used) {
2727 switch (rgctx_type) {
2728 case MONO_RGCTX_INFO_METHOD:
2729 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
2731 case MONO_RGCTX_INFO_METHOD_RGCTX:
2732 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
2735 g_assert_not_reached ();
/* Shared code: go through the lazy RGCTX fetch trampoline. */
2738 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2739 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2741 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR to fetch the RGCTX_TYPE property of FIELD through the runtime
 * generic context of the current method.
 */
2746 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2747 MonoClassField *field, int rgctx_type)
2749 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2750 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2752 return emit_rgctx_fetch (cfg, rgctx, entry);
2756 * On return the caller must check @klass for load errors.
/*
 * emit_generic_class_init:
 *
 *   Emit a call to the generic class init trampoline for KLASS, passing its
 * vtable (fetched from the rgctx under generic sharing, a constant otherwise).
 * On architectures that define MONO_ARCH_VTABLE_REG the vtable is passed in
 * that fixed register.
 */
2759 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
2761 MonoInst *vtable_arg;
2763 int context_used = 0;
2765 if (cfg->generic_sharing_context)
2766 context_used = mono_class_check_context_used (klass);
2769 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
2770 klass, MONO_RGCTX_INFO_VTABLE);
2772 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2776 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
2779 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
2780 #ifdef MONO_ARCH_VTABLE_REG
2781 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
2782 cfg->uses_vtable_reg = TRUE;
2789 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit a runtime check that OBJ is an instance of ARRAY_CLASS, throwing
 * ArrayTypeMismatchException otherwise. Compares the object's class (shared
 * mode) or vtable (normal mode) against ARRAY_CLASS, using rgctx fetches or
 * AOT constants as appropriate.
 */
2792 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
2794 int vtable_reg = alloc_preg (cfg);
2795 int context_used = 0;
2797 if (cfg->generic_sharing_context)
2798 context_used = mono_class_check_context_used (array_class);
2800 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Shared mode: vtables are per-domain, so compare the MonoClass instead. */
2802 if (cfg->opt & MONO_OPT_SHARED) {
2803 int class_reg = alloc_preg (cfg);
2804 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2805 if (cfg->compile_aot) {
2806 int klass_reg = alloc_preg (cfg);
2807 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
2808 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
2810 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
2812 } else if (context_used) {
2813 MonoInst *vtable_ins;
2815 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
2816 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
2818 if (cfg->compile_aot) {
2822 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
2824 vt_reg = alloc_preg (cfg);
2825 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
2826 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
2829 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
2831 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
2835 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
/*
 * save_cast_details:
 *
 *   When --debug=casts is enabled, record the source class (read from the
 * object in OBJ_REG) and the target KLASS in JIT TLS so a failing cast can
 * produce a detailed error message.
 */
2839 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
2841 if (mini_get_debug_options ()->better_cast_details) {
2842 int to_klass_reg = alloc_preg (cfg);
2843 int vtable_reg = alloc_preg (cfg);
2844 int klass_reg = alloc_preg (cfg);
2845 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
/* NOTE(review): the trailing "." after "\n" in this message looks like a typo — confirm upstream. */
2848 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
2852 MONO_ADD_INS (cfg->cbb, tls_get);
2853 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2854 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2856 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
2857 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
2858 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * reset_cast_details:
 *
 *   Clear the cast-details state recorded by save_cast_details () once the
 * cast has succeeded.
 */
2863 reset_cast_details (MonoCompile *cfg)
2865 /* Reset the variables holding the cast details */
2866 if (mini_get_debug_options ()->better_cast_details) {
2867 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2869 MONO_ADD_INS (cfg->cbb, tls_get);
2870 /* It is enough to reset the from field */
2871 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
2876 * Handles unbox of a Nullable<T>. If context_used is non-zero, then shared
2877 * generic code is generated.
/*
 * handle_unbox_nullable:
 *
 *   Emit a call to Nullable<T>.Unbox on VAL. Under generic sharing
 * (context_used != 0) the method address is fetched from the rgctx and an
 * indirect rgctx call is emitted instead of a direct call.
 */
2880 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
2882 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
2885 MonoInst *rgctx, *addr;
2887 /* FIXME: What if the class is shared? We might not
2888 have to get the address of the method from the
2890 addr = emit_get_rgctx_method (cfg, context_used, method,
2891 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2893 rgctx = emit_get_rgctx (cfg, method, context_used);
2895 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
2897 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *
 *   Emit the unbox sequence for KLASS: check the object is not an array
 * (rank == 0), verify its element class against KLASS's (via rgctx under
 * generic sharing), throw InvalidCastException on mismatch, then compute the
 * address of the boxed payload (obj + sizeof (MonoObject)).
 */
2902 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
2906 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
2907 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
2908 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
2909 int rank_reg = alloc_dreg (cfg ,STACK_I4);
2911 obj_reg = sp [0]->dreg;
/* Faulting load: also performs the implicit null check. */
2912 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2913 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2915 /* FIXME: generics */
2916 g_assert (klass->rank == 0);
2919 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
2920 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2922 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2923 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
2926 MonoInst *element_class;
2928 /* This assertion is from the unboxcast insn */
2929 g_assert (klass->rank == 0);
2931 element_class = emit_get_rgctx_klass (cfg, context_used,
2932 klass->element_class, MONO_RGCTX_INFO_KLASS);
2934 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
2935 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2937 save_cast_details (cfg, klass->element_class, obj_reg);
2938 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
2939 reset_cast_details (cfg);
/* Address of the unboxed value: skip the MonoObject header. */
2942 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
2943 MONO_ADD_INS (cfg->cbb, add);
2944 add->type = STACK_MP;
2951 * Returns NULL and sets the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit IR to allocate an instance of KLASS (FOR_BOX selects the boxing
 * allocator). Picks between mono_object_new () (shared mode), a corlib-only
 * AOT helper for out-of-line code, the managed GC allocator, or the class's
 * allocation function. Returns NULL and sets the cfg exception on error.
 */
2954 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box)
2956 MonoInst *iargs [2];
2959 if (cfg->opt & MONO_OPT_SHARED) {
2960 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2961 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
2963 alloc_ftn = mono_object_new;
2964 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
2965 /* This happens often in argument checking code, eg. throw new FooException... */
2966 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
2967 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
2968 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
2970 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2971 MonoMethod *managed_alloc = NULL;
/* Vtable creation failed: report a type load error to the caller. */
2975 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
2976 cfg->exception_ptr = klass;
2980 #ifndef MONO_CROSS_COMPILE
2981 managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2984 if (managed_alloc) {
2985 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2986 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
2988 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocators take the instance size in pointer-sized words. */
2990 guint32 lw = vtable->klass->instance_size;
2991 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
2992 EMIT_NEW_ICONST (cfg, iargs [0], lw);
2993 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
2996 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3000 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_alloc_from_inst:
 *
 *   Like handle_alloc (), but the klass/vtable is only known at runtime and is
 * supplied by DATA_INST (typically an rgctx fetch), so allocation goes through
 * mono_object_new () or mono_object_new_specific ().
 */
3004 handle_alloc_from_inst (MonoCompile *cfg, MonoClass *klass, MonoInst *data_inst,
3007 MonoInst *iargs [2];
3008 MonoMethod *managed_alloc = NULL;
3012 FIXME: we cannot get managed_alloc here because we can't get
3013 the class's vtable (because it's not a closed class)
3015 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3016 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3019 if (cfg->opt & MONO_OPT_SHARED) {
3020 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3021 iargs [1] = data_inst;
3022 alloc_ftn = mono_object_new;
3024 if (managed_alloc) {
3025 iargs [0] = data_inst;
3026 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3029 iargs [0] = data_inst;
3030 alloc_ftn = mono_object_new_specific;
3033 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3037 * Returns NULL and sets the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit IR to box VAL as an instance of KLASS: Nullable<T> uses the managed
 * Box method, otherwise allocate an object and store the value past the
 * MonoObject header. Returns NULL and sets the cfg exception on error.
 */
3040 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass)
3042 MonoInst *alloc, *ins;
3044 if (mono_class_is_nullable (klass)) {
3045 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3046 return mono_emit_method_call (cfg, method, &val, NULL);
3049 alloc = handle_alloc (cfg, klass, TRUE);
3053 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * handle_box_from_inst:
 *
 *   Generic-sharing variant of handle_box (): the Nullable Box method address
 * comes from the rgctx, and allocation uses the runtime-supplied DATA_INST.
 */
3059 handle_box_from_inst (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoInst *data_inst)
3061 MonoInst *alloc, *ins;
3063 if (mono_class_is_nullable (klass)) {
3064 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3065 /* FIXME: What if the class is shared? We might not
3066 have to get the method address from the RGCTX. */
3067 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3068 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3069 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3071 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3073 alloc = handle_alloc_from_inst (cfg, klass, data_inst, TRUE);
3075 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3082 * Returns NULL and sets the cfg exception on error.
/*
 * handle_castclass:
 *
 *   Emit the castclass sequence for SRC against KLASS: null passes through;
 * interfaces use the interface-cast helper; sealed non-array classes are
 * checked with a single class compare; everything else goes through the
 * generic castclass helper. Throws InvalidCastException on failure.
 */
3085 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3087 MonoBasicBlock *is_null_bb;
3088 int obj_reg = src->dreg;
3089 int vtable_reg = alloc_preg (cfg);
3091 NEW_BBLOCK (cfg, is_null_bb);
/* null always casts successfully. */
3093 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3094 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3096 save_cast_details (cfg, klass, obj_reg);
3098 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3099 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3100 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3102 int klass_reg = alloc_preg (cfg);
3104 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3106 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3107 /* the remoting code is broken, access the class for now */
3108 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3109 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3111 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3112 cfg->exception_ptr = klass;
3115 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3117 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3118 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3120 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3122 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3123 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, is_null_bb);
3127 MONO_START_BB (cfg, is_null_bb);
3129 reset_cast_details (cfg);
3135 * Returns NULL and sets the cfg exception on error.
/*
 * handle_isinst:
 *
 *   Emit the isinst sequence for SRC against KLASS. The result register is
 * pre-loaded with the object; on failure it is overwritten with NULL. Handles
 * interfaces, arrays (including the enum/object cast-class special cases and
 * the SZARRAY bounds check), Nullable<T>, and sealed classes.
 */
3138 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3141 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3142 int obj_reg = src->dreg;
3143 int vtable_reg = alloc_preg (cfg);
3144 int res_reg = alloc_preg (cfg);
3146 NEW_BBLOCK (cfg, is_null_bb);
3147 NEW_BBLOCK (cfg, false_bb);
3148 NEW_BBLOCK (cfg, end_bb);
3150 /* Do the assignment at the beginning, so the other assignment can be if converted */
3151 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3152 ins->type = STACK_OBJ;
/* null is not an instance of anything; result stays null. */
3155 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3156 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3158 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3159 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3160 /* the is_null_bb target simply copies the input register to the output */
3161 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3163 int klass_reg = alloc_preg (cfg);
3165 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Array case: rank must match, then check the element (cast) class. */
3168 int rank_reg = alloc_preg (cfg);
3169 int eclass_reg = alloc_preg (cfg);
3171 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3172 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3173 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3174 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3175 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
3176 if (klass->cast_class == mono_defaults.object_class) {
3177 int parent_reg = alloc_preg (cfg);
3178 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3179 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3180 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3181 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3182 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3183 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3184 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3185 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3186 } else if (klass->cast_class == mono_defaults.enum_class) {
3187 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3188 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3189 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3190 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3192 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3193 /* Check that the object is a vector too */
3194 int bounds_reg = alloc_preg (cfg);
3195 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3196 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3197 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3200 /* the is_null_bb target simply copies the input register to the output */
3201 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3203 } else if (mono_class_is_nullable (klass)) {
3204 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3205 /* the is_null_bb target simply copies the input register to the output */
3206 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
3208 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3209 /* the remoting code is broken, access the class for now */
3210 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3211 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3213 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3214 cfg->exception_ptr = klass;
3217 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3219 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3220 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3222 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3223 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3225 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3226 /* the is_null_bb target simply copies the input register to the output */
3227 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, is_null_bb);
/* Failure path: produce NULL. */
3232 MONO_START_BB (cfg, false_bb);
3234 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3235 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3237 MONO_START_BB (cfg, is_null_bb);
3239 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit the remoting-aware isinst helper. Result in DREG: 0 if the object is
 * an instance of KLASS, 1 if it is not, 2 if it is a transparent proxy whose
 * type cannot be determined here (deferred to managed code).
 */
3245 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3247 /* This opcode takes as input an object reference and a class, and returns:
3248 0) if the object is an instance of the class,
3249 1) if the object is not an instance of the class,
3250 2) if the object is a proxy whose type cannot be determined */
3253 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3254 int obj_reg = src->dreg;
3255 int dreg = alloc_ireg (cfg);
3257 int klass_reg = alloc_preg (cfg);
3259 NEW_BBLOCK (cfg, true_bb);
3260 NEW_BBLOCK (cfg, false_bb);
3261 NEW_BBLOCK (cfg, false2_bb);
3262 NEW_BBLOCK (cfg, end_bb);
3263 NEW_BBLOCK (cfg, no_proxy_bb);
/* null is never an instance. */
3265 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3266 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3268 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3269 NEW_BBLOCK (cfg, interface_fail_bb);
3271 tmp_reg = alloc_preg (cfg);
3272 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3273 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
3274 MONO_START_BB (cfg, interface_fail_bb);
3275 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* An interface miss on a transparent proxy with custom type info
 * means the answer cannot be determined here (result 2). */
3277 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3279 tmp_reg = alloc_preg (cfg);
3280 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3281 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3282 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
3284 tmp_reg = alloc_preg (cfg);
3285 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3286 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Non-interface: for proxies check the remote class's proxy_class. */
3288 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3289 tmp_reg = alloc_preg (cfg);
3290 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3291 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3293 tmp_reg = alloc_preg (cfg);
3294 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3295 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3296 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3298 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3299 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3301 MONO_START_BB (cfg, no_proxy_bb);
3303 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
3306 MONO_START_BB (cfg, false_bb);
3308 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3309 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3311 MONO_START_BB (cfg, false2_bb);
3313 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3314 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3316 MONO_START_BB (cfg, true_bb);
3318 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3320 MONO_START_BB (cfg, end_bb);
/* Materialize the result as a proper stack value. */
3323 MONO_INST_NEW (cfg, ins, OP_ICONST);
3325 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit the remoting-aware castclass helper. Result in DREG: 0 if the object
 * is an instance of KLASS, 1 if it is a proxy whose type cannot be determined
 * here; throws InvalidCastException otherwise.
 */
3331 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3333 /* This opcode takes as input an object reference and a class, and returns:
3334 0) if the object is an instance of the class,
3335 1) if the object is a proxy whose type cannot be determined
3336 an InvalidCastException exception is thrown otherwise*/
3339 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3340 int obj_reg = src->dreg;
3341 int dreg = alloc_ireg (cfg);
3342 int tmp_reg = alloc_preg (cfg);
3343 int klass_reg = alloc_preg (cfg);
3345 NEW_BBLOCK (cfg, end_bb);
3346 NEW_BBLOCK (cfg, ok_result_bb);
/* null always casts successfully (result 0). */
3348 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3349 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
3351 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3352 NEW_BBLOCK (cfg, interface_fail_bb);
3354 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3355 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3356 MONO_START_BB (cfg, interface_fail_bb);
3357 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Interface miss: only a transparent proxy with custom type info is
 * tolerated (result 1); anything else throws. */
3359 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3361 tmp_reg = alloc_preg (cfg);
3362 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3363 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3364 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
3366 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3367 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3370 NEW_BBLOCK (cfg, no_proxy_bb);
3372 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3373 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3374 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3376 tmp_reg = alloc_preg (cfg);
3377 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3378 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3380 tmp_reg = alloc_preg (cfg);
3381 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3382 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3383 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3385 NEW_BBLOCK (cfg, fail_1_bb);
3387 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3389 MONO_START_BB (cfg, fail_1_bb);
3391 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3392 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3394 MONO_START_BB (cfg, no_proxy_bb);
3396 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3399 MONO_START_BB (cfg, ok_result_bb);
3401 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3403 MONO_START_BB (cfg, end_bb);
/* Materialize the result as a proper stack value. */
3406 MONO_INST_NEW (cfg, ins, OP_ICONST);
3408 ins->type = STACK_I4;
3414 * Returns NULL and sets the cfg exception on error.
/*
 * handle_delegate_ctor:
 *
 *   Emit the inlined body of mono_delegate_ctor: allocate the delegate of type
 * KLASS, store its target object, the (possibly rgctx-resolved) METHOD, a
 * per-domain compiled-code slot, and the delegate invoke trampoline.
 * Returns NULL and sets the cfg exception on error.
 */
3416 static G_GNUC_UNUSED MonoInst*
3417 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
3419 gpointer *trampoline;
3420 MonoInst *obj, *method_ins, *tramp_ins;
3424 obj = handle_alloc (cfg, klass, FALSE);
3428 /* Inline the contents of mono_delegate_ctor */
3430 /* Set target field */
3431 /* Optimize away setting of NULL target */
3432 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
3433 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3435 /* Set method field */
3436 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
3437 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3440 * To avoid looking up the compiled code belonging to the target method
3441 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3442 * store it, and we fill it after the method has been compiled.
3444 if (!cfg->compile_aot && !method->dynamic) {
3445 MonoInst *code_slot_ins;
3448 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Allocate (or reuse) the per-domain code slot under the domain lock. */
3450 domain = mono_domain_get ();
3451 mono_domain_lock (domain);
3452 if (!domain_jit_info (domain)->method_code_hash)
3453 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3454 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3456 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3457 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3459 mono_domain_unlock (domain);
3461 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3463 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3466 /* Set invoke_impl field */
3467 if (cfg->compile_aot) {
3468 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3470 trampoline = mono_create_delegate_trampoline (klass);
3471 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3473 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3475 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 * Emit a call to the mono_array_new_va icall wrapper for a RANK-dimensional
 * array allocation, with the dimension arguments in SP. The icall uses a
 * vararg calling convention, so LLVM compilation is disabled for this method.
 */
3481 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3483 MonoJitICallInfo *info;
3485 /* Need to register the icall so it gets an icall wrapper */
3486 info = mono_get_array_new_va_icall (rank);
3488 cfg->flags |= MONO_CFG_HAS_VARARGS;
3490 /* mono_array_new_va () needs a vararg calling convention */
3491 cfg->disable_llvm = TRUE;
3493 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3494 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 * If the method has a GOT variable which has not been materialized yet, emit
 * an OP_LOAD_GOTADDR at the very start of the entry bblock and a dummy use in
 * the exit bblock so the variable stays live for the whole method.
 */
3498 mono_emit_load_got_addr (MonoCompile *cfg)
3500 MonoInst *getaddr, *dummy_use;
/* Nothing to do without a GOT var, or if it was already emitted */
3502 if (!cfg->got_var || cfg->got_var_allocated)
3505 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3506 getaddr->dreg = cfg->got_var->dreg;
3508 /* Add it to the start of the first bblock */
/* Prepend manually if the bblock already has code, otherwise just append */
3509 if (cfg->bb_entry->code) {
3510 getaddr->next = cfg->bb_entry->code;
3511 cfg->bb_entry->code = getaddr;
3514 MONO_ADD_INS (cfg->bb_entry, getaddr);
3516 cfg->got_var_allocated = TRUE;
3519 * Add a dummy use to keep the got_var alive, since real uses might
3520 * only be generated by the back ends.
3521 * Add it to end_bblock, so the variable's lifetime covers the whole
3523 * It would be better to make the usage of the got var explicit in all
3524 * cases when the backend needs it (i.e. calls, throw etc.), so this
3525 * wouldn't be needed.
3527 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3528 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Max IL code size a method may have to be considered for inlining;
 * initialized lazily from the MONO_INLINELIMIT env var (see
 * mono_method_check_inlining below). */
3531 static int inline_limit;
3532 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 * Decide whether METHOD is eligible for inlining into the method being
 * compiled in CFG. Rejects methods that are too big, have exception clauses,
 * are synchronized/pinvoke/runtime/internal-call, are marked noinline, have
 * declarative security, or whose class still needs its .cctor run.
 * NOTE(review): partial extraction — the return statements for most of these
 * checks are not visible in this chunk.
 */
3535 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3537 MonoMethodHeader *header;
3539 #ifdef MONO_ARCH_SOFT_FLOAT
3540 MonoMethodSignature *sig = mono_method_signature (method);
/* No inlining under generic sharing, and cap recursion depth */
3544 if (cfg->generic_sharing_context)
3547 if (cfg->inline_depth > 10)
3550 #ifdef MONO_ARCH_HAVE_LMF_OPS
3551 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3552 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3553 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3557 if (method->is_inflated)
3558 /* Avoid inflating the header */
3559 header = mono_method_get_header (((MonoMethodInflated*)method)->declaring);
3561 header = mono_method_get_header (method);
/* Hard disqualifiers: special impl flags, pinvoke, MBR classes, EH clauses */
3563 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME) ||
3564 (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3565 (method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3566 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3567 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) ||
3568 (method->klass->marshalbyref) ||
3569 !header || header->num_clauses)
3572 /* also consider num_locals? */
3573 /* Do the size check early to avoid creating vtables */
3574 if (!inline_limit_inited) {
3575 if (getenv ("MONO_INLINELIMIT"))
3576 inline_limit = atoi (getenv ("MONO_INLINELIMIT"))3578 inline_limit = INLINE_LENGTH_LIMIT;
3579 inline_limit_inited = TRUE;
3581 if (header->code_size >= inline_limit)
3585 * if we can initialize the class of the method right away, we do,
3586 * otherwise we don't allow inlining if the class needs initialization,
3587 * since it would mean inserting a call to mono_runtime_class_init()
3588 * inside the inlined code
3590 if (!(cfg->opt & MONO_OPT_SHARED)) {
3591 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3592 if (cfg->run_cctors && method->klass->has_cctor) {
3593 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
3594 if (!method->klass->runtime_info)
3595 /* No vtable created yet */
3597 vtable = mono_class_vtable (cfg->domain, method->klass);
3600 /* This makes so that inline cannot trigger */
3601 /* .cctors: too many apps depend on them */
3602 /* running with a specific order... */
3603 if (! vtable->initialized)
3605 mono_runtime_class_init (vtable);
3607 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3608 if (!method->klass->runtime_info)
3609 /* No vtable created yet */
3611 vtable = mono_class_vtable (cfg->domain, method->klass);
3614 if (!vtable->initialized)
3619 * If we're compiling for shared code
3620 * the cctor will need to be run at aot method load time, for example,
3621 * or at the end of the compilation of the inlining method.
3623 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3628 * CAS - do not inline methods with declarative security
3629 * Note: this has to be before any possible return TRUE;
3631 if (mono_method_has_declsec (method))
/* With soft float, avoid inlining methods taking or returning R4 values */
3634 #ifdef MONO_ARCH_SOFT_FLOAT
3636 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3638 for (i = 0; i < sig->param_count; ++i)
3639 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 * Return whether a static field access from METHOD requires emitting a class
 * initialization check for VTABLE's class. Already-initialized vtables (when
 * not AOT compiling), beforefieldinit classes, classes whose cctor doesn't
 * need to run, and accesses from an instance method of the same class all
 * avoid the check.
 */
3647 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
3649 if (vtable->initialized && !cfg->compile_aot)
3652 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3655 if (!mono_class_needs_cctor_run (vtable->klass, method))
3658 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3659 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 * Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR with element class KLASS, including a bounds check. On x86/amd64
 * a single LEA is used for power-of-two element sizes; otherwise a
 * multiply+add sequence is emitted.
 */
3666 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index)
3670 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3672 mono_class_init (klass);
3673 size = mono_class_array_element_size (klass);
3675 mult_reg = alloc_preg (cfg);
3676 array_reg = arr->dreg;
3677 index_reg = index->dreg;
3679 #if SIZEOF_REGISTER == 8
3680 /* The array reg is 64 bits but the index reg is only 32 */
/* LLVM keeps the 32 bit index as-is; otherwise sign-extend it to 64 bits */
3681 if (COMPILE_LLVM (cfg)) {
3683 index2_reg = index_reg;
3685 index2_reg = alloc_preg (cfg);
3686 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
3689 if (index->type == STACK_I8) {
3690 index2_reg = alloc_preg (cfg);
3691 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
3693 index2_reg = index_reg;
3697 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3699 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Fast path: fold scale and vector offset into one LEA for sizes 1/2/4/8 */
3700 if (size == 1 || size == 2 || size == 4 || size == 8) {
3701 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3703 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3704 ins->type = STACK_PTR;
/* Generic path: addr = arr + index * size + offsetof(MonoArray, vector) */
3710 add_reg = alloc_preg (cfg);
3712 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3713 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3714 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3715 ins->type = STACK_PTR;
3716 MONO_ADD_INS (cfg->cbb, ins);
3721 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 * Emit IR computing the address of element [INDEX_INS1, INDEX_INS2] of the
 * two-dimensional array ARR with element class KLASS. Loads the bounds array,
 * subtracts each dimension's lower bound, range-checks both effective indices
 * (unsigned compare so negative values also fail), then computes
 * addr = arr + ((idx1 * len2 + idx2) * size) + offsetof(MonoArray, vector).
 * Only built when the arch has real mul/div (depends on OP_LMUL).
 */
3723 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
3725 int bounds_reg = alloc_preg (cfg);
3726 int add_reg = alloc_preg (cfg);
3727 int mult_reg = alloc_preg (cfg);
3728 int mult2_reg = alloc_preg (cfg);
3729 int low1_reg = alloc_preg (cfg);
3730 int low2_reg = alloc_preg (cfg);
3731 int high1_reg = alloc_preg (cfg);
3732 int high2_reg = alloc_preg (cfg);
3733 int realidx1_reg = alloc_preg (cfg);
3734 int realidx2_reg = alloc_preg (cfg);
3735 int sum_reg = alloc_preg (cfg);
3740 mono_class_init (klass);
3741 size = mono_class_array_element_size (klass);
3743 index1 = index_ins1->dreg;
3744 index2 = index_ins2->dreg;
3746 /* range checking */
3747 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
3748 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* First dimension: realidx1 = index1 - lower_bound; throw if >= length */
3750 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
3751 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3752 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
3753 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
3754 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
3755 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
3756 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Second dimension: same check using the next MonoArrayBounds entry */
3758 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
3759 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3760 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
3761 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
3762 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
3763 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
3764 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Flatten the 2D index and scale by the element size */
3766 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
3767 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
3768 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
3769 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
3770 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3772 ins->type = STACK_MP;
3774 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 * Emit IR computing an element address for the array Get/Set/Address method
 * CMETHOD, with the array and index arguments in SP. Rank-1 and (where
 * supported) rank-2 accesses use the fast inline helpers above; other ranks
 * fall back to a call to the generated array-address wrapper.
 */
3781 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
3785 MonoMethod *addr_method;
/* For setters the last parameter is the value, not an index */
3788 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3791 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1]);
3793 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3794 /* emit_ldelema_2 depends on OP_LMUL */
3795 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3796 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* Slow path: call the marshal-generated address helper for this rank/size */
3800 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3801 addr_method = mono_marshal_get_array_address (rank, element_size);
3802 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint request. */
3807 static MonoBreakPolicy
3808 always_insert_breakpoint (MonoMethod *method)
3810 return MONO_BREAK_POLICY_ALWAYS;
/* Currently active policy callback; replaced via mono_set_break_policy (). */
3813 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
3816 * mono_set_break_policy:
3817 * policy_callback: the new callback function
3819 * Allow embedders to decide whether to actually obey breakpoint instructions
3820 * (both break IL instructions and Debugger.Break () method calls), for example
3821 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
3822 * untrusted or semi-trusted code.
3824 * @policy_callback will be called every time a break point instruction needs to
3825 * be inserted with the method argument being the method that calls Debugger.Break()
3826 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
3827 * if it wants the breakpoint to not be effective in the given method.
3828 * #MONO_BREAK_POLICY_ALWAYS is the default.
/* Install POLICY_CALLBACK as the break policy; a NULL argument restores the
 * default always-insert behavior. */
3831 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
3833 if (policy_callback)
3834 break_policy_func = policy_callback;
3836 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:  (NOTE(review): name typo "brekpoint" — kept, since
 * callers outside this chunk use it)
 *
 * Consult the installed break policy to decide whether a breakpoint
 * instruction should actually be emitted for METHOD. Unknown policy values
 * are warned about; the fall-through behavior is not visible in this chunk.
 */
3840 should_insert_brekpoint (MonoMethod *method) {
3841 switch (break_policy_func (method)) {
3842 case MONO_BREAK_POLICY_ALWAYS:
3844 case MONO_BREAK_POLICY_NEVER:
3846 case MONO_BREAK_POLICY_ON_DBG:
3847 return mono_debug_using_mono_debugger ();
3849 g_warning ("Incorrect value returned from break policy callback");
/*
 * mini_emit_inst_for_method:
 *
 * Intrinsic expansion: if CMETHOD (with signature FSIG and arguments ARGS)
 * has a known fast inline implementation, emit it directly into the current
 * bblock and return the resulting instruction; otherwise defer to the
 * arch-specific hook. Covers String, Object, Array, RuntimeHelpers, Thread,
 * Monitor, Interlocked, Debugger/Environment, Math and SIMD intrinsics.
 * NOTE(review): partial extraction — many return statements and closing
 * braces between cases are not visible here.
 */
3855 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
3857 MonoInst *ins = NULL;
/* Cached lookup of System.Runtime.CompilerServices.RuntimeHelpers */
3859 static MonoClass *runtime_helpers_class = NULL;
3860 if (! runtime_helpers_class)
3861 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
3862 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* --- System.String intrinsics --- */
3864 if (cmethod->klass == mono_defaults.string_class) {
3865 if (strcmp (cmethod->name, "get_Chars") == 0) {
3866 int dreg = alloc_ireg (cfg);
3867 int index_reg = alloc_preg (cfg);
3868 int mult_reg = alloc_preg (cfg);
3869 int add_reg = alloc_preg (cfg);
3871 #if SIZEOF_REGISTER == 8
3872 /* The array reg is 64 bits but the index reg is only 32 */
3873 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
3875 index_reg = args [1]->dreg;
3877 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
3879 #if defined(TARGET_X86) || defined(TARGET_AMD64)
3880 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
3881 add_reg = ins->dreg;
3882 /* Avoid a warning */
3884 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
/* Generic path: chars are 2 bytes wide, so shift the index left by 1 */
3887 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
3888 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3889 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3890 add_reg, G_STRUCT_OFFSET (MonoString, chars));
3892 type_from_op (ins, NULL, NULL);
3894 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3895 int dreg = alloc_ireg (cfg);
3896 /* Decompose later to allow more optimizations */
3897 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
3898 ins->type = STACK_I4;
3899 cfg->cbb->has_array_access = TRUE;
3900 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
3903 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
3904 int mult_reg = alloc_preg (cfg);
3905 int add_reg = alloc_preg (cfg);
3907 /* The corlib functions check for oob already. */
3908 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
3909 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3910 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
/* --- System.Object intrinsics --- */
3913 } else if (cmethod->klass == mono_defaults.object_class) {
3915 if (strcmp (cmethod->name, "GetType") == 0) {
3916 int dreg = alloc_preg (cfg);
3917 int vt_reg = alloc_preg (cfg);
3918 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3919 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
3920 type_from_op (ins, NULL, NULL);
3923 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
/* Hash the object address directly; only valid with a non-moving GC */
3924 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
3925 int dreg = alloc_ireg (cfg);
3926 int t1 = alloc_ireg (cfg);
3928 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
3929 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
3930 ins->type = STACK_I4;
3934 } else if (strcmp (cmethod->name, ".ctor") == 0) {
3935 MONO_INST_NEW (cfg, ins, OP_NOP);
3936 MONO_ADD_INS (cfg->cbb, ins);
/* --- System.Array intrinsics --- */
3940 } else if (cmethod->klass == mono_defaults.array_class) {
3941 if (cmethod->name [0] != 'g')
3944 if (strcmp (cmethod->name, "get_Rank") == 0) {
3945 int dreg = alloc_ireg (cfg);
3946 int vtable_reg = alloc_preg (cfg);
3947 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
3948 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3949 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
3950 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3951 type_from_op (ins, NULL, NULL);
3954 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3955 int dreg = alloc_ireg (cfg);
3957 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
3958 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
3959 type_from_op (ins, NULL, NULL);
/* --- RuntimeHelpers --- */
3964 } else if (cmethod->klass == runtime_helpers_class) {
3966 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
3967 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
/* --- System.Threading.Thread --- */
3971 } else if (cmethod->klass == mono_defaults.thread_class) {
3972 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
3973 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
3974 MONO_ADD_INS (cfg->cbb, ins);
3976 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
3977 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
3978 MONO_ADD_INS (cfg->cbb, ins);
/* --- System.Threading.Monitor Enter/Exit fast paths --- */
3981 } else if (cmethod->klass == mono_defaults.monitor_class) {
3982 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
3983 if (strcmp (cmethod->name, "Enter") == 0) {
3986 if (COMPILE_LLVM (cfg)) {
3988 * Pass the argument normally, the LLVM backend will handle the
3989 * calling convention problems.
3991 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
/* Non-LLVM: pass the object in the arch-specific monitor register */
3993 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
3994 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
3995 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
3996 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
3999 return (MonoInst*)call;
4000 } else if (strcmp (cmethod->name, "Exit") == 0) {
4003 if (COMPILE_LLVM (cfg)) {
4004 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4006 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
4007 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4008 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4009 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4012 return (MonoInst*)call;
4014 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4015 MonoMethod *fast_method = NULL;
4017 /* Avoid infinite recursion */
4018 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
4019 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
4020 strcmp (cfg->method->name, "FastMonitorExit") == 0))
4023 if (strcmp (cmethod->name, "Enter") == 0 ||
4024 strcmp (cmethod->name, "Exit") == 0)
4025 fast_method = mono_monitor_get_fast_path (cmethod);
4029 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
/* --- System.Array::GetGenericValueImpl --- */
4031 } else if (mini_class_is_system_array (cmethod->klass) &&
4032 strcmp (cmethod->name, "GetGenericValueImpl") == 0) {
4033 MonoInst *addr, *store, *load;
4034 MonoClass *eklass = mono_class_from_mono_type (fsig->params [1]);
4036 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1]);
4037 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4038 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* --- System.Threading.Interlocked --- */
4040 } else if (cmethod->klass->image == mono_defaults.corlib &&
4041 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
4042 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
4045 #if SIZEOF_REGISTER == 8
4046 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
4047 /* 64 bit reads are already atomic */
4048 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
4049 ins->dreg = mono_alloc_preg (cfg);
4050 ins->inst_basereg = args [0]->dreg;
4051 ins->inst_offset = 0;
4052 MONO_ADD_INS (cfg->cbb, ins);
4056 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
/* Increment/Decrement expand to atomic-add of +1/-1 */
4057 if (strcmp (cmethod->name, "Increment") == 0) {
4058 MonoInst *ins_iconst;
4061 if (fsig->params [0]->type == MONO_TYPE_I4)
4062 opcode = OP_ATOMIC_ADD_NEW_I4;
4063 #if SIZEOF_REGISTER == 8
4064 else if (fsig->params [0]->type == MONO_TYPE_I8)
4065 opcode = OP_ATOMIC_ADD_NEW_I8;
4068 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4069 ins_iconst->inst_c0 = 1;
4070 ins_iconst->dreg = mono_alloc_ireg (cfg);
4071 MONO_ADD_INS (cfg->cbb, ins_iconst);
4073 MONO_INST_NEW (cfg, ins, opcode);
4074 ins->dreg = mono_alloc_ireg (cfg);
4075 ins->inst_basereg = args [0]->dreg;
4076 ins->inst_offset = 0;
4077 ins->sreg2 = ins_iconst->dreg;
4078 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4079 MONO_ADD_INS (cfg->cbb, ins);
4081 } else if (strcmp (cmethod->name, "Decrement") == 0) {
4082 MonoInst *ins_iconst;
4085 if (fsig->params [0]->type == MONO_TYPE_I4)
4086 opcode = OP_ATOMIC_ADD_NEW_I4;
4087 #if SIZEOF_REGISTER == 8
4088 else if (fsig->params [0]->type == MONO_TYPE_I8)
4089 opcode = OP_ATOMIC_ADD_NEW_I8;
4092 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4093 ins_iconst->inst_c0 = -1;
4094 ins_iconst->dreg = mono_alloc_ireg (cfg);
4095 MONO_ADD_INS (cfg->cbb, ins_iconst);
4097 MONO_INST_NEW (cfg, ins, opcode);
4098 ins->dreg = mono_alloc_ireg (cfg);
4099 ins->inst_basereg = args [0]->dreg;
4100 ins->inst_offset = 0;
4101 ins->sreg2 = ins_iconst->dreg;
4102 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4103 MONO_ADD_INS (cfg->cbb, ins);
4105 } else if (strcmp (cmethod->name, "Add") == 0) {
4108 if (fsig->params [0]->type == MONO_TYPE_I4)
4109 opcode = OP_ATOMIC_ADD_NEW_I4;
4110 #if SIZEOF_REGISTER == 8
4111 else if (fsig->params [0]->type == MONO_TYPE_I8)
4112 opcode = OP_ATOMIC_ADD_NEW_I8;
4116 MONO_INST_NEW (cfg, ins, opcode);
4117 ins->dreg = mono_alloc_ireg (cfg);
4118 ins->inst_basereg = args [0]->dreg;
4119 ins->inst_offset = 0;
4120 ins->sreg2 = args [1]->dreg;
4121 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4122 MONO_ADD_INS (cfg->cbb, ins);
4125 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
4127 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
4128 if (strcmp (cmethod->name, "Exchange") == 0) {
4130 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
4132 if (fsig->params [0]->type == MONO_TYPE_I4)
4133 opcode = OP_ATOMIC_EXCHANGE_I4;
4134 #if SIZEOF_REGISTER == 8
4135 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
4136 (fsig->params [0]->type == MONO_TYPE_I))
4137 opcode = OP_ATOMIC_EXCHANGE_I8;
4139 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
4140 opcode = OP_ATOMIC_EXCHANGE_I4;
4145 MONO_INST_NEW (cfg, ins, opcode);
4146 ins->dreg = mono_alloc_ireg (cfg);
4147 ins->inst_basereg = args [0]->dreg;
4148 ins->inst_offset = 0;
4149 ins->sreg2 = args [1]->dreg;
4150 MONO_ADD_INS (cfg->cbb, ins);
4152 switch (fsig->params [0]->type) {
4154 ins->type = STACK_I4;
4158 ins->type = STACK_I8;
4160 case MONO_TYPE_OBJECT:
4161 ins->type = STACK_OBJ;
4164 g_assert_not_reached ();
4167 #if HAVE_WRITE_BARRIERS
/* Storing a reference into *args[0] needs a GC write barrier */
4169 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
4170 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
4174 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4176 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
4177 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
4179 gboolean is_ref = MONO_TYPE_IS_REFERENCE (fsig->params [1]);
4180 if (fsig->params [1]->type == MONO_TYPE_I4)
4182 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
4183 size = sizeof (gpointer);
4184 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I4)
4187 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
4188 ins->dreg = alloc_ireg (cfg);
4189 ins->sreg1 = args [0]->dreg;
4190 ins->sreg2 = args [1]->dreg;
4191 ins->sreg3 = args [2]->dreg;
4192 ins->type = STACK_I4;
4193 MONO_ADD_INS (cfg->cbb, ins);
4194 } else if (size == 8) {
4195 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
4196 ins->dreg = alloc_ireg (cfg);
4197 ins->sreg1 = args [0]->dreg;
4198 ins->sreg2 = args [1]->dreg;
4199 ins->sreg3 = args [2]->dreg;
4200 ins->type = STACK_I8;
4201 MONO_ADD_INS (cfg->cbb, ins);
4203 /* g_assert_not_reached (); */
4205 #if HAVE_WRITE_BARRIERS
4207 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
4208 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
4212 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
/* --- Misc corlib: Debugger.Break, Environment.get_IsRunningOnWindows --- */
4216 } else if (cmethod->klass->image == mono_defaults.corlib) {
4217 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
4218 && strcmp (cmethod->klass->name, "Debugger") == 0) {
4219 if (should_insert_brekpoint (cfg->method))
4220 MONO_INST_NEW (cfg, ins, OP_BREAK);
4222 MONO_INST_NEW (cfg, ins, OP_NOP);
4223 MONO_ADD_INS (cfg->cbb, ins);
4226 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
4227 && strcmp (cmethod->klass->name, "Environment") == 0) {
4229 EMIT_NEW_ICONST (cfg, ins, 1);
4231 EMIT_NEW_ICONST (cfg, ins, 0);
4235 } else if (cmethod->klass == mono_defaults.math_class) {
4237 * There is general branches code for Min/Max, but it does not work for
4239 * http://everything2.com/?node_id=1051618
4243 #ifdef MONO_ARCH_SIMD_INTRINSICS
4244 if (cfg->opt & MONO_OPT_SIMD) {
4245 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/* Nothing generic matched: give the arch backend a chance */
4251 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
4255 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 * Redirect specific runtime calls to faster managed implementations.
 * Currently only String.InternalAllocateStr is redirected (to the managed
 * allocator), and only when string allocation profiling is off.
 * NOTE(review): the NULL-managed_alloc fallback path is not visible here.
 */
4258 inline static MonoInst*
4259 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4260 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4262 if (method->klass == mono_defaults.string_class) {
4263 /* managed string allocation support */
4264 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_STRING_ALLOC)) {
4265 MonoInst *iargs [2];
4266 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4267 MonoMethod *managed_alloc = NULL;
4269 g_assert (vtable); /* Should not fail since it is System.String */
4270 #ifndef MONO_CROSS_COMPILE
4271 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
/* Call the managed allocator with (vtable, length) */
4275 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4276 iargs [1] = args [0];
4277 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 * For inlining: create a local variable for every argument (including the
 * implicit this) of the method being inlined and store the incoming stack
 * values SP into them, so the inlined body can treat them as normal args.
 */
4284 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4286 MonoInst *store, *temp;
4289 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* The this argument's type comes from the stack entry, not the signature */
4290 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4293 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4294 * would be different than the MonoInst's used to represent arguments, and
4295 * the ldelema implementation can't deal with that.
4296 * Solution: When ldelema is used on an inline argument, create a var for
4297 * it, emit ldelema on that var, and emit the saving code below in
4298 * inline_method () if needed.
4300 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4301 cfg->args [i] = temp;
4302 /* This uses cfg->args [i] which is set by the preceding line */
4303 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4304 store->cil_code = sp [0]->cil_code;
/* Debug switches: restrict inlining to methods whose (callee/caller) full
 * name matches an env-var-supplied prefix. */
4309 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4310 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4312 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 * Return whether CALLED_METHOD may be inlined according to the
 * MONO_INLINE_CALLED_METHOD_NAME_LIMIT env var: the method's full name must
 * start with the given prefix. The limit is read once and cached; an empty
 * limit disables the filter.
 */
4314 check_inline_called_method_name_limit (MonoMethod *called_method)
4317 static char *limit = NULL;
4319 if (limit == NULL) {
4320 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4322 if (limit_string != NULL)
4323 limit = limit_string;
4325 limit = (char *) "";
4328 if (limit [0] != '\0') {
4329 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* Prefix comparison: only the first strlen(limit) chars are compared */
4331 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4332 g_free (called_method_name);
4334 //return (strncmp_result <= 0);
4335 return (strncmp_result == 0);
4342 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 * Same as check_inline_called_method_name_limit, but filters on the CALLER's
 * full name via the MONO_INLINE_CALLER_METHOD_NAME_LIMIT env var.
 */
4344 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4347 static char *limit = NULL;
4349 if (limit == NULL) {
4350 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4351 if (limit_string != NULL) {
4352 limit = limit_string;
4354 limit = (char *) "";
4358 if (limit [0] != '\0') {
4359 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
4361 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4362 g_free (caller_method_name);
4364 //return (strncmp_result <= 0);
4365 return (strncmp_result == 0);
/*
 * inline_method:
 *
 * Try to inline CMETHOD (signature FSIG, arguments SP) at IL offset IP of the
 * method being compiled in CFG. Saves the relevant CFG state, converts the
 * callee's IL via mono_method_to_ir () into fresh start/end bblocks, restores
 * the state, and — if the cost is acceptable or INLINE_ALLWAYS is set —
 * splices the inlined blocks in (merging bblocks where possible) and loads
 * the return value. On abort, the exception state is cleared and the new
 * bblocks are discarded.
 * NOTE(review): partial extraction — the return-type line and several
 * structural lines are not visible in this chunk.
 */
4373 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4374 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
4376 MonoInst *ins, *rvar = NULL;
4377 MonoMethodHeader *cheader;
4378 MonoBasicBlock *ebblock, *sbblock;
4380 MonoMethod *prev_inlined_method;
4381 MonoInst **prev_locals, **prev_args;
4382 MonoType **prev_arg_types;
4383 guint prev_real_offset;
4384 GHashTable *prev_cbb_hash;
4385 MonoBasicBlock **prev_cil_offset_to_bb;
4386 MonoBasicBlock *prev_cbb;
4387 unsigned char* prev_cil_start;
4388 guint32 prev_cil_offset_to_bb_len;
4389 MonoMethod *prev_current_method;
4390 MonoGenericContext *prev_generic_context;
4391 gboolean ret_var_set, prev_ret_var_set;
4393 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional env-var driven name filters (debugging aids) */
4395 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4396 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
4399 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4400 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
4404 if (cfg->verbose_level > 2)
4405 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4407 if (!cmethod->inline_info) {
4408 mono_jit_stats.inlineable_methods++;
4409 cmethod->inline_info = 1;
4411 /* allocate space to store the return value */
4412 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4413 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
4416 /* allocate local variables */
4417 cheader = mono_method_get_header (cmethod);
4418 prev_locals = cfg->locals;
4419 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4420 for (i = 0; i < cheader->num_locals; ++i)
4421 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4423 /* allocate start and end blocks */
4424 /* This is needed so if the inline is aborted, we can clean up */
4425 NEW_BBLOCK (cfg, sbblock);
4426 sbblock->real_offset = real_offset;
4428 NEW_BBLOCK (cfg, ebblock);
4429 ebblock->block_num = cfg->num_bblocks++;
4430 ebblock->real_offset = real_offset;
/* Save the parts of CFG state that mono_method_to_ir () will overwrite */
4432 prev_args = cfg->args;
4433 prev_arg_types = cfg->arg_types;
4434 prev_inlined_method = cfg->inlined_method;
4435 cfg->inlined_method = cmethod;
4436 cfg->ret_var_set = FALSE;
4437 cfg->inline_depth ++;
4438 prev_real_offset = cfg->real_offset;
4439 prev_cbb_hash = cfg->cbb_hash;
4440 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4441 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4442 prev_cil_start = cfg->cil_start;
4443 prev_cbb = cfg->cbb;
4444 prev_current_method = cfg->current_method;
4445 prev_generic_context = cfg->generic_context;
4446 prev_ret_var_set = cfg->ret_var_set;
/* Convert the callee's IL; negative cost means the inline was aborted */
4448 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4450 ret_var_set = cfg->ret_var_set;
/* Restore the caller's CFG state */
4452 cfg->inlined_method = prev_inlined_method;
4453 cfg->real_offset = prev_real_offset;
4454 cfg->cbb_hash = prev_cbb_hash;
4455 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4456 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4457 cfg->cil_start = prev_cil_start;
4458 cfg->locals = prev_locals;
4459 cfg->args = prev_args;
4460 cfg->arg_types = prev_arg_types;
4461 cfg->current_method = prev_current_method;
4462 cfg->generic_context = prev_generic_context;
4463 cfg->ret_var_set = prev_ret_var_set;
4464 cfg->inline_depth --;
/* Accept the inline if it was cheap enough (or forced) */
4466 if ((costs >= 0 && costs < 60) || inline_allways) {
4467 if (cfg->verbose_level > 2)
4468 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4470 mono_jit_stats.inlined_methods++;
4472 /* always add some code to avoid block split failures */
4473 MONO_INST_NEW (cfg, ins, OP_NOP);
4474 MONO_ADD_INS (prev_cbb, ins);
4476 prev_cbb->next_bb = sbblock;
4477 link_bblock (cfg, prev_cbb, sbblock);
4480 * Get rid of the begin and end bblocks if possible to aid local
4483 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4485 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4486 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4488 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4489 MonoBasicBlock *prev = ebblock->in_bb [0];
4490 mono_merge_basic_blocks (cfg, prev, ebblock);
4492 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4493 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4494 cfg->cbb = prev_cbb;
4502 * If the inlined method contains only a throw, then the ret var is not
4503 * set, so set it to a dummy value.
4506 static double r8_0 = 0.0;
4508 switch (rvar->type) {
4510 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4513 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4518 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4521 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4522 ins->type = STACK_R8;
4523 ins->inst_p0 = (void*)&r8_0;
4524 ins->dreg = rvar->dreg;
4525 MONO_ADD_INS (cfg->cbb, ins);
4528 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4531 g_assert_not_reached ();
/* Push the return value onto the evaluation stack */
4535 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
/* Abort path: reset exception state and drop the new bblocks */
4540 if (cfg->verbose_level > 2)
4541 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4542 cfg->exception_type = MONO_EXCEPTION_NONE;
4543 mono_loader_clear_error ();
4545 /* This gets rid of the newly added bblocks */
4546 cfg->cbb = prev_cbb;
4552 * Some of these comments may well be out-of-date.
4553 * Design decisions: we do a single pass over the IL code (and we do bblock
4554 * splitting/merging in the few cases when it's required: a back jump to an IL
4555 * address that was not already seen as bblock starting point).
4556 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4557 * Complex operations are decomposed in simpler ones right away. We need to let the
4558 * arch-specific code peek and poke inside this process somehow (except when the
4559 * optimizations can take advantage of the full semantic info of coarse opcodes).
4560 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4561 * MonoInst->opcode initially is the IL opcode or some simplification of that
4562 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4563 * opcode with value bigger than OP_LAST.
4564 * At this point the IR can be handed over to an interpreter, a dumb code generator
4565 * or to the optimizing code generator that will translate it to SSA form.
4567 * Profiling directed optimizations.
4568 * We may compile by default with few or no optimizations and instrument the code
4569 * or the user may indicate what methods to optimize the most either in a config file
4570 * or through repeated runs where the compiler applies offline the optimizations to
4571 * each method and then decides if it was worth it.
/*
 * In-line verification helpers used throughout mono_method_to_ir ().
 * Each macro expands to UNVERIFIED (which aborts compilation of the
 * method) when the corresponding IL well-formedness condition fails:
 * stack depth bounds, argument/local indexes, opcode operand size, etc.
 * CHECK_TYPELOAD instead routes to the load_error path when a class
 * failed to load.
 */
4574 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4575 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4576 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
/* The unsigned casts below make a negative index fail the range check too. */
4577 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4578 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4579 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4580 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
4581 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
4583 /* offset from br.s -> br like opcodes */
4584 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return whether the CIL address IP belongs to basic block BB: the
 * cil_offset_to_bb map yields either NULL (offset is not a bblock start,
 * so it is still inside the current block) or the block starting there.
 */
4587 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4589 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4591 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   First pass over the IL stream [START, END): decode each opcode and
 * create (via GET_BBLOCK) a basic block at every branch target and at the
 * instruction following each branch, so the main conversion pass can rely
 * on cfg->cil_offset_to_bb being populated.  NOTE(review): on a decode
 * error this presumably reports the failing position through *POS — the
 * error-path lines are not visible in this excerpt; confirm against the
 * full source.
 */
4595 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4597 unsigned char *ip = start;
4598 unsigned char *target;
4601 MonoBasicBlock *bblock;
4602 const MonoOpcode *opcode;
/* cli_addr is the IL offset of the opcode being decoded. */
4605 cli_addr = ip - start;
4606 i = mono_opcode_value ((const guint8 **)&ip, end);
4609 opcode = &mono_opcodes [i];
/* Dispatch on the operand encoding; only branch-like operands create bblocks. */
4610 switch (opcode->argument) {
4611 case MonoInlineNone:
4614 case MonoInlineString:
4615 case MonoInlineType:
4616 case MonoInlineField:
4617 case MonoInlineMethod:
4620 case MonoShortInlineR:
4627 case MonoShortInlineVar:
4628 case MonoShortInlineI:
/* Short branch: 1-byte signed displacement relative to the next instruction. */
4631 case MonoShortInlineBrTarget:
4632 target = start + cli_addr + 2 + (signed char)ip [1];
4633 GET_BBLOCK (cfg, bblock, target);
/* The fall-through successor after the branch starts a block too. */
4636 GET_BBLOCK (cfg, bblock, ip);
/* Long branch: 4-byte signed displacement. */
4638 case MonoInlineBrTarget:
4639 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4640 GET_BBLOCK (cfg, bblock, target);
4643 GET_BBLOCK (cfg, bblock, ip);
/* switch: N 32-bit targets, all relative to the end of the whole instruction. */
4645 case MonoInlineSwitch: {
4646 guint32 n = read32 (ip + 1);
4649 cli_addr += 5 + 4 * n;
4650 target = start + cli_addr;
4651 GET_BBLOCK (cfg, bblock, target);
4653 for (j = 0; j < n; ++j) {
4654 target = start + cli_addr + (gint32)read32 (ip);
4655 GET_BBLOCK (cfg, bblock, target);
4665 g_assert_not_reached ();
/* Mark blocks ending in a throw as out-of-line so later passes can
 * move this presumably-cold code out of the hot path. */
4668 if (i == CEE_THROW) {
4669 unsigned char *bb_start = ip - 1;
4671 /* Find the start of the bblock containing the throw */
4673 while ((bb_start >= start) && !bblock) {
4674 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
4678 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN to a MonoMethod.  For wrapper methods the "token" is
 * really an index into the wrapper's own data, so it is looked up there;
 * otherwise it is resolved through the enclosing image with the given
 * generic CONTEXT.  "allow_open" = the result may still be an open
 * constructed method (see mini_get_method () below for the checked variant).
 */
4687 static inline MonoMethod *
4688 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4692 if (m->wrapper_type != MONO_WRAPPER_NONE)
4693 return mono_method_get_wrapper_data (m, token);
4695 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when not compiling shared
 * generic code an open constructed declaring class is rejected —
 * NOTE(review): the rejection action itself (presumably returning NULL)
 * is on a line not visible in this excerpt; confirm against full source.
 */
4700 static inline MonoMethod *
4701 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4703 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
4705 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass, taking it from the wrapper data for
 * wrapper methods and from the image (with generic CONTEXT) otherwise.
 * The class is initialized before being returned.
 */
4711 static inline MonoClass*
4712 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4716 if (method->wrapper_type != MONO_WRAPPER_NONE)
4717 klass = mono_method_get_wrapper_data (method, token);
4719 klass = mono_class_get_full (method->klass->image, token, context);
4721 mono_class_init (klass);
/*
 * Returns TRUE if the JIT should abort inlining because "callee"
 * is influenced by security attributes.
 */
/*
 * check_linkdemand:
 *
 *   Evaluate CAS LinkDemand security between CALLER and CALLEE.  For the
 * ECMA public-key pseudo-demand, code throwing a SecurityException is
 * emitted before the call site; any other failure is recorded on the cfg
 * as MONO_EXCEPTION_SECURITY_LINKDEMAND (without clobbering an earlier
 * exception).  The declsec check is skipped when compiling an inlined
 * body (cfg->method != caller) only as the outer condition shows.
 */
4730 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
4734 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
4738 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
4739 if (result == MONO_JIT_SECURITY_OK)
4742 if (result == MONO_JIT_LINKDEMAND_ECMA) {
4743 /* Generate code to throw a SecurityException before the actual call/link */
4744 MonoSecurityManager *secman = mono_security_manager_get_methods ();
/* arg 0: the kind of security failure (4); arg 1: the offending caller. */
4747 NEW_ICONST (cfg, args [0], 4);
4748 NEW_METHODCONST (cfg, args [1], caller);
4749 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
4750 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
4751 /* don't hide previous results */
4752 cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
4753 cfg->exception_data = result;
/*
 * throw_exception:
 *
 *   Return (and lazily cache in a function-local static) the managed
 * SecurityManager.ThrowException(Exception) helper used to raise
 * security exceptions from JITted code.
 * NOTE(review): the static cache is not obviously protected by a lock in
 * the visible lines — presumably benign because lookup is idempotent.
 */
4761 throw_exception (void)
4763 static MonoMethod *method = NULL;
4766 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4767 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit IR that calls SecurityManager.ThrowException with the
 * pre-created exception object EX, i.e. an unconditional throw at this
 * point in the method being compiled.
 */
4774 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
4776 MonoMethod *thrower = throw_exception ();
4779 EMIT_NEW_PCONST (cfg, args [0], ex);
4780 mono_emit_method_call (cfg, thrower, args, NULL);
/*
 * Return the original method is a wrapper is specified. We can only access
 * the custom attributes from the original method.
 */
/* get_original_method: unwrap METHOD for CoreCLR security checks —
 * non-wrappers are returned unchanged, native-to-managed wrappers are
 * treated specially (see comment below), everything else is resolved to
 * the wrapped method. */
4788 get_original_method (MonoMethod *method)
4790 if (method->wrapper_type == MONO_WRAPPER_NONE)
4793 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
4794 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
4797 /* in other cases we need to find the original method */
4798 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check for a field access: when FIELD's declaring
 * class is Critical and the (unwrapped) CALLER is Transparent, emit a
 * FieldAccessException throw at this point in the compiled code.
 */
4802 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
4803 MonoBasicBlock *bblock, unsigned char *ip)
4805 /* there's no restriction to access Transparent or SafeCritical fields, so we only check calls to Critical methods */
4806 if (mono_security_core_clr_class_level (mono_field_get_parent (field)) != MONO_SECURITY_CORE_CLR_CRITICAL)
4809 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4810 caller = get_original_method (caller);
4814 /* caller is Critical! only SafeCritical and Critical callers can access the field, so we throw if caller is Transparent */
4815 if (mono_security_core_clr_method_level (caller, TRUE) == MONO_SECURITY_CORE_CLR_TRANSPARENT)
4816 emit_throw_exception (cfg, mono_get_exception_field_access ());
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check for a call site: when CALLEE is Critical and
 * the (unwrapped) CALLER is Transparent, emit a MethodAccessException
 * throw at this point in the compiled code.  Mirrors
 * ensure_method_is_allowed_to_access_field () above.
 */
4820 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4821 MonoBasicBlock *bblock, unsigned char *ip)
4823 /* there's no restriction to call Transparent or SafeCritical code, so we only check calls to Critical methods */
4824 if (mono_security_core_clr_method_level (callee, TRUE) != MONO_SECURITY_CORE_CLR_CRITICAL)
4827 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4828 caller = get_original_method (caller);
4832 /* caller is Critical! only SafeCritical and Critical callers can call it, so we throw if the caller is Transparent */
4833 if (mono_security_core_clr_method_level (caller, TRUE) == MONO_SECURITY_CORE_CLR_TRANSPARENT)
4834 emit_throw_exception (cfg, mono_get_exception_method_access ());
/*
 * Check that the IL instructions at ip are the array initialization
 * sequence and return the pointer to the data and the size.
 */
/*
 * initialize_array_data:
 *
 *   Pattern-match the canonical "dup; ldtoken <field>; call
 * RuntimeHelpers::InitializeArray" sequence following a newarr so the
 * JIT can replace it with a direct memory copy.  On a match, returns the
 * raw data pointer (or, for AOT, the RVA boxed as a pointer — the caller
 * resolves it at load time) and fills *out_size/*out_field_token.
 * Non-matches return NULL; NOTE(review): several early-return lines are
 * not visible in this excerpt.
 */
4842 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
4845 * newarr[System.Int32]
4847 * ldtoken field valuetype ...
4848 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip [5] == 0x4 checks the ldtoken operand's token table (a Field token). */
4850 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
4851 guint32 token = read32 (ip + 7);
4852 guint32 field_token = read32 (ip + 2);
4853 guint32 field_index = field_token & 0xffffff;
4855 const char *data_ptr;
4857 MonoMethod *cmethod;
4858 MonoClass *dummy_class;
4859 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
4865 *out_field_token = field_token;
4867 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Make sure the callee really is corlib's RuntimeHelpers.InitializeArray. */
4870 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
/* Only element types whose blob layout matches memory layout qualify. */
4872 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
4873 case MONO_TYPE_BOOLEAN:
4877 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
4878 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
4879 case MONO_TYPE_CHAR:
4889 return NULL; /* stupid ARM FP swapped format */
/* The array data must fit inside the RVA field's declared size. */
4899 if (size > mono_type_size (field->type, &dummy_align))
4902 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
4903 if (!method->klass->image->dynamic) {
4904 field_index = read32 (ip + 2) & 0xffffff;
4905 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
4906 data_ptr = mono_image_rva_map (method->klass->image, rva);
4907 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
4908 /* for aot code we do the lookup on load */
4909 if (aot && data_ptr)
4910 return GUINT_TO_POINTER (rva);
4912 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* Dynamic (SRE) images have no RVA mapping; read the blob directly. */
4914 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on the cfg for the IL instruction
 * at IP, embedding the method name and either the disassembled opcode or
 * a note that the body is empty.  Compilation is aborted by the caller.
 */
4922 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
4924 char *method_fname = mono_method_full_name (method, TRUE);
4927 if (mono_method_get_header (method)->code_size == 0)
4928 method_code = g_strdup ("method body is empty.");
4930 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
4931 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
4932 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
4933 g_free (method_fname);
4934 g_free (method_code);
/*
 * set_exception_object:
 *
 *   Record a pre-built managed exception object on the cfg.  The
 * exception_ptr slot is registered as a GC root first so the object
 * stays alive until the compile failure is reported.
 */
4938 set_exception_object (MonoCompile *cfg, MonoException *exception)
4940 cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
4941 MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
4942 cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 *
 *   Return whether KLASS is a reference type, resolving type variables
 * through the generic-sharing context first so a shared T instantiated
 * with a reference type is answered correctly.
 */
4946 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4950 if (cfg->generic_sharing_context)
4951 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
4953 type = &klass->byval_arg;
4954 return MONO_TYPE_IS_REFERENCE (type);
/*
 * emit_stloc_ir:
 *
 *   Emit the store of the stack value *SP into local N.  When the store
 * would be a plain register move and the value on top of the stack is a
 * freshly-emitted constant, the move is folded away by retargeting the
 * constant's destination register to the local's register.
 */
4958 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
4961 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
4962 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
4963 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
4964 /* Optimize reg-reg moves away */
4966 * Can't optimize other opcodes, since sp[0] might point to
4967 * the last ins of a decomposed opcode.
4969 sp [0]->dreg = (cfg)->locals [n]->dreg;
4971 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
/*
 * ldloca inhibits many optimizations so try to get rid of it in common
 * cases.
 */
/*
 * emit_optimized_ldloca_ir:
 *
 *   Peephole for "ldloca N; initobj <T>": instead of taking the local's
 * address, emit a direct zeroing of the local (NULL store for reference
 * types, VZERO for value types) and return the IP past the consumed
 * sequence so the caller skips it.  NOTE(review): the non-matching path
 * (presumably returning NULL) is on lines not visible in this excerpt.
 */
4979 static inline unsigned char *
4980 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
4989 local = read16 (ip + 2);
/* Only fold when the following initobj is in the same basic block. */
4993 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
4994 gboolean skip = FALSE;
4996 /* From the INITOBJ case */
4997 token = read32 (ip + 2);
4998 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
4999 CHECK_TYPELOAD (klass);
5000 if (generic_class_is_reference_type (cfg, klass)) {
5001 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5002 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5003 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5004 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5005 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/*
 * is_exception_class:
 *
 *   Walk CLASS's ancestor chain looking for System.Exception; used to
 * decide whether a type derives from the managed exception base class.
 */
5018 is_exception_class (MonoClass *class)
5021 if (class == mono_defaults.exception_class)
5023 class = class->parent;
5029 * mono_method_to_ir:
5031 * Translate the .net IL into linear IR.
5034 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5035 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5036 guint inline_offset, gboolean is_virtual_call)
5038 MonoInst *ins, **sp, **stack_start;
5039 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5040 MonoMethod *cmethod, *method_definition;
5041 MonoInst **arg_array;
5042 MonoMethodHeader *header;
5044 guint32 token, ins_flag;
5046 MonoClass *constrained_call = NULL;
5047 unsigned char *ip, *end, *target, *err_pos;
5048 static double r8_0 = 0.0;
5049 MonoMethodSignature *sig;
5050 MonoGenericContext *generic_context = NULL;
5051 MonoGenericContainer *generic_container = NULL;
5052 MonoType **param_types;
5053 int i, n, start_new_bblock, dreg;
5054 int num_calls = 0, inline_costs = 0;
5055 int breakpoint_id = 0;
5057 MonoBoolean security, pinvoke;
5058 MonoSecurityManager* secman = NULL;
5059 MonoDeclSecurityActions actions;
5060 GSList *class_inits = NULL;
5061 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5063 gboolean init_locals, seq_points;
5065 /* serialization and xdomain stuff may need access to private fields and methods */
5066 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5067 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5068 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5069 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5070 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5071 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5073 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5075 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5076 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5077 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5078 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5080 image = method->klass->image;
5081 header = mono_method_get_header (method);
5082 generic_container = mono_method_get_generic_container (method);
5083 sig = mono_method_signature (method);
5084 num_args = sig->hasthis + sig->param_count;
5085 ip = (unsigned char*)header->code;
5086 cfg->cil_start = ip;
5087 end = ip + header->code_size;
5088 mono_jit_stats.cil_code_size += header->code_size;
5089 init_locals = header->init_locals;
5091 seq_points = cfg->gen_seq_points && cfg->method == method;
5094 * Methods without init_locals set could cause asserts in various passes
5099 method_definition = method;
5100 while (method_definition->is_inflated) {
5101 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5102 method_definition = imethod->declaring;
5105 /* SkipVerification is not allowed if core-clr is enabled */
5106 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5108 dont_verify_stloc = TRUE;
5111 if (!dont_verify && mini_method_verify (cfg, method_definition))
5112 goto exception_exit;
5114 if (mono_debug_using_mono_debugger ())
5115 cfg->keep_cil_nops = TRUE;
5117 if (sig->is_inflated)
5118 generic_context = mono_method_get_context (method);
5119 else if (generic_container)
5120 generic_context = &generic_container->context;
5121 cfg->generic_context = generic_context;
5123 if (!cfg->generic_sharing_context)
5124 g_assert (!sig->has_type_parameters);
5126 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5127 g_assert (method->is_inflated);
5128 g_assert (mono_method_get_context (method)->method_inst);
5130 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5131 g_assert (sig->generic_param_count);
5133 if (cfg->method == method) {
5134 cfg->real_offset = 0;
5136 cfg->real_offset = inline_offset;
5139 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5140 cfg->cil_offset_to_bb_len = header->code_size;
5142 cfg->current_method = method;
5144 if (cfg->verbose_level > 2)
5145 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5147 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5149 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5150 for (n = 0; n < sig->param_count; ++n)
5151 param_types [n + sig->hasthis] = sig->params [n];
5152 cfg->arg_types = param_types;
5154 dont_inline = g_list_prepend (dont_inline, method);
5155 if (cfg->method == method) {
5157 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5158 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5161 NEW_BBLOCK (cfg, start_bblock);
5162 cfg->bb_entry = start_bblock;
5163 start_bblock->cil_code = NULL;
5164 start_bblock->cil_length = 0;
5167 NEW_BBLOCK (cfg, end_bblock);
5168 cfg->bb_exit = end_bblock;
5169 end_bblock->cil_code = NULL;
5170 end_bblock->cil_length = 0;
5171 g_assert (cfg->num_bblocks == 2);
5173 arg_array = cfg->args;
5175 if (header->num_clauses) {
5176 cfg->spvars = g_hash_table_new (NULL, NULL);
5177 cfg->exvars = g_hash_table_new (NULL, NULL);
5179 /* handle exception clauses */
5180 for (i = 0; i < header->num_clauses; ++i) {
5181 MonoBasicBlock *try_bb;
5182 MonoExceptionClause *clause = &header->clauses [i];
5183 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5184 try_bb->real_offset = clause->try_offset;
5185 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5186 tblock->real_offset = clause->handler_offset;
5187 tblock->flags |= BB_EXCEPTION_HANDLER;
5189 link_bblock (cfg, try_bb, tblock);
5191 if (*(ip + clause->handler_offset) == CEE_POP)
5192 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5194 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5195 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5196 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5197 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5198 MONO_ADD_INS (tblock, ins);
5200 /* todo: is a fault block unsafe to optimize? */
5201 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5202 tblock->flags |= BB_EXCEPTION_UNSAFE;
5206 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5208 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5210 /* catch and filter blocks get the exception object on the stack */
5211 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5212 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5213 MonoInst *dummy_use;
5215 /* mostly like handle_stack_args (), but just sets the input args */
5216 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5217 tblock->in_scount = 1;
5218 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5219 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5222 * Add a dummy use for the exvar so its liveness info will be
5226 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5228 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5229 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5230 tblock->flags |= BB_EXCEPTION_HANDLER;
5231 tblock->real_offset = clause->data.filter_offset;
5232 tblock->in_scount = 1;
5233 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5234 /* The filter block shares the exvar with the handler block */
5235 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5236 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5237 MONO_ADD_INS (tblock, ins);
5241 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5242 clause->data.catch_class &&
5243 cfg->generic_sharing_context &&
5244 mono_class_check_context_used (clause->data.catch_class)) {
5246 * In shared generic code with catch
5247 * clauses containing type variables
5248 * the exception handling code has to
5249 * be able to get to the rgctx.
5250 * Therefore we have to make sure that
5251 * the vtable/mrgctx argument (for
5252 * static or generic methods) or the
5253 * "this" argument (for non-static
5254 * methods) are live.
5256 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5257 mini_method_get_context (method)->method_inst ||
5258 method->klass->valuetype) {
5259 mono_get_vtable_var (cfg);
5261 MonoInst *dummy_use;
5263 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5268 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5269 cfg->cbb = start_bblock;
5270 cfg->args = arg_array;
5271 mono_save_args (cfg, sig, inline_args);
5274 /* FIRST CODE BLOCK */
5275 NEW_BBLOCK (cfg, bblock);
5276 bblock->cil_code = ip;
5280 ADD_BBLOCK (cfg, bblock);
5282 if (cfg->method == method) {
5283 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5284 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5285 MONO_INST_NEW (cfg, ins, OP_BREAK);
5286 MONO_ADD_INS (bblock, ins);
5290 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5291 secman = mono_security_manager_get_methods ();
5293 security = (secman && mono_method_has_declsec (method));
5294 /* at this point having security doesn't mean we have any code to generate */
5295 if (security && (cfg->method == method)) {
5296 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5297 * And we do not want to enter the next section (with allocation) if we
5298 * have nothing to generate */
5299 security = mono_declsec_get_demands (method, &actions);
5302 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5303 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5305 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5306 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5307 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5309 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5310 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5314 mono_custom_attrs_free (custom);
5317 custom = mono_custom_attrs_from_class (wrapped->klass);
5318 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5322 mono_custom_attrs_free (custom);
5325 /* not a P/Invoke after all */
5330 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5331 /* we use a separate basic block for the initialization code */
5332 NEW_BBLOCK (cfg, init_localsbb);
5333 cfg->bb_init = init_localsbb;
5334 init_localsbb->real_offset = cfg->real_offset;
5335 start_bblock->next_bb = init_localsbb;
5336 init_localsbb->next_bb = bblock;
5337 link_bblock (cfg, start_bblock, init_localsbb);
5338 link_bblock (cfg, init_localsbb, bblock);
5340 cfg->cbb = init_localsbb;
5342 start_bblock->next_bb = bblock;
5343 link_bblock (cfg, start_bblock, bblock);
5346 /* at this point we know, if security is TRUE, that some code needs to be generated */
5347 if (security && (cfg->method == method)) {
5350 mono_jit_stats.cas_demand_generation++;
5352 if (actions.demand.blob) {
5353 /* Add code for SecurityAction.Demand */
5354 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5355 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5356 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5357 mono_emit_method_call (cfg, secman->demand, args, NULL);
5359 if (actions.noncasdemand.blob) {
5360 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5361 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5362 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5363 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5364 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5365 mono_emit_method_call (cfg, secman->demand, args, NULL);
5367 if (actions.demandchoice.blob) {
5368 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5369 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5370 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5371 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5372 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5376 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5378 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5381 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5382 /* check if this is native code, e.g. an icall or a p/invoke */
5383 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5384 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5386 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
5387 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
5389 /* if this ia a native call then it can only be JITted from platform code */
5390 if ((icall || pinvk) && method->klass && method->klass->image) {
5391 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
5392 MonoException *ex = icall ? mono_get_exception_security () :
5393 mono_get_exception_method_access ();
5394 emit_throw_exception (cfg, ex);
5401 if (header->code_size == 0)
5404 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5409 if (cfg->method == method)
5410 mono_debug_init_method (cfg, bblock, breakpoint_id);
5412 for (n = 0; n < header->num_locals; ++n) {
5413 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5418 /* We force the vtable variable here for all shared methods
5419 for the possibility that they might show up in a stack
5420 trace where their exact instantiation is needed. */
5421 if (cfg->generic_sharing_context && method == cfg->method) {
5422 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5423 mini_method_get_context (method)->method_inst ||
5424 method->klass->valuetype) {
5425 mono_get_vtable_var (cfg);
5427 /* FIXME: Is there a better way to do this?
5428 We need the variable live for the duration
5429 of the whole method. */
5430 cfg->args [0]->flags |= MONO_INST_INDIRECT;
5434 /* add a check for this != NULL to inlined methods */
5435 if (is_virtual_call) {
5438 NEW_ARGLOAD (cfg, arg_ins, 0);
5439 MONO_ADD_INS (cfg->cbb, arg_ins);
5440 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
5443 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5444 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5447 start_new_bblock = 0;
5451 if (cfg->method == method)
5452 cfg->real_offset = ip - header->code;
5454 cfg->real_offset = inline_offset;
5459 if (start_new_bblock) {
5460 bblock->cil_length = ip - bblock->cil_code;
5461 if (start_new_bblock == 2) {
5462 g_assert (ip == tblock->cil_code);
5464 GET_BBLOCK (cfg, tblock, ip);
5466 bblock->next_bb = tblock;
5469 start_new_bblock = 0;
5470 for (i = 0; i < bblock->in_scount; ++i) {
5471 if (cfg->verbose_level > 3)
5472 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5473 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5477 g_slist_free (class_inits);
5480 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5481 link_bblock (cfg, bblock, tblock);
5482 if (sp != stack_start) {
5483 handle_stack_args (cfg, stack_start, sp - stack_start);
5485 CHECK_UNVERIFIABLE (cfg);
5487 bblock->next_bb = tblock;
5490 for (i = 0; i < bblock->in_scount; ++i) {
5491 if (cfg->verbose_level > 3)
5492 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5493 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5496 g_slist_free (class_inits);
5502 * Sequence points are points where the debugger can place a breakpoint.
5503 * Currently, we generate these automatically at points where the IL
5506 if (seq_points && sp == stack_start) {
5507 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
5508 MONO_ADD_INS (cfg->cbb, ins);
5511 bblock->real_offset = cfg->real_offset;
5513 if ((cfg->method == method) && cfg->coverage_info) {
5514 guint32 cil_offset = ip - header->code;
5515 cfg->coverage_info->data [cil_offset].cil_code = ip;
5517 /* TODO: Use an increment here */
5518 #if defined(TARGET_X86)
5519 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5520 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5522 MONO_ADD_INS (cfg->cbb, ins);
5524 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5525 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5529 if (cfg->verbose_level > 3)
5530 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5534 if (cfg->keep_cil_nops)
5535 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
5537 MONO_INST_NEW (cfg, ins, OP_NOP);
5539 MONO_ADD_INS (bblock, ins);
5542 if (should_insert_brekpoint (cfg->method))
5543 MONO_INST_NEW (cfg, ins, OP_BREAK);
5545 MONO_INST_NEW (cfg, ins, OP_NOP);
5547 MONO_ADD_INS (bblock, ins);
5553 CHECK_STACK_OVF (1);
5554 n = (*ip)-CEE_LDARG_0;
5556 EMIT_NEW_ARGLOAD (cfg, ins, n);
5564 CHECK_STACK_OVF (1);
5565 n = (*ip)-CEE_LDLOC_0;
5567 EMIT_NEW_LOCLOAD (cfg, ins, n);
5576 n = (*ip)-CEE_STLOC_0;
5579 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5581 emit_stloc_ir (cfg, sp, header, n);
5588 CHECK_STACK_OVF (1);
5591 EMIT_NEW_ARGLOAD (cfg, ins, n);
5597 CHECK_STACK_OVF (1);
5600 NEW_ARGLOADA (cfg, ins, n);
5601 MONO_ADD_INS (cfg->cbb, ins);
5611 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5613 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5618 CHECK_STACK_OVF (1);
5621 EMIT_NEW_LOCLOAD (cfg, ins, n);
5625 case CEE_LDLOCA_S: {
5626 unsigned char *tmp_ip;
5628 CHECK_STACK_OVF (1);
5629 CHECK_LOCAL (ip [1]);
5631 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
5637 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5646 CHECK_LOCAL (ip [1]);
5647 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5649 emit_stloc_ir (cfg, sp, header, ip [1]);
5654 CHECK_STACK_OVF (1);
5655 EMIT_NEW_PCONST (cfg, ins, NULL);
5656 ins->type = STACK_OBJ;
5661 CHECK_STACK_OVF (1);
5662 EMIT_NEW_ICONST (cfg, ins, -1);
5675 CHECK_STACK_OVF (1);
5676 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
5682 CHECK_STACK_OVF (1);
5684 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
5690 CHECK_STACK_OVF (1);
5691 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
5697 CHECK_STACK_OVF (1);
5698 MONO_INST_NEW (cfg, ins, OP_I8CONST);
5699 ins->type = STACK_I8;
5700 ins->dreg = alloc_dreg (cfg, STACK_I8);
5702 ins->inst_l = (gint64)read64 (ip);
5703 MONO_ADD_INS (bblock, ins);
5709 gboolean use_aotconst = FALSE;
5711 #ifdef TARGET_POWERPC
5712 /* FIXME: Clean this up */
5713 if (cfg->compile_aot)
5714 use_aotconst = TRUE;
5717 /* FIXME: we should really allocate this only late in the compilation process */
5718 f = mono_domain_alloc (cfg->domain, sizeof (float));
5720 CHECK_STACK_OVF (1);
5726 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
5728 dreg = alloc_freg (cfg);
5729 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
5730 ins->type = STACK_R8;
5732 MONO_INST_NEW (cfg, ins, OP_R4CONST);
5733 ins->type = STACK_R8;
5734 ins->dreg = alloc_dreg (cfg, STACK_R8);
5736 MONO_ADD_INS (bblock, ins);
5746 gboolean use_aotconst = FALSE;
5748 #ifdef TARGET_POWERPC
5749 /* FIXME: Clean this up */
5750 if (cfg->compile_aot)
5751 use_aotconst = TRUE;
5754 /* FIXME: we should really allocate this only late in the compilation process */
5755 d = mono_domain_alloc (cfg->domain, sizeof (double));
5757 CHECK_STACK_OVF (1);
5763 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
5765 dreg = alloc_freg (cfg);
5766 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
5767 ins->type = STACK_R8;
5769 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5770 ins->type = STACK_R8;
5771 ins->dreg = alloc_dreg (cfg, STACK_R8);
5773 MONO_ADD_INS (bblock, ins);
5782 MonoInst *temp, *store;
5784 CHECK_STACK_OVF (1);
5788 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
5789 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
5791 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5794 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5807 if (sp [0]->type == STACK_R8)
5808 /* we need to pop the value from the x86 FP stack */
5809 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
5818 if (stack_start != sp)
5820 token = read32 (ip + 1);
5821 /* FIXME: check the signature matches */
5822 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5827 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
5828 GENERIC_SHARING_FAILURE (CEE_JMP);
5830 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5831 CHECK_CFG_EXCEPTION;
5833 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
5835 MonoMethodSignature *fsig = mono_method_signature (cmethod);
5838 /* Handle tail calls similarly to calls */
5839 n = fsig->param_count + fsig->hasthis;
5841 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
5842 call->method = cmethod;
5843 call->tail_call = TRUE;
5844 call->signature = mono_method_signature (cmethod);
5845 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
5846 call->inst.inst_p0 = cmethod;
5847 for (i = 0; i < n; ++i)
5848 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
5850 mono_arch_emit_call (cfg, call);
5851 MONO_ADD_INS (bblock, (MonoInst*)call);
5854 for (i = 0; i < num_args; ++i)
5855 /* Prevent arguments from being optimized away */
5856 arg_array [i]->flags |= MONO_INST_VOLATILE;
5858 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
5859 ins = (MonoInst*)call;
5860 ins->inst_p0 = cmethod;
5861 MONO_ADD_INS (bblock, ins);
5865 start_new_bblock = 1;
5870 case CEE_CALLVIRT: {
5871 MonoInst *addr = NULL;
5872 MonoMethodSignature *fsig = NULL;
5874 int virtual = *ip == CEE_CALLVIRT;
5875 int calli = *ip == CEE_CALLI;
5876 gboolean pass_imt_from_rgctx = FALSE;
5877 MonoInst *imt_arg = NULL;
5878 gboolean pass_vtable = FALSE;
5879 gboolean pass_mrgctx = FALSE;
5880 MonoInst *vtable_arg = NULL;
5881 gboolean check_this = FALSE;
5882 gboolean supported_tail_call = FALSE;
5885 token = read32 (ip + 1);
5892 if (method->wrapper_type != MONO_WRAPPER_NONE)
5893 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
5895 fsig = mono_metadata_parse_signature (image, token);
5897 n = fsig->param_count + fsig->hasthis;
5899 if (method->dynamic && fsig->pinvoke) {
5903 * This is a call through a function pointer using a pinvoke
5904 * signature. Have to create a wrapper and call that instead.
5905 * FIXME: This is very slow, need to create a wrapper at JIT time
5906 * instead based on the signature.
5908 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
5909 EMIT_NEW_PCONST (cfg, args [1], fsig);
5911 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
5914 MonoMethod *cil_method;
5916 if (method->wrapper_type != MONO_WRAPPER_NONE) {
5917 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
5918 cil_method = cmethod;
5919 } else if (constrained_call) {
5920 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
5922 * This is needed since get_method_constrained can't find
5923 * the method in klass representing a type var.
5924 * The type var is guaranteed to be a reference type in this
5927 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5928 cil_method = cmethod;
5929 g_assert (!cmethod->klass->valuetype);
5931 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
5934 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5935 cil_method = cmethod;
5940 if (!dont_verify && !cfg->skip_visibility) {
5941 MonoMethod *target_method = cil_method;
5942 if (method->is_inflated) {
5943 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
5945 if (!mono_method_can_access_method (method_definition, target_method) &&
5946 !mono_method_can_access_method (method, cil_method))
5947 METHOD_ACCESS_FAILURE;
5950 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
5951 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
5953 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
5954 /* MS.NET seems to silently convert this to a callvirt */
5957 if (!cmethod->klass->inited)
5958 if (!mono_class_init (cmethod->klass))
5961 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
5962 mini_class_is_system_array (cmethod->klass)) {
5963 array_rank = cmethod->klass->rank;
5964 fsig = mono_method_signature (cmethod);
5966 if (mono_method_signature (cmethod)->pinvoke) {
5967 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
5968 check_for_pending_exc, FALSE);
5969 fsig = mono_method_signature (wrapper);
5970 } else if (constrained_call) {
5971 fsig = mono_method_signature (cmethod);
5973 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
5977 mono_save_token_info (cfg, image, token, cil_method);
5979 n = fsig->param_count + fsig->hasthis;
5981 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
5982 if (check_linkdemand (cfg, method, cmethod))
5984 CHECK_CFG_EXCEPTION;
5987 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
5988 g_assert_not_reached ();
5991 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
5994 if (!cfg->generic_sharing_context && cmethod)
5995 g_assert (!mono_method_check_context_used (cmethod));
5999 //g_assert (!virtual || fsig->hasthis);
6003 if (constrained_call) {
6005 * We have the `constrained.' prefix opcode.
6007 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6009 * The type parameter is instantiated as a valuetype,
6010 * but that type doesn't override the method we're
6011 * calling, so we need to box `this'.
6013 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6014 ins->klass = constrained_call;
6015 sp [0] = handle_box (cfg, ins, constrained_call);
6016 CHECK_CFG_EXCEPTION;
6017 } else if (!constrained_call->valuetype) {
6018 int dreg = alloc_preg (cfg);
6021 * The type parameter is instantiated as a reference
6022 * type. We have a managed pointer on the stack, so
6023 * we need to dereference it here.
6025 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6026 ins->type = STACK_OBJ;
6028 } else if (cmethod->klass->valuetype)
6030 constrained_call = NULL;
6033 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6037 * If the callee is a shared method, then its static cctor
6038 * might not get called after the call was patched.
6040 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6041 emit_generic_class_init (cfg, cmethod->klass);
6042 CHECK_TYPELOAD (cmethod->klass);
6045 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6046 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6047 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6048 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6049 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6052 * Pass vtable iff target method might
6053 * be shared, which means that sharing
6054 * is enabled for its class and its
6055 * context is sharable (and it's not a
6058 if (sharing_enabled && context_sharable &&
6059 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6063 if (cmethod && mini_method_get_context (cmethod) &&
6064 mini_method_get_context (cmethod)->method_inst) {
6065 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6066 MonoGenericContext *context = mini_method_get_context (cmethod);
6067 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6069 g_assert (!pass_vtable);
6071 if (sharing_enabled && context_sharable)
6075 if (cfg->generic_sharing_context && cmethod) {
6076 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6078 context_used = mono_method_check_context_used (cmethod);
6080 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6081 /* Generic method interface
6082 calls are resolved via a
6083 helper function and don't
6085 if (!cmethod_context || !cmethod_context->method_inst)
6086 pass_imt_from_rgctx = TRUE;
6090 * If a shared method calls another
6091 * shared method then the caller must
6092 * have a generic sharing context
6093 * because the magic trampoline
6094 * requires it. FIXME: We shouldn't
6095 * have to force the vtable/mrgctx
6096 * variable here. Instead there
6097 * should be a flag in the cfg to
6098 * request a generic sharing context.
6101 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6102 mono_get_vtable_var (cfg);
6107 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6109 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6111 CHECK_TYPELOAD (cmethod->klass);
6112 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6117 g_assert (!vtable_arg);
6119 if (!cfg->compile_aot) {
6121 * emit_get_rgctx_method () calls mono_class_vtable () so check
6122 * for type load errors before.
6124 mono_class_vtable (cfg->domain, cmethod->klass);
6125 CHECK_TYPELOAD (cmethod->klass);
6128 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6130 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6131 MONO_METHOD_IS_FINAL (cmethod)) {
6138 if (pass_imt_from_rgctx) {
6139 g_assert (!pass_vtable);
6142 imt_arg = emit_get_rgctx_method (cfg, context_used,
6143 cmethod, MONO_RGCTX_INFO_METHOD);
6147 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
6149 /* Calling virtual generic methods */
6150 if (cmethod && virtual &&
6151 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6152 !(MONO_METHOD_IS_FINAL (cmethod) &&
6153 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6154 mono_method_signature (cmethod)->generic_param_count) {
6155 MonoInst *this_temp, *this_arg_temp, *store;
6156 MonoInst *iargs [4];
6158 g_assert (mono_method_signature (cmethod)->is_inflated);
6160 /* Prevent inlining of methods that contain indirect calls */
6163 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK
6164 /* The llvm vcall trampolines doesn't support generic virtual calls yet */
6165 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt && !mono_use_llvm) {
6166 g_assert (!imt_arg);
6168 g_assert (cmethod->is_inflated);
6169 imt_arg = emit_get_rgctx_method (cfg, context_used,
6170 cmethod, MONO_RGCTX_INFO_METHOD);
6171 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6175 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6176 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6177 MONO_ADD_INS (bblock, store);
6179 /* FIXME: This should be a managed pointer */
6180 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6182 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6183 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6184 cmethod, MONO_RGCTX_INFO_METHOD);
6185 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6186 addr = mono_emit_jit_icall (cfg,
6187 mono_helper_compile_generic_method, iargs);
6189 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6191 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6194 if (!MONO_TYPE_IS_VOID (fsig->ret))
6195 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6202 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6203 supported_tail_call = cmethod && MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
6205 supported_tail_call = cmethod && mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6209 /* FIXME: runtime generic context pointer for jumps? */
6210 /* FIXME: handle this for generic sharing eventually */
6211 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) && supported_tail_call) {
6214 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6217 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6218 /* Handle tail calls similarly to calls */
6219 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE);
6221 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6222 call->tail_call = TRUE;
6223 call->method = cmethod;
6224 call->signature = mono_method_signature (cmethod);
6227 * We implement tail calls by storing the actual arguments into the
6228 * argument variables, then emitting a CEE_JMP.
6230 for (i = 0; i < n; ++i) {
6231 /* Prevent argument from being register allocated */
6232 arg_array [i]->flags |= MONO_INST_VOLATILE;
6233 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6237 ins = (MonoInst*)call;
6238 ins->inst_p0 = cmethod;
6239 ins->inst_p1 = arg_array [0];
6240 MONO_ADD_INS (bblock, ins);
6241 link_bblock (cfg, bblock, end_bblock);
6242 start_new_bblock = 1;
6243 /* skip CEE_RET as well */
6249 /* Conversion to a JIT intrinsic */
6250 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6251 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6252 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6263 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6264 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6265 mono_method_check_inlining (cfg, cmethod) &&
6266 !g_list_find (dont_inline, cmethod)) {
6268 gboolean allways = FALSE;
6270 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6271 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6272 /* Prevent inlining of methods that call wrappers */
6274 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6278 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6280 cfg->real_offset += 5;
6283 if (!MONO_TYPE_IS_VOID (fsig->ret))
6284 /* *sp is already set by inline_method */
6287 inline_costs += costs;
6293 inline_costs += 10 * num_calls++;
6295 /* Tail recursion elimination */
6296 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6297 gboolean has_vtargs = FALSE;
6300 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6303 /* keep it simple */
6304 for (i = fsig->param_count - 1; i >= 0; i--) {
6305 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6310 for (i = 0; i < n; ++i)
6311 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6312 MONO_INST_NEW (cfg, ins, OP_BR);
6313 MONO_ADD_INS (bblock, ins);
6314 tblock = start_bblock->out_bb [0];
6315 link_bblock (cfg, bblock, tblock);
6316 ins->inst_target_bb = tblock;
6317 start_new_bblock = 1;
6319 /* skip the CEE_RET, too */
6320 if (ip_in_bb (cfg, bblock, ip + 5))
6330 /* Generic sharing */
6331 /* FIXME: only do this for generic methods if
6332 they are not shared! */
6333 if (context_used && !imt_arg && !array_rank &&
6334 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6335 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6336 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6337 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6340 g_assert (cfg->generic_sharing_context && cmethod);
6344 * We are compiling a call to a
6345 * generic method from shared code,
6346 * which means that we have to look up
6347 * the method in the rgctx and do an
6350 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6353 /* Indirect calls */
6355 g_assert (!imt_arg);
6357 if (*ip == CEE_CALL)
6358 g_assert (context_used);
6359 else if (*ip == CEE_CALLI)
6360 g_assert (!vtable_arg);
6362 /* FIXME: what the hell is this??? */
6363 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6364 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6366 /* Prevent inlining of methods with indirect calls */
6370 #ifdef MONO_ARCH_RGCTX_REG
6372 int rgctx_reg = mono_alloc_preg (cfg);
6374 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6375 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6376 call = (MonoCallInst*)ins;
6377 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6378 cfg->uses_rgctx_reg = TRUE;
6379 call->rgctx_reg = TRUE;
6384 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6386 * Instead of emitting an indirect call, emit a direct call
6387 * with the contents of the aotconst as the patch info.
6389 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6391 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6392 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
6395 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6398 if (!MONO_TYPE_IS_VOID (fsig->ret))
6399 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6410 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6411 if (sp [fsig->param_count]->type == STACK_OBJ) {
6412 MonoInst *iargs [2];
6415 iargs [1] = sp [fsig->param_count];
6417 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6420 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6421 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6422 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6423 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6425 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6428 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6429 if (!cmethod->klass->element_class->valuetype && !readonly)
6430 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6431 CHECK_TYPELOAD (cmethod->klass);
6434 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6437 g_assert_not_reached ();
6445 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6447 if (!MONO_TYPE_IS_VOID (fsig->ret))
6448 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6458 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6460 } else if (imt_arg) {
6461 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6463 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6466 if (!MONO_TYPE_IS_VOID (fsig->ret))
6467 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6474 if (cfg->method != method) {
6475 /* return from inlined method */
6477 * If in_count == 0, that means the ret is unreachable due to
6478 * being preceeded by a throw. In that case, inline_method () will
6479 * handle setting the return value
6480 * (test case: test_0_inline_throw ()).
6482 if (return_var && cfg->cbb->in_count) {
6486 //g_assert (returnvar != -1);
6487 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6488 cfg->ret_var_set = TRUE;
6492 MonoType *ret_type = mono_method_signature (method)->ret;
6494 g_assert (!return_var);
6497 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6500 if (!cfg->vret_addr) {
6503 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6505 EMIT_NEW_RETLOADA (cfg, ret_addr);
6507 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6508 ins->klass = mono_class_from_mono_type (ret_type);
6511 #ifdef MONO_ARCH_SOFT_FLOAT
6512 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6513 MonoInst *iargs [1];
6517 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6518 mono_arch_emit_setret (cfg, method, conv);
6520 mono_arch_emit_setret (cfg, method, *sp);
6523 mono_arch_emit_setret (cfg, method, *sp);
6528 if (sp != stack_start)
6530 MONO_INST_NEW (cfg, ins, OP_BR);
6532 ins->inst_target_bb = end_bblock;
6533 MONO_ADD_INS (bblock, ins);
6534 link_bblock (cfg, bblock, end_bblock);
6535 start_new_bblock = 1;
6539 MONO_INST_NEW (cfg, ins, OP_BR);
6541 target = ip + 1 + (signed char)(*ip);
6543 GET_BBLOCK (cfg, tblock, target);
6544 link_bblock (cfg, bblock, tblock);
6545 ins->inst_target_bb = tblock;
6546 if (sp != stack_start) {
6547 handle_stack_args (cfg, stack_start, sp - stack_start);
6549 CHECK_UNVERIFIABLE (cfg);
6551 MONO_ADD_INS (bblock, ins);
6552 start_new_bblock = 1;
6553 inline_costs += BRANCH_COST;
6567 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6569 target = ip + 1 + *(signed char*)ip;
6575 inline_costs += BRANCH_COST;
6579 MONO_INST_NEW (cfg, ins, OP_BR);
6582 target = ip + 4 + (gint32)read32(ip);
6584 GET_BBLOCK (cfg, tblock, target);
6585 link_bblock (cfg, bblock, tblock);
6586 ins->inst_target_bb = tblock;
6587 if (sp != stack_start) {
6588 handle_stack_args (cfg, stack_start, sp - stack_start);
6590 CHECK_UNVERIFIABLE (cfg);
6593 MONO_ADD_INS (bblock, ins);
6595 start_new_bblock = 1;
6596 inline_costs += BRANCH_COST;
6603 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6604 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6605 guint32 opsize = is_short ? 1 : 4;
6607 CHECK_OPSIZE (opsize);
6609 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6612 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6617 GET_BBLOCK (cfg, tblock, target);
6618 link_bblock (cfg, bblock, tblock);
6619 GET_BBLOCK (cfg, tblock, ip);
6620 link_bblock (cfg, bblock, tblock);
6622 if (sp != stack_start) {
6623 handle_stack_args (cfg, stack_start, sp - stack_start);
6624 CHECK_UNVERIFIABLE (cfg);
6627 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6628 cmp->sreg1 = sp [0]->dreg;
6629 type_from_op (cmp, sp [0], NULL);
6632 #if SIZEOF_REGISTER == 4
6633 if (cmp->opcode == OP_LCOMPARE_IMM) {
6634 /* Convert it to OP_LCOMPARE */
6635 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6636 ins->type = STACK_I8;
6637 ins->dreg = alloc_dreg (cfg, STACK_I8);
6639 MONO_ADD_INS (bblock, ins);
6640 cmp->opcode = OP_LCOMPARE;
6641 cmp->sreg2 = ins->dreg;
6644 MONO_ADD_INS (bblock, cmp);
6646 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6647 type_from_op (ins, sp [0], NULL);
6648 MONO_ADD_INS (bblock, ins);
6649 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
6650 GET_BBLOCK (cfg, tblock, target);
6651 ins->inst_true_bb = tblock;
6652 GET_BBLOCK (cfg, tblock, ip);
6653 ins->inst_false_bb = tblock;
6654 start_new_bblock = 2;
6657 inline_costs += BRANCH_COST;
6672 MONO_INST_NEW (cfg, ins, *ip);
6674 target = ip + 4 + (gint32)read32(ip);
6680 inline_costs += BRANCH_COST;
6684 MonoBasicBlock **targets;
6685 MonoBasicBlock *default_bblock;
6686 MonoJumpInfoBBTable *table;
6687 int offset_reg = alloc_preg (cfg);
6688 int target_reg = alloc_preg (cfg);
6689 int table_reg = alloc_preg (cfg);
6690 int sum_reg = alloc_preg (cfg);
6691 gboolean use_op_switch;
6695 n = read32 (ip + 1);
6698 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
6702 CHECK_OPSIZE (n * sizeof (guint32));
6703 target = ip + n * sizeof (guint32);
6705 GET_BBLOCK (cfg, default_bblock, target);
6707 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
6708 for (i = 0; i < n; ++i) {
6709 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
6710 targets [i] = tblock;
6714 if (sp != stack_start) {
6716 * Link the current bb with the targets as well, so handle_stack_args
6717 * will set their in_stack correctly.
6719 link_bblock (cfg, bblock, default_bblock);
6720 for (i = 0; i < n; ++i)
6721 link_bblock (cfg, bblock, targets [i]);
6723 handle_stack_args (cfg, stack_start, sp - stack_start);
6725 CHECK_UNVERIFIABLE (cfg);
6728 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
6729 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
6732 for (i = 0; i < n; ++i)
6733 link_bblock (cfg, bblock, targets [i]);
6735 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
6736 table->table = targets;
6737 table->table_size = n;
6739 use_op_switch = FALSE;
6741 /* ARM implements SWITCH statements differently */
6742 /* FIXME: Make it use the generic implementation */
6743 if (!cfg->compile_aot)
6744 use_op_switch = TRUE;
6747 if (COMPILE_LLVM (cfg))
6748 use_op_switch = TRUE;
6750 cfg->cbb->has_jump_table = 1;
6752 if (use_op_switch) {
6753 MONO_INST_NEW (cfg, ins, OP_SWITCH);
6754 ins->sreg1 = src1->dreg;
6755 ins->inst_p0 = table;
6756 ins->inst_many_bb = targets;
6757 ins->klass = GUINT_TO_POINTER (n);
6758 MONO_ADD_INS (cfg->cbb, ins);
6760 if (sizeof (gpointer) == 8)
6761 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
6763 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
6765 #if SIZEOF_REGISTER == 8
6766 /* The upper word might not be zero, and we add it to a 64 bit address later */
6767 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
6770 if (cfg->compile_aot) {
6771 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
6773 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
6774 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
6775 ins->inst_p0 = table;
6776 ins->dreg = table_reg;
6777 MONO_ADD_INS (cfg->cbb, ins);
6780 /* FIXME: Use load_memindex */
6781 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
6782 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
6783 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
6785 start_new_bblock = 1;
6786 inline_costs += (BRANCH_COST * 2);
6806 dreg = alloc_freg (cfg);
6809 dreg = alloc_lreg (cfg);
6812 dreg = alloc_preg (cfg);
6815 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
6816 ins->type = ldind_type [*ip - CEE_LDIND_I1];
6817 ins->flags |= ins_flag;
6819 MONO_ADD_INS (bblock, ins);
6834 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
6835 ins->flags |= ins_flag;
6837 MONO_ADD_INS (bblock, ins);
6839 #if HAVE_WRITE_BARRIERS
6840 if (*ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0))) {
6841 /* insert call to write barrier */
6842 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
6843 mono_emit_method_call (cfg, write_barrier, sp, NULL);
6854 MONO_INST_NEW (cfg, ins, (*ip));
6856 ins->sreg1 = sp [0]->dreg;
6857 ins->sreg2 = sp [1]->dreg;
6858 type_from_op (ins, sp [0], sp [1]);
6860 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6862 /* Use the immediate opcodes if possible */
6863 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
6864 int imm_opcode = mono_op_to_op_imm (ins->opcode);
6865 if (imm_opcode != -1) {
6866 ins->opcode = imm_opcode;
6867 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
6870 sp [1]->opcode = OP_NOP;
6874 MONO_ADD_INS ((cfg)->cbb, (ins));
6876 *sp++ = mono_decompose_opcode (cfg, ins);
6893 MONO_INST_NEW (cfg, ins, (*ip));
6895 ins->sreg1 = sp [0]->dreg;
6896 ins->sreg2 = sp [1]->dreg;
6897 type_from_op (ins, sp [0], sp [1]);
6899 ADD_WIDEN_OP (ins, sp [0], sp [1]);
6900 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6902 /* FIXME: Pass opcode to is_inst_imm */
6904 /* Use the immediate opcodes if possible */
6905 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
6908 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
6909 if (imm_opcode != -1) {
6910 ins->opcode = imm_opcode;
6911 if (sp [1]->opcode == OP_I8CONST) {
6912 #if SIZEOF_REGISTER == 8
6913 ins->inst_imm = sp [1]->inst_l;
6915 ins->inst_ls_word = sp [1]->inst_ls_word;
6916 ins->inst_ms_word = sp [1]->inst_ms_word;
6920 ins->inst_imm = (gssize)(sp [1]->inst_c0);
6923 /* Might be followed by an instruction added by ADD_WIDEN_OP */
6924 if (sp [1]->next == NULL)
6925 sp [1]->opcode = OP_NOP;
6928 MONO_ADD_INS ((cfg)->cbb, (ins));
6930 *sp++ = mono_decompose_opcode (cfg, ins);
6943 case CEE_CONV_OVF_I8:
6944 case CEE_CONV_OVF_U8:
6948 /* Special case this earlier so we have long constants in the IR */
6949 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
6950 int data = sp [-1]->inst_c0;
6951 sp [-1]->opcode = OP_I8CONST;
6952 sp [-1]->type = STACK_I8;
6953 #if SIZEOF_REGISTER == 8
6954 if ((*ip) == CEE_CONV_U8)
6955 sp [-1]->inst_c0 = (guint32)data;
6957 sp [-1]->inst_c0 = data;
6959 sp [-1]->inst_ls_word = data;
6960 if ((*ip) == CEE_CONV_U8)
6961 sp [-1]->inst_ms_word = 0;
6963 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
6965 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
6972 case CEE_CONV_OVF_I4:
6973 case CEE_CONV_OVF_I1:
6974 case CEE_CONV_OVF_I2:
6975 case CEE_CONV_OVF_I:
6976 case CEE_CONV_OVF_U:
6979 if (sp [-1]->type == STACK_R8) {
6980 ADD_UNOP (CEE_CONV_OVF_I8);
6987 case CEE_CONV_OVF_U1:
6988 case CEE_CONV_OVF_U2:
6989 case CEE_CONV_OVF_U4:
6992 if (sp [-1]->type == STACK_R8) {
6993 ADD_UNOP (CEE_CONV_OVF_U8);
7000 case CEE_CONV_OVF_I1_UN:
7001 case CEE_CONV_OVF_I2_UN:
7002 case CEE_CONV_OVF_I4_UN:
7003 case CEE_CONV_OVF_I8_UN:
7004 case CEE_CONV_OVF_U1_UN:
7005 case CEE_CONV_OVF_U2_UN:
7006 case CEE_CONV_OVF_U4_UN:
7007 case CEE_CONV_OVF_U8_UN:
7008 case CEE_CONV_OVF_I_UN:
7009 case CEE_CONV_OVF_U_UN:
7019 case CEE_ADD_OVF_UN:
7021 case CEE_MUL_OVF_UN:
7023 case CEE_SUB_OVF_UN:
7031 token = read32 (ip + 1);
7032 klass = mini_get_class (method, token, generic_context);
7033 CHECK_TYPELOAD (klass);
7035 if (generic_class_is_reference_type (cfg, klass)) {
7036 MonoInst *store, *load;
7037 int dreg = alloc_preg (cfg);
7039 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7040 load->flags |= ins_flag;
7041 MONO_ADD_INS (cfg->cbb, load);
7043 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7044 store->flags |= ins_flag;
7045 MONO_ADD_INS (cfg->cbb, store);
7047 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7059 token = read32 (ip + 1);
7060 klass = mini_get_class (method, token, generic_context);
7061 CHECK_TYPELOAD (klass);
7063 /* Optimize the common ldobj+stloc combination */
7073 loc_index = ip [5] - CEE_STLOC_0;
7080 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7081 CHECK_LOCAL (loc_index);
7083 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7084 ins->dreg = cfg->locals [loc_index]->dreg;
7090 /* Optimize the ldobj+stobj combination */
7091 /* The reference case ends up being a load+store anyway */
7092 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7097 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7104 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7113 CHECK_STACK_OVF (1);
7115 n = read32 (ip + 1);
7117 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7118 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7119 ins->type = STACK_OBJ;
7122 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7123 MonoInst *iargs [1];
7125 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7126 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7128 if (cfg->opt & MONO_OPT_SHARED) {
7129 MonoInst *iargs [3];
7131 if (cfg->compile_aot) {
7132 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7134 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7135 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7136 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7137 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7138 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7140 if (bblock->out_of_line) {
7141 MonoInst *iargs [2];
7143 if (image == mono_defaults.corlib) {
7145 * Avoid relocations in AOT and save some space by using a
7146 * version of helper_ldstr specialized to mscorlib.
7148 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7149 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7151 /* Avoid creating the string object */
7152 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7153 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7154 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7158 if (cfg->compile_aot) {
7159 NEW_LDSTRCONST (cfg, ins, image, n);
7161 MONO_ADD_INS (bblock, ins);
7164 NEW_PCONST (cfg, ins, NULL);
7165 ins->type = STACK_OBJ;
7166 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7168 MONO_ADD_INS (bblock, ins);
7177 MonoInst *iargs [2];
7178 MonoMethodSignature *fsig;
7181 MonoInst *vtable_arg = NULL;
7184 token = read32 (ip + 1);
7185 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7188 fsig = mono_method_get_signature (cmethod, image, token);
7192 mono_save_token_info (cfg, image, token, cmethod);
7194 if (!mono_class_init (cmethod->klass))
7197 if (cfg->generic_sharing_context)
7198 context_used = mono_method_check_context_used (cmethod);
7200 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7201 if (check_linkdemand (cfg, method, cmethod))
7203 CHECK_CFG_EXCEPTION;
7204 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7205 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7208 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7209 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7210 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7211 mono_class_vtable (cfg->domain, cmethod->klass);
7212 CHECK_TYPELOAD (cmethod->klass);
7214 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7215 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7218 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7219 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7221 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7223 CHECK_TYPELOAD (cmethod->klass);
7224 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7229 n = fsig->param_count;
7233 * Generate smaller code for the common newobj <exception> instruction in
7234 * argument checking code.
7236 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7237 is_exception_class (cmethod->klass) && n <= 2 &&
7238 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7239 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7240 MonoInst *iargs [3];
7242 g_assert (!vtable_arg);
7246 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7249 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7253 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7258 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7261 g_assert_not_reached ();
7269 /* move the args to allow room for 'this' in the first position */
7275 /* check_call_signature () requires sp[0] to be set */
7276 this_ins.type = STACK_OBJ;
7278 if (check_call_signature (cfg, fsig, sp))
7283 if (mini_class_is_system_array (cmethod->klass)) {
7284 g_assert (!vtable_arg);
7286 *sp = emit_get_rgctx_method (cfg, context_used,
7287 cmethod, MONO_RGCTX_INFO_METHOD);
7289 /* Avoid varargs in the common case */
7290 if (fsig->param_count == 1)
7291 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7292 else if (fsig->param_count == 2)
7293 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7294 else if (fsig->param_count == 3)
7295 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
7297 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7298 } else if (cmethod->string_ctor) {
7299 g_assert (!context_used);
7300 g_assert (!vtable_arg);
7301 /* we simply pass a null pointer */
7302 EMIT_NEW_PCONST (cfg, *sp, NULL);
7303 /* now call the string ctor */
7304 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7306 MonoInst* callvirt_this_arg = NULL;
7308 if (cmethod->klass->valuetype) {
7309 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7310 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7311 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7316 * The code generated by mini_emit_virtual_call () expects
7317 * iargs [0] to be a boxed instance, but luckily the vcall
7318 * will be transformed into a normal call there.
7320 } else if (context_used) {
7324 if (cfg->opt & MONO_OPT_SHARED)
7325 rgctx_info = MONO_RGCTX_INFO_KLASS;
7327 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7328 data = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, rgctx_info);
7330 alloc = handle_alloc_from_inst (cfg, cmethod->klass, data, FALSE);
7333 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7335 CHECK_TYPELOAD (cmethod->klass);
7338 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7339 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7340 * As a workaround, we call class cctors before allocating objects.
7342 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7343 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7344 if (cfg->verbose_level > 2)
7345 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7346 class_inits = g_slist_prepend (class_inits, vtable);
7349 alloc = handle_alloc (cfg, cmethod->klass, FALSE);
7352 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
7355 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7357 /* Now call the actual ctor */
7358 /* Avoid virtual calls to ctors if possible */
7359 if (cmethod->klass->marshalbyref)
7360 callvirt_this_arg = sp [0];
7362 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7363 mono_method_check_inlining (cfg, cmethod) &&
7364 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7365 !g_list_find (dont_inline, cmethod)) {
7368 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7369 cfg->real_offset += 5;
7372 inline_costs += costs - 5;
7375 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7377 } else if (context_used &&
7378 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7379 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7380 MonoInst *cmethod_addr;
7382 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7383 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7385 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7388 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7389 callvirt_this_arg, NULL, vtable_arg);
7393 if (alloc == NULL) {
7395 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7396 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7410 token = read32 (ip + 1);
7411 klass = mini_get_class (method, token, generic_context);
7412 CHECK_TYPELOAD (klass);
7413 if (sp [0]->type != STACK_OBJ)
7416 if (cfg->generic_sharing_context)
7417 context_used = mono_class_check_context_used (klass);
7426 args [1] = emit_get_rgctx_klass (cfg, context_used,
7427 klass, MONO_RGCTX_INFO_KLASS);
7429 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7433 } else if (mono_class_has_variant_generic_params (klass)) {
7440 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7442 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7446 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7447 MonoMethod *mono_castclass;
7448 MonoInst *iargs [1];
7451 mono_castclass = mono_marshal_get_castclass (klass);
7454 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7455 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7456 g_assert (costs > 0);
7459 cfg->real_offset += 5;
7464 inline_costs += costs;
7467 ins = handle_castclass (cfg, klass, *sp);
7468 CHECK_CFG_EXCEPTION;
7478 token = read32 (ip + 1);
7479 klass = mini_get_class (method, token, generic_context);
7480 CHECK_TYPELOAD (klass);
7481 if (sp [0]->type != STACK_OBJ)
7484 if (cfg->generic_sharing_context)
7485 context_used = mono_class_check_context_used (klass);
7494 args [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7496 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7500 } else if (mono_class_has_variant_generic_params (klass)) {
7507 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7509 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7513 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7514 MonoMethod *mono_isinst;
7515 MonoInst *iargs [1];
7518 mono_isinst = mono_marshal_get_isinst (klass);
7521 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7522 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7523 g_assert (costs > 0);
7526 cfg->real_offset += 5;
7531 inline_costs += costs;
7534 ins = handle_isinst (cfg, klass, *sp);
7535 CHECK_CFG_EXCEPTION;
7542 case CEE_UNBOX_ANY: {
7546 token = read32 (ip + 1);
7547 klass = mini_get_class (method, token, generic_context);
7548 CHECK_TYPELOAD (klass);
7550 mono_save_token_info (cfg, image, token, klass);
7552 if (cfg->generic_sharing_context)
7553 context_used = mono_class_check_context_used (klass);
7555 if (generic_class_is_reference_type (cfg, klass)) {
7556 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
7558 MonoInst *iargs [2];
7563 iargs [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7564 ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
7568 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7569 MonoMethod *mono_castclass;
7570 MonoInst *iargs [1];
7573 mono_castclass = mono_marshal_get_castclass (klass);
7576 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7577 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7579 g_assert (costs > 0);
7582 cfg->real_offset += 5;
7586 inline_costs += costs;
7588 ins = handle_castclass (cfg, klass, *sp);
7589 CHECK_CFG_EXCEPTION;
7597 if (mono_class_is_nullable (klass)) {
7598 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
7605 ins = handle_unbox (cfg, klass, sp, context_used);
7611 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7624 token = read32 (ip + 1);
7625 klass = mini_get_class (method, token, generic_context);
7626 CHECK_TYPELOAD (klass);
7628 mono_save_token_info (cfg, image, token, klass);
7630 if (cfg->generic_sharing_context)
7631 context_used = mono_class_check_context_used (klass);
7633 if (generic_class_is_reference_type (cfg, klass)) {
7639 if (klass == mono_defaults.void_class)
7641 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7643 /* frequent check in generic code: box (struct), brtrue */
7644 if (!mono_class_is_nullable (klass) &&
7645 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
7646 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7648 MONO_INST_NEW (cfg, ins, OP_BR);
7649 if (*ip == CEE_BRTRUE_S) {
7652 target = ip + 1 + (signed char)(*ip);
7657 target = ip + 4 + (gint)(read32 (ip));
7660 GET_BBLOCK (cfg, tblock, target);
7661 link_bblock (cfg, bblock, tblock);
7662 ins->inst_target_bb = tblock;
7663 GET_BBLOCK (cfg, tblock, ip);
7665 * This leads to some inconsistency, since the two bblocks are
7666 * not really connected, but it is needed for handling stack
7667 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
7668 * FIXME: This should only be needed if sp != stack_start, but that
7669 * doesn't work for some reason (test failure in mcs/tests on x86).
7671 link_bblock (cfg, bblock, tblock);
7672 if (sp != stack_start) {
7673 handle_stack_args (cfg, stack_start, sp - stack_start);
7675 CHECK_UNVERIFIABLE (cfg);
7677 MONO_ADD_INS (bblock, ins);
7678 start_new_bblock = 1;
7686 if (cfg->opt & MONO_OPT_SHARED)
7687 rgctx_info = MONO_RGCTX_INFO_KLASS;
7689 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7690 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
7691 *sp++ = handle_box_from_inst (cfg, val, klass, context_used, data);
7693 *sp++ = handle_box (cfg, val, klass);
7696 CHECK_CFG_EXCEPTION;
7705 token = read32 (ip + 1);
7706 klass = mini_get_class (method, token, generic_context);
7707 CHECK_TYPELOAD (klass);
7709 mono_save_token_info (cfg, image, token, klass);
7711 if (cfg->generic_sharing_context)
7712 context_used = mono_class_check_context_used (klass);
7714 if (mono_class_is_nullable (klass)) {
7717 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
7718 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
7722 ins = handle_unbox (cfg, klass, sp, context_used);
7732 MonoClassField *field;
7736 if (*ip == CEE_STFLD) {
7743 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
7745 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
7748 token = read32 (ip + 1);
7749 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7750 field = mono_method_get_wrapper_data (method, token);
7751 klass = field->parent;
7754 field = mono_field_from_token (image, token, &klass, generic_context);
7758 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7759 FIELD_ACCESS_FAILURE;
7760 mono_class_init (klass);
7762 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
7763 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
7764 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
7765 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
7768 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
7769 if (*ip == CEE_STFLD) {
7770 if (target_type_is_incompatible (cfg, field->type, sp [1]))
7772 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7773 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
7774 MonoInst *iargs [5];
7777 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7778 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7779 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
7783 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
7784 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
7785 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7786 g_assert (costs > 0);
7788 cfg->real_offset += 5;
7791 inline_costs += costs;
7793 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
7798 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
7800 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
7802 #if HAVE_WRITE_BARRIERS
7803 if (mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
7804 /* insert call to write barrier */
7805 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7806 MonoInst *iargs [2];
7809 dreg = alloc_preg (cfg);
7810 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7812 mono_emit_method_call (cfg, write_barrier, iargs, NULL);
7816 store->flags |= ins_flag;
7823 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7824 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
7825 MonoInst *iargs [4];
7828 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7829 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7830 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
7831 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
7832 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
7833 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7835 g_assert (costs > 0);
7837 cfg->real_offset += 5;
7841 inline_costs += costs;
7843 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
7847 if (sp [0]->type == STACK_VTYPE) {
7850 /* Have to compute the address of the variable */
7852 var = get_vreg_to_inst (cfg, sp [0]->dreg);
7854 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
7856 g_assert (var->klass == klass);
7858 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
7862 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
7864 if (*ip == CEE_LDFLDA) {
7865 dreg = alloc_preg (cfg);
7867 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7868 ins->klass = mono_class_from_mono_type (field->type);
7869 ins->type = STACK_MP;
7874 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
7875 load->flags |= ins_flag;
7876 load->flags |= MONO_INST_FAULT;
7887 MonoClassField *field;
7888 gpointer addr = NULL;
7889 gboolean is_special_static;
7892 token = read32 (ip + 1);
7894 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7895 field = mono_method_get_wrapper_data (method, token);
7896 klass = field->parent;
7899 field = mono_field_from_token (image, token, &klass, generic_context);
7902 mono_class_init (klass);
7903 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7904 FIELD_ACCESS_FAILURE;
7906 /* if the class is Critical then transparent code cannot access its fields */
7907 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
7908 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
7911 * We can only support shared generic static
7912 * field access on architectures where the
7913 * trampoline code has been extended to handle
7914 * the generic class init.
7916 #ifndef MONO_ARCH_VTABLE_REG
7917 GENERIC_SHARING_FAILURE (*ip);
7920 if (cfg->generic_sharing_context)
7921 context_used = mono_class_check_context_used (klass);
7923 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
7925 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
7926 * to be called here.
7928 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
7929 mono_class_vtable (cfg->domain, klass);
7930 CHECK_TYPELOAD (klass);
7932 mono_domain_lock (cfg->domain);
7933 if (cfg->domain->special_static_fields)
7934 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
7935 mono_domain_unlock (cfg->domain);
7937 is_special_static = mono_class_field_is_special_static (field);
7939 /* Generate IR to compute the field address */
7941 if ((cfg->opt & MONO_OPT_SHARED) ||
7942 (cfg->compile_aot && is_special_static) ||
7943 (context_used && is_special_static)) {
7944 MonoInst *iargs [2];
7946 g_assert (field->parent);
7947 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7949 iargs [1] = emit_get_rgctx_field (cfg, context_used,
7950 field, MONO_RGCTX_INFO_CLASS_FIELD);
7952 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
7954 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
7955 } else if (context_used) {
7956 MonoInst *static_data;
7959 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
7960 method->klass->name_space, method->klass->name, method->name,
7961 depth, field->offset);
7964 if (mono_class_needs_cctor_run (klass, method)) {
7968 vtable = emit_get_rgctx_klass (cfg, context_used,
7969 klass, MONO_RGCTX_INFO_VTABLE);
7971 // FIXME: This doesn't work since it tries to pass the argument
7972 // in the normal way, instead of using MONO_ARCH_VTABLE_REG
7974 * The vtable pointer is always passed in a register regardless of
7975 * the calling convention, so assign it manually, and make a call
7976 * using a signature without parameters.
7978 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable);
7979 #ifdef MONO_ARCH_VTABLE_REG
7980 mono_call_inst_add_outarg_reg (cfg, call, vtable->dreg, MONO_ARCH_VTABLE_REG, FALSE);
7981 cfg->uses_vtable_reg = TRUE;
7988 * The pointer we're computing here is
7990 * super_info.static_data + field->offset
7992 static_data = emit_get_rgctx_klass (cfg, context_used,
7993 klass, MONO_RGCTX_INFO_STATIC_DATA);
7995 if (field->offset == 0) {
7998 int addr_reg = mono_alloc_preg (cfg);
7999 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8001 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8002 MonoInst *iargs [2];
8004 g_assert (field->parent);
8005 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8006 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8007 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8009 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8011 CHECK_TYPELOAD (klass);
8013 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8014 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8015 if (cfg->verbose_level > 2)
8016 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8017 class_inits = g_slist_prepend (class_inits, vtable);
8019 if (cfg->run_cctors) {
8021 /* This makes it so that inlining cannot trigger */
8022 /* .cctors: too many apps depend on them */
8023 /* running with a specific order... */
8024 if (! vtable->initialized)
8026 ex = mono_runtime_class_init_full (vtable, FALSE);
8028 set_exception_object (cfg, ex);
8029 goto exception_exit;
8033 addr = (char*)vtable->data + field->offset;
8035 if (cfg->compile_aot)
8036 EMIT_NEW_SFLDACONST (cfg, ins, field);
8038 EMIT_NEW_PCONST (cfg, ins, addr);
8041 * insert call to mono_threads_get_static_data (GPOINTER_TO_UINT (addr))
8042 * This could be later optimized to do just a couple of
8043 * memory dereferences with constant offsets.
8045 MonoInst *iargs [1];
8046 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8047 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8051 /* Generate IR to do the actual load/store operation */
8053 if (*ip == CEE_LDSFLDA) {
8054 ins->klass = mono_class_from_mono_type (field->type);
8055 ins->type = STACK_PTR;
8057 } else if (*ip == CEE_STSFLD) {
8062 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
8063 store->flags |= ins_flag;
8065 gboolean is_const = FALSE;
8066 MonoVTable *vtable = NULL;
8068 if (!context_used) {
8069 vtable = mono_class_vtable (cfg->domain, klass);
8070 CHECK_TYPELOAD (klass);
8072 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8073 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8074 gpointer addr = (char*)vtable->data + field->offset;
8075 int ro_type = field->type->type;
8076 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
8077 ro_type = mono_class_enum_basetype (field->type->data.klass)->type;
8079 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8082 case MONO_TYPE_BOOLEAN:
8084 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8088 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8091 case MONO_TYPE_CHAR:
8093 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8097 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8102 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8106 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8109 #ifndef HAVE_MOVING_COLLECTOR
8112 case MONO_TYPE_STRING:
8113 case MONO_TYPE_OBJECT:
8114 case MONO_TYPE_CLASS:
8115 case MONO_TYPE_SZARRAY:
8117 case MONO_TYPE_FNPTR:
8118 case MONO_TYPE_ARRAY:
8119 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8120 type_to_eval_stack_type ((cfg), field->type, *sp);
8126 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8131 case MONO_TYPE_VALUETYPE:
8141 CHECK_STACK_OVF (1);
8143 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8144 load->flags |= ins_flag;
8157 token = read32 (ip + 1);
8158 klass = mini_get_class (method, token, generic_context);
8159 CHECK_TYPELOAD (klass);
8160 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8161 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8172 const char *data_ptr;
8174 guint32 field_token;
8180 token = read32 (ip + 1);
8182 klass = mini_get_class (method, token, generic_context);
8183 CHECK_TYPELOAD (klass);
8185 if (cfg->generic_sharing_context)
8186 context_used = mono_class_check_context_used (klass);
8188 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
8189 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
8190 ins->sreg1 = sp [0]->dreg;
8191 ins->type = STACK_I4;
8192 ins->dreg = alloc_ireg (cfg);
8193 MONO_ADD_INS (cfg->cbb, ins);
8194 *sp = mono_decompose_opcode (cfg, ins);
8199 MonoClass *array_class = mono_array_class_get (klass, 1);
8200 /* FIXME: we cannot get a managed
8201 allocator because we can't get the
8202 open generic class's vtable. We
8203 have the same problem in
8204 handle_alloc_from_inst(). This
8205 needs to be solved so that we can
8206 have managed allocs of shared
8209 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
8210 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
8212 MonoMethod *managed_alloc = NULL;
8214 /* FIXME: Decompose later to help abcrem */
8217 args [0] = emit_get_rgctx_klass (cfg, context_used,
8218 array_class, MONO_RGCTX_INFO_VTABLE);
8223 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
8225 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8227 if (cfg->opt & MONO_OPT_SHARED) {
8228 /* Decompose now to avoid problems with references to the domainvar */
8229 MonoInst *iargs [3];
8231 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8232 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8235 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8237 /* Decompose later since it is needed by abcrem */
8238 MonoClass *array_type = mono_array_class_get (klass, 1);
8239 mono_class_vtable (cfg->domain, array_type);
8240 CHECK_TYPELOAD (array_type);
8242 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8243 ins->dreg = alloc_preg (cfg);
8244 ins->sreg1 = sp [0]->dreg;
8245 ins->inst_newa_class = klass;
8246 ins->type = STACK_OBJ;
8248 MONO_ADD_INS (cfg->cbb, ins);
8249 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8250 cfg->cbb->has_array_access = TRUE;
8252 /* Needed so mono_emit_load_get_addr () gets called */
8253 mono_get_got_var (cfg);
8263 * we inline/optimize the initialization sequence if possible.
8264 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8265 * for small sizes open code the memcpy
8266 * ensure the rva field is big enough
8268 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8269 MonoMethod *memcpy_method = get_memcpy_method ();
8270 MonoInst *iargs [3];
8271 int add_reg = alloc_preg (cfg);
8273 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8274 if (cfg->compile_aot) {
8275 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8277 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8279 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8280 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8289 if (sp [0]->type != STACK_OBJ)
8292 dreg = alloc_preg (cfg);
8293 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8294 ins->dreg = alloc_preg (cfg);
8295 ins->sreg1 = sp [0]->dreg;
8296 ins->type = STACK_I4;
8297 MONO_ADD_INS (cfg->cbb, ins);
8298 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8299 cfg->cbb->has_array_access = TRUE;
8307 if (sp [0]->type != STACK_OBJ)
8310 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8312 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8313 CHECK_TYPELOAD (klass);
8314 /* we need to make sure that this array is exactly the type it needs
8315 * to be for correctness. the wrappers are lax with their usage
8316 * so we need to ignore them here
8318 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
8319 MonoClass *array_class = mono_array_class_get (klass, 1);
8320 mini_emit_check_array_type (cfg, sp [0], array_class);
8321 CHECK_TYPELOAD (array_class);
8325 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8340 case CEE_LDELEM_REF: {
8346 if (*ip == CEE_LDELEM) {
8348 token = read32 (ip + 1);
8349 klass = mini_get_class (method, token, generic_context);
8350 CHECK_TYPELOAD (klass);
8351 mono_class_init (klass);
8354 klass = array_access_to_klass (*ip);
8356 if (sp [0]->type != STACK_OBJ)
8359 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8361 if (sp [1]->opcode == OP_ICONST) {
8362 int array_reg = sp [0]->dreg;
8363 int index_reg = sp [1]->dreg;
8364 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8366 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8367 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8369 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8370 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8373 if (*ip == CEE_LDELEM)
8386 case CEE_STELEM_REF:
8393 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8395 if (*ip == CEE_STELEM) {
8397 token = read32 (ip + 1);
8398 klass = mini_get_class (method, token, generic_context);
8399 CHECK_TYPELOAD (klass);
8400 mono_class_init (klass);
8403 klass = array_access_to_klass (*ip);
8405 if (sp [0]->type != STACK_OBJ)
8408 /* storing a NULL doesn't need any of the complex checks in stelemref */
8409 if (generic_class_is_reference_type (cfg, klass) &&
8410 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8411 MonoMethod* helper = mono_marshal_get_stelemref ();
8412 MonoInst *iargs [3];
8414 if (sp [0]->type != STACK_OBJ)
8416 if (sp [2]->type != STACK_OBJ)
8423 mono_emit_method_call (cfg, helper, iargs, NULL);
8425 if (sp [1]->opcode == OP_ICONST) {
8426 int array_reg = sp [0]->dreg;
8427 int index_reg = sp [1]->dreg;
8428 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8430 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8431 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8433 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8434 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8438 if (*ip == CEE_STELEM)
8445 case CEE_CKFINITE: {
8449 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8450 ins->sreg1 = sp [0]->dreg;
8451 ins->dreg = alloc_freg (cfg);
8452 ins->type = STACK_R8;
8453 MONO_ADD_INS (bblock, ins);
8455 *sp++ = mono_decompose_opcode (cfg, ins);
8460 case CEE_REFANYVAL: {
8461 MonoInst *src_var, *src;
8463 int klass_reg = alloc_preg (cfg);
8464 int dreg = alloc_preg (cfg);
8467 MONO_INST_NEW (cfg, ins, *ip);
8470 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8471 CHECK_TYPELOAD (klass);
8472 mono_class_init (klass);
8474 if (cfg->generic_sharing_context)
8475 context_used = mono_class_check_context_used (klass);
8478 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8480 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8481 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8482 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8485 MonoInst *klass_ins;
8487 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8488 klass, MONO_RGCTX_INFO_KLASS);
8491 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8492 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8494 mini_emit_class_check (cfg, klass_reg, klass);
8496 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8497 ins->type = STACK_MP;
8502 case CEE_MKREFANY: {
8503 MonoInst *loc, *addr;
8506 MONO_INST_NEW (cfg, ins, *ip);
8509 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8510 CHECK_TYPELOAD (klass);
8511 mono_class_init (klass);
8513 if (cfg->generic_sharing_context)
8514 context_used = mono_class_check_context_used (klass);
8516 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8517 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8520 MonoInst *const_ins;
8521 int type_reg = alloc_preg (cfg);
8523 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8524 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8525 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8526 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8527 } else if (cfg->compile_aot) {
8528 int const_reg = alloc_preg (cfg);
8529 int type_reg = alloc_preg (cfg);
8531 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8532 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8533 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8534 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8536 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8537 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8539 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8541 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8542 ins->type = STACK_VTYPE;
8543 ins->klass = mono_defaults.typed_reference_class;
8550 MonoClass *handle_class;
8552 CHECK_STACK_OVF (1);
8555 n = read32 (ip + 1);
8557 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
8558 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8559 handle = mono_method_get_wrapper_data (method, n);
8560 handle_class = mono_method_get_wrapper_data (method, n + 1);
8561 if (handle_class == mono_defaults.typehandle_class)
8562 handle = &((MonoClass*)handle)->byval_arg;
8565 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8569 mono_class_init (handle_class);
8570 if (cfg->generic_sharing_context) {
8571 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
8572 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
8573 /* This case handles ldtoken
8574 of an open type, like for
8577 } else if (handle_class == mono_defaults.typehandle_class) {
8578 /* If we get a MONO_TYPE_CLASS
8579 then we need to provide the
8581 instantiation of it. */
8582 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8585 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8586 } else if (handle_class == mono_defaults.fieldhandle_class)
8587 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8588 else if (handle_class == mono_defaults.methodhandle_class)
8589 context_used = mono_method_check_context_used (handle);
8591 g_assert_not_reached ();
8594 if ((cfg->opt & MONO_OPT_SHARED) &&
8595 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
8596 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
8597 MonoInst *addr, *vtvar, *iargs [3];
8598 int method_context_used;
8600 if (cfg->generic_sharing_context)
8601 method_context_used = mono_method_check_context_used (method);
8603 method_context_used = 0;
8605 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8607 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8608 EMIT_NEW_ICONST (cfg, iargs [1], n);
8609 if (method_context_used) {
8610 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
8611 method, MONO_RGCTX_INFO_METHOD);
8612 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8614 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
8615 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
8617 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8619 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8621 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8623 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
8624 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
8625 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
8626 (cmethod->klass == mono_defaults.monotype_class->parent) &&
8627 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
8628 MonoClass *tclass = mono_class_from_mono_type (handle);
8630 mono_class_init (tclass);
8632 ins = emit_get_rgctx_klass (cfg, context_used,
8633 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
8634 } else if (cfg->compile_aot) {
8635 if (method->wrapper_type) {
8636 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
8637 /* Special case for static synchronized wrappers */
8638 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
8640 /* FIXME: n is not a normal token */
8641 cfg->disable_aot = TRUE;
8642 EMIT_NEW_PCONST (cfg, ins, NULL);
8645 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
8648 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
8650 ins->type = STACK_OBJ;
8651 ins->klass = cmethod->klass;
8654 MonoInst *addr, *vtvar;
8656 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8659 if (handle_class == mono_defaults.typehandle_class) {
8660 ins = emit_get_rgctx_klass (cfg, context_used,
8661 mono_class_from_mono_type (handle),
8662 MONO_RGCTX_INFO_TYPE);
8663 } else if (handle_class == mono_defaults.methodhandle_class) {
8664 ins = emit_get_rgctx_method (cfg, context_used,
8665 handle, MONO_RGCTX_INFO_METHOD);
8666 } else if (handle_class == mono_defaults.fieldhandle_class) {
8667 ins = emit_get_rgctx_field (cfg, context_used,
8668 handle, MONO_RGCTX_INFO_CLASS_FIELD);
8670 g_assert_not_reached ();
8672 } else if (cfg->compile_aot) {
8673 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
8675 EMIT_NEW_PCONST (cfg, ins, handle);
8677 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8678 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8679 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8689 MONO_INST_NEW (cfg, ins, OP_THROW);
8691 ins->sreg1 = sp [0]->dreg;
8693 bblock->out_of_line = TRUE;
8694 MONO_ADD_INS (bblock, ins);
8695 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
8696 MONO_ADD_INS (bblock, ins);
8699 link_bblock (cfg, bblock, end_bblock);
8700 start_new_bblock = 1;
8702 case CEE_ENDFINALLY:
8703 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
8704 MONO_ADD_INS (bblock, ins);
8706 start_new_bblock = 1;
8709 * Control will leave the method so empty the stack, otherwise
8710 * the next basic block will start with a nonempty stack.
8712 while (sp != stack_start) {
8720 if (*ip == CEE_LEAVE) {
8722 target = ip + 5 + (gint32)read32(ip + 1);
8725 target = ip + 2 + (signed char)(ip [1]);
8728 /* empty the stack */
8729 while (sp != stack_start) {
8734 * If this leave statement is in a catch block, check for a
8735 * pending exception, and rethrow it if necessary.
8736 * We avoid doing this in runtime invoke wrappers, since those are called
8737 * by native code which expects the wrapper to catch all exceptions.
8739 for (i = 0; i < header->num_clauses; ++i) {
8740 MonoExceptionClause *clause = &header->clauses [i];
8743 * Use <= in the final comparison to handle clauses with multiple
8744 * leave statements, like in bug #78024.
8745 * The ordering of the exception clauses guarantees that we find the
8748 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
8750 MonoBasicBlock *dont_throw;
8755 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
8758 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
8760 NEW_BBLOCK (cfg, dont_throw);
8763 * Currently, we always rethrow the abort exception, despite the
8764 * fact that this is not correct. See thread6.cs for an example.
8765 * But propagating the abort exception is more important than
8766 * getting the semantics right.
8768 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
8769 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
8770 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
8772 MONO_START_BB (cfg, dont_throw);
8777 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
8779 for (tmp = handlers; tmp; tmp = tmp->next) {
8781 link_bblock (cfg, bblock, tblock);
8782 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
8783 ins->inst_target_bb = tblock;
8784 MONO_ADD_INS (bblock, ins);
8785 bblock->has_call_handler = 1;
8786 if (COMPILE_LLVM (cfg)) {
8787 MonoBasicBlock *target_bb;
8790 * Link the finally bblock with the target, since it will
8791 * conceptually branch there.
8792 * FIXME: Have to link the bblock containing the endfinally.
8794 GET_BBLOCK (cfg, target_bb, target);
8795 link_bblock (cfg, tblock, target_bb);
8798 g_list_free (handlers);
8801 MONO_INST_NEW (cfg, ins, OP_BR);
8802 MONO_ADD_INS (bblock, ins);
8803 GET_BBLOCK (cfg, tblock, target);
8804 link_bblock (cfg, bblock, tblock);
8805 ins->inst_target_bb = tblock;
8806 start_new_bblock = 1;
8808 if (*ip == CEE_LEAVE)
8817 * Mono specific opcodes
8819 case MONO_CUSTOM_PREFIX: {
8821 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
8825 case CEE_MONO_ICALL: {
8827 MonoJitICallInfo *info;
8829 token = read32 (ip + 2);
8830 func = mono_method_get_wrapper_data (method, token);
8831 info = mono_find_jit_icall_by_addr (func);
8834 CHECK_STACK (info->sig->param_count);
8835 sp -= info->sig->param_count;
8837 ins = mono_emit_jit_icall (cfg, info->func, sp);
8838 if (!MONO_TYPE_IS_VOID (info->sig->ret))
8842 inline_costs += 10 * num_calls++;
8846 case CEE_MONO_LDPTR: {
8849 CHECK_STACK_OVF (1);
8851 token = read32 (ip + 2);
8853 ptr = mono_method_get_wrapper_data (method, token);
8854 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
8855 MonoJitICallInfo *callinfo;
8856 const char *icall_name;
8858 icall_name = method->name + strlen ("__icall_wrapper_");
8859 g_assert (icall_name);
8860 callinfo = mono_find_jit_icall_by_name (icall_name);
8861 g_assert (callinfo);
8863 if (ptr == callinfo->func) {
8864 /* Will be transformed into an AOTCONST later */
8865 EMIT_NEW_PCONST (cfg, ins, ptr);
8871 /* FIXME: Generalize this */
8872 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
8873 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
8878 EMIT_NEW_PCONST (cfg, ins, ptr);
8881 inline_costs += 10 * num_calls++;
8882 /* Can't embed random pointers into AOT code */
8883 cfg->disable_aot = 1;
8886 case CEE_MONO_ICALL_ADDR: {
8887 MonoMethod *cmethod;
8890 CHECK_STACK_OVF (1);
8892 token = read32 (ip + 2);
8894 cmethod = mono_method_get_wrapper_data (method, token);
8896 if (cfg->compile_aot) {
8897 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
8899 ptr = mono_lookup_internal_call (cmethod);
8901 EMIT_NEW_PCONST (cfg, ins, ptr);
8907 case CEE_MONO_VTADDR: {
8908 MonoInst *src_var, *src;
8914 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8915 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
8920 case CEE_MONO_NEWOBJ: {
8921 MonoInst *iargs [2];
8923 CHECK_STACK_OVF (1);
8925 token = read32 (ip + 2);
8926 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8927 mono_class_init (klass);
8928 NEW_DOMAINCONST (cfg, iargs [0]);
8929 MONO_ADD_INS (cfg->cbb, iargs [0]);
8930 NEW_CLASSCONST (cfg, iargs [1], klass);
8931 MONO_ADD_INS (cfg->cbb, iargs [1]);
8932 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
8934 inline_costs += 10 * num_calls++;
8937 case CEE_MONO_OBJADDR:
8940 MONO_INST_NEW (cfg, ins, OP_MOVE);
8941 ins->dreg = alloc_preg (cfg);
8942 ins->sreg1 = sp [0]->dreg;
8943 ins->type = STACK_MP;
8944 MONO_ADD_INS (cfg->cbb, ins);
8948 case CEE_MONO_LDNATIVEOBJ:
8950 * Similar to LDOBJ, but instead load the unmanaged
8951 * representation of the vtype to the stack.
8956 token = read32 (ip + 2);
8957 klass = mono_method_get_wrapper_data (method, token);
8958 g_assert (klass->valuetype);
8959 mono_class_init (klass);
8962 MonoInst *src, *dest, *temp;
8965 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
8966 temp->backend.is_pinvoke = 1;
8967 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
8968 mini_emit_stobj (cfg, dest, src, klass, TRUE);
8970 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
8971 dest->type = STACK_VTYPE;
8972 dest->klass = klass;
8978 case CEE_MONO_RETOBJ: {
8980 * Same as RET, but return the native representation of a vtype
8983 g_assert (cfg->ret);
8984 g_assert (mono_method_signature (method)->pinvoke);
8989 token = read32 (ip + 2);
8990 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8992 if (!cfg->vret_addr) {
8993 g_assert (cfg->ret_var_is_local);
8995 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
8997 EMIT_NEW_RETLOADA (cfg, ins);
8999 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9001 if (sp != stack_start)
9004 MONO_INST_NEW (cfg, ins, OP_BR);
9005 ins->inst_target_bb = end_bblock;
9006 MONO_ADD_INS (bblock, ins);
9007 link_bblock (cfg, bblock, end_bblock);
9008 start_new_bblock = 1;
9012 case CEE_MONO_CISINST:
9013 case CEE_MONO_CCASTCLASS: {
9018 token = read32 (ip + 2);
9019 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9020 if (ip [1] == CEE_MONO_CISINST)
9021 ins = handle_cisinst (cfg, klass, sp [0]);
9023 ins = handle_ccastclass (cfg, klass, sp [0]);
9029 case CEE_MONO_SAVE_LMF:
9030 case CEE_MONO_RESTORE_LMF:
9031 #ifdef MONO_ARCH_HAVE_LMF_OPS
9032 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9033 MONO_ADD_INS (bblock, ins);
9034 cfg->need_lmf_area = TRUE;
9038 case CEE_MONO_CLASSCONST:
9039 CHECK_STACK_OVF (1);
9041 token = read32 (ip + 2);
9042 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9045 inline_costs += 10 * num_calls++;
9047 case CEE_MONO_NOT_TAKEN:
9048 bblock->out_of_line = TRUE;
9052 CHECK_STACK_OVF (1);
9054 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9055 ins->dreg = alloc_preg (cfg);
9056 ins->inst_offset = (gint32)read32 (ip + 2);
9057 ins->type = STACK_PTR;
9058 MONO_ADD_INS (bblock, ins);
9062 case CEE_MONO_DYN_CALL: {
9065 /* It would be easier to call a trampoline, but that would put an
9066 * extra frame on the stack, confusing exception handling. So
9067 * implement it inline using an opcode for now.
9070 if (!cfg->dyn_call_var) {
9071 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9072 /* prevent it from being register allocated */
9073 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
9076 /* Has to use a call inst since the local regalloc expects it */
9077 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
9078 ins = (MonoInst*)call;
9080 ins->sreg1 = sp [0]->dreg;
9081 ins->sreg2 = sp [1]->dreg;
9082 MONO_ADD_INS (bblock, ins);
9084 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
9085 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
9089 inline_costs += 10 * num_calls++;
9094 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9104 /* somewhat similar to LDTOKEN */
9105 MonoInst *addr, *vtvar;
9106 CHECK_STACK_OVF (1);
9107 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9109 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9110 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9112 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9113 ins->type = STACK_VTYPE;
9114 ins->klass = mono_defaults.argumenthandle_class;
9127 * The following transforms:
9128 * CEE_CEQ into OP_CEQ
9129 * CEE_CGT into OP_CGT
9130 * CEE_CGT_UN into OP_CGT_UN
9131 * CEE_CLT into OP_CLT
9132 * CEE_CLT_UN into OP_CLT_UN
9134 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9136 MONO_INST_NEW (cfg, ins, cmp->opcode);
9138 cmp->sreg1 = sp [0]->dreg;
9139 cmp->sreg2 = sp [1]->dreg;
9140 type_from_op (cmp, sp [0], sp [1]);
9142 if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9143 cmp->opcode = OP_LCOMPARE;
9144 else if (sp [0]->type == STACK_R8)
9145 cmp->opcode = OP_FCOMPARE;
9147 cmp->opcode = OP_ICOMPARE;
9148 MONO_ADD_INS (bblock, cmp);
9149 ins->type = STACK_I4;
9150 ins->dreg = alloc_dreg (cfg, ins->type);
9151 type_from_op (ins, sp [0], sp [1]);
9153 if (cmp->opcode == OP_FCOMPARE) {
9155 * The backends expect the fceq opcodes to do the
9158 cmp->opcode = OP_NOP;
9159 ins->sreg1 = cmp->sreg1;
9160 ins->sreg2 = cmp->sreg2;
9162 MONO_ADD_INS (bblock, ins);
9169 MonoMethod *cil_method;
9170 gboolean needs_static_rgctx_invoke;
9172 CHECK_STACK_OVF (1);
9174 n = read32 (ip + 2);
9175 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9178 mono_class_init (cmethod->klass);
9180 mono_save_token_info (cfg, image, n, cmethod);
9182 if (cfg->generic_sharing_context)
9183 context_used = mono_method_check_context_used (cmethod);
9185 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9187 cil_method = cmethod;
9188 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9189 METHOD_ACCESS_FAILURE;
9191 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9192 if (check_linkdemand (cfg, method, cmethod))
9194 CHECK_CFG_EXCEPTION;
9195 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9196 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9200 * Optimize the common case of ldftn+delegate creation
9202 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
9203 /* FIXME: SGEN support */
9204 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9205 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9206 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9207 MonoInst *target_ins;
9209 int invoke_context_used = 0;
9211 invoke = mono_get_delegate_invoke (ctor_method->klass);
9212 if (!invoke || !mono_method_signature (invoke))
9215 if (cfg->generic_sharing_context)
9216 invoke_context_used = mono_method_check_context_used (invoke);
9218 if (invoke_context_used == 0) {
9220 if (cfg->verbose_level > 3)
9221 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9222 target_ins = sp [-1];
9224 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
9225 CHECK_CFG_EXCEPTION;
9234 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9235 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9239 inline_costs += 10 * num_calls++;
9242 case CEE_LDVIRTFTN: {
9247 n = read32 (ip + 2);
9248 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9251 mono_class_init (cmethod->klass);
9253 if (cfg->generic_sharing_context)
9254 context_used = mono_method_check_context_used (cmethod);
9256 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9257 if (check_linkdemand (cfg, method, cmethod))
9259 CHECK_CFG_EXCEPTION;
9260 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9261 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9267 args [1] = emit_get_rgctx_method (cfg, context_used,
9268 cmethod, MONO_RGCTX_INFO_METHOD);
9271 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9273 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9276 inline_costs += 10 * num_calls++;
9280 CHECK_STACK_OVF (1);
9282 n = read16 (ip + 2);
9284 EMIT_NEW_ARGLOAD (cfg, ins, n);
9289 CHECK_STACK_OVF (1);
9291 n = read16 (ip + 2);
9293 NEW_ARGLOADA (cfg, ins, n);
9294 MONO_ADD_INS (cfg->cbb, ins);
9302 n = read16 (ip + 2);
9304 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9306 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9310 CHECK_STACK_OVF (1);
9312 n = read16 (ip + 2);
9314 EMIT_NEW_LOCLOAD (cfg, ins, n);
9319 unsigned char *tmp_ip;
9320 CHECK_STACK_OVF (1);
9322 n = read16 (ip + 2);
9325 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9331 EMIT_NEW_LOCLOADA (cfg, ins, n);
9340 n = read16 (ip + 2);
9342 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9344 emit_stloc_ir (cfg, sp, header, n);
9351 if (sp != stack_start)
9353 if (cfg->method != method)
9355 * Inlining this into a loop in a parent could lead to
9356 * stack overflows which is different behavior than the
9357 * non-inlined case, thus disable inlining in this case.
9359 goto inline_failure;
9361 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9362 ins->dreg = alloc_preg (cfg);
9363 ins->sreg1 = sp [0]->dreg;
9364 ins->type = STACK_PTR;
9365 MONO_ADD_INS (cfg->cbb, ins);
9367 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9369 ins->flags |= MONO_INST_INIT;
9374 case CEE_ENDFILTER: {
9375 MonoExceptionClause *clause, *nearest;
9376 int cc, nearest_num;
9380 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9382 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9383 ins->sreg1 = (*sp)->dreg;
9384 MONO_ADD_INS (bblock, ins);
9385 start_new_bblock = 1;
9390 for (cc = 0; cc < header->num_clauses; ++cc) {
9391 clause = &header->clauses [cc];
9392 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9393 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9394 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9400 if ((ip - header->code) != nearest->handler_offset)
9405 case CEE_UNALIGNED_:
9406 ins_flag |= MONO_INST_UNALIGNED;
9407 /* FIXME: record alignment? we can assume 1 for now */
9412 ins_flag |= MONO_INST_VOLATILE;
9416 ins_flag |= MONO_INST_TAILCALL;
9417 cfg->flags |= MONO_CFG_HAS_TAIL;
9418 /* Can't inline tail calls at this time */
9419 inline_costs += 100000;
9426 token = read32 (ip + 2);
9427 klass = mini_get_class (method, token, generic_context);
9428 CHECK_TYPELOAD (klass);
9429 if (generic_class_is_reference_type (cfg, klass))
9430 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9432 mini_emit_initobj (cfg, *sp, NULL, klass);
9436 case CEE_CONSTRAINED_:
9438 token = read32 (ip + 2);
9439 if (method->wrapper_type != MONO_WRAPPER_NONE)
9440 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
9442 constrained_call = mono_class_get_full (image, token, generic_context);
9443 CHECK_TYPELOAD (constrained_call);
9448 MonoInst *iargs [3];
9452 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9453 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9454 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9455 /* emit_memset only works when val == 0 */
9456 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9461 if (ip [1] == CEE_CPBLK) {
9462 MonoMethod *memcpy_method = get_memcpy_method ();
9463 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9465 MonoMethod *memset_method = get_memset_method ();
9466 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9476 ins_flag |= MONO_INST_NOTYPECHECK;
9478 ins_flag |= MONO_INST_NORANGECHECK;
9479 /* we ignore the no-nullcheck for now since we
9480 * really do it explicitly only when doing callvirt->call
9486 int handler_offset = -1;
9488 for (i = 0; i < header->num_clauses; ++i) {
9489 MonoExceptionClause *clause = &header->clauses [i];
9490 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9491 handler_offset = clause->handler_offset;
9496 bblock->flags |= BB_EXCEPTION_UNSAFE;
9498 g_assert (handler_offset != -1);
9500 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9501 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9502 ins->sreg1 = load->dreg;
9503 MONO_ADD_INS (bblock, ins);
9505 link_bblock (cfg, bblock, end_bblock);
9506 start_new_bblock = 1;
9514 CHECK_STACK_OVF (1);
9516 token = read32 (ip + 2);
9517 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC) {
9518 MonoType *type = mono_type_create_from_typespec (image, token);
9519 token = mono_type_size (type, &ialign);
9521 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9522 CHECK_TYPELOAD (klass);
9523 mono_class_init (klass);
9524 token = mono_class_value_size (klass, &align);
9526 EMIT_NEW_ICONST (cfg, ins, token);
9531 case CEE_REFANYTYPE: {
9532 MonoInst *src_var, *src;
9538 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9540 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9541 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9542 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9560 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
9570 g_warning ("opcode 0x%02x not handled", *ip);
9574 if (start_new_bblock != 1)
9577 bblock->cil_length = ip - bblock->cil_code;
9578 bblock->next_bb = end_bblock;
9580 if (cfg->method == method && cfg->domainvar) {
9582 MonoInst *get_domain;
9584 cfg->cbb = init_localsbb;
9586 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9587 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9590 get_domain->dreg = alloc_preg (cfg);
9591 MONO_ADD_INS (cfg->cbb, get_domain);
9593 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9594 MONO_ADD_INS (cfg->cbb, store);
9597 #ifdef TARGET_POWERPC
9598 if (cfg->compile_aot)
9599 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
9600 mono_get_got_var (cfg);
9603 if (cfg->method == method && cfg->got_var)
9604 mono_emit_load_got_addr (cfg);
9609 cfg->cbb = init_localsbb;
9611 for (i = 0; i < header->num_locals; ++i) {
9612 MonoType *ptype = header->locals [i];
9613 int t = ptype->type;
9614 dreg = cfg->locals [i]->dreg;
9616 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
9617 t = mono_class_enum_basetype (ptype->data.klass)->type;
9619 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9620 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
9621 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
9622 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
9623 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
9624 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
9625 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9626 ins->type = STACK_R8;
9627 ins->inst_p0 = (void*)&r8_0;
9628 ins->dreg = alloc_dreg (cfg, STACK_R8);
9629 MONO_ADD_INS (init_localsbb, ins);
9630 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
9631 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
9632 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
9633 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
9635 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9640 if (cfg->init_ref_vars && cfg->method == method) {
9641 /* Emit initialization for ref vars */
9642 // FIXME: Avoid duplication initialization for IL locals.
9643 for (i = 0; i < cfg->num_varinfo; ++i) {
9644 MonoInst *ins = cfg->varinfo [i];
9646 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
9647 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
9651 /* Add a sequence point for method entry/exit events */
9653 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
9654 MONO_ADD_INS (init_localsbb, ins);
9655 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
9656 MONO_ADD_INS (cfg->bb_exit, ins);
9661 if (cfg->method == method) {
9663 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9664 bb->region = mono_find_block_region (cfg, bb->real_offset);
9666 mono_create_spvar_for_region (cfg, bb->region);
9667 if (cfg->verbose_level > 2)
9668 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
9672 g_slist_free (class_inits);
9673 dont_inline = g_list_remove (dont_inline, method);
9675 if (inline_costs < 0) {
9678 /* Method is too large */
9679 mname = mono_method_full_name (method, TRUE);
9680 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
9681 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
9686 if ((cfg->verbose_level > 2) && (cfg->method == method))
9687 mono_print_code (cfg, "AFTER METHOD-TO-IR");
9689 return inline_costs;
9692 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
9693 g_slist_free (class_inits);
9694 dont_inline = g_list_remove (dont_inline, method);
9698 g_slist_free (class_inits);
9699 dont_inline = g_list_remove (dont_inline, method);
9703 g_slist_free (class_inits);
9704 dont_inline = g_list_remove (dont_inline, method);
9705 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
9709 g_slist_free (class_inits);
9710 dont_inline = g_list_remove (dont_inline, method);
9711 set_exception_type_from_invalid_il (cfg, method, ip);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a STORE*_MEMBASE_REG opcode to the corresponding *_MEMBASE_IMM
 * variant, so a store whose source value turns out to be a constant can
 * take the value as an immediate instead of a register.
 * Aborts on an opcode with no immediate form.
 */
9716 store_membase_reg_to_store_membase_imm (int opcode)
9719 	case OP_STORE_MEMBASE_REG:
9720 		return OP_STORE_MEMBASE_IMM;
9721 	case OP_STOREI1_MEMBASE_REG:
9722 		return OP_STOREI1_MEMBASE_IMM;
9723 	case OP_STOREI2_MEMBASE_REG:
9724 		return OP_STOREI2_MEMBASE_IMM;
9725 	case OP_STOREI4_MEMBASE_REG:
9726 		return OP_STOREI4_MEMBASE_IMM;
9727 	case OP_STOREI8_MEMBASE_REG:
9728 		return OP_STOREI8_MEMBASE_IMM;
	/* Any other store opcode (float, vtype, ...) has no IMM variant */
9730 		g_assert_not_reached ();
9736 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 *
 *   Map a two-register opcode to its register+immediate variant
 * (ALU ops, shifts, compares, stores and, on x86/amd64, pushes and
 * membase compares), used when sreg2/the source value is a constant.
 * NOTE(review): only a subset of the case labels is visible here;
 * behavior for opcodes between the visible returns can't be confirmed
 * from this chunk.
 */
9739 mono_op_to_op_imm (int opcode)
9749 		return OP_IDIV_UN_IMM;
9753 		return OP_IREM_UN_IMM;
9767 		return OP_ISHR_UN_IMM;
9784 		return OP_LSHR_UN_IMM;
9787 		return OP_COMPARE_IMM;
9789 		return OP_ICOMPARE_IMM;
9791 		return OP_LCOMPARE_IMM;
9793 	case OP_STORE_MEMBASE_REG:
9794 		return OP_STORE_MEMBASE_IMM;
9795 	case OP_STOREI1_MEMBASE_REG:
9796 		return OP_STOREI1_MEMBASE_IMM;
9797 	case OP_STOREI2_MEMBASE_REG:
9798 		return OP_STOREI2_MEMBASE_IMM;
9799 	case OP_STOREI4_MEMBASE_REG:
9800 		return OP_STOREI4_MEMBASE_IMM;
	/* Target-specific opcodes: immediate forms only exist on x86/amd64 */
9802 #if defined(TARGET_X86) || defined (TARGET_AMD64)
9804 		return OP_X86_PUSH_IMM;
9805 	case OP_X86_COMPARE_MEMBASE_REG:
9806 		return OP_X86_COMPARE_MEMBASE_IMM;
9808 #if defined(TARGET_AMD64)
9809 	case OP_AMD64_ICOMPARE_MEMBASE_REG:
9810 		return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9812 	case OP_VOIDCALL_REG:
9821 		return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL LDIND_* indirect-load opcode to the corresponding typed
 * LOAD*_MEMBASE IR opcode.  Note that two distinct ldind opcodes
 * (presumably I and REF — TODO confirm against elided case labels) both
 * map to the pointer-sized OP_LOAD_MEMBASE.
 * Aborts on an opcode that is not an ldind.
 */
9828 ldind_to_load_membase (int opcode)
9832 		return OP_LOADI1_MEMBASE;
9834 		return OP_LOADU1_MEMBASE;
9836 		return OP_LOADI2_MEMBASE;
9838 		return OP_LOADU2_MEMBASE;
9840 		return OP_LOADI4_MEMBASE;
9842 		return OP_LOADU4_MEMBASE;
9844 		return OP_LOAD_MEMBASE;
9846 		return OP_LOAD_MEMBASE;
9848 		return OP_LOADI8_MEMBASE;
9850 		return OP_LOADR4_MEMBASE;
9852 		return OP_LOADR8_MEMBASE;
9854 		g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL STIND_* indirect-store opcode to the corresponding typed
 * STORE*_MEMBASE_REG IR opcode.  Aborts on an opcode that is not a stind.
 */
9861 stind_to_store_membase (int opcode)
9865 		return OP_STOREI1_MEMBASE_REG;
9867 		return OP_STOREI2_MEMBASE_REG;
9869 		return OP_STOREI4_MEMBASE_REG;
9872 		return OP_STORE_MEMBASE_REG;
9874 		return OP_STOREI8_MEMBASE_REG;
9876 		return OP_STORER4_MEMBASE_REG;
9878 		return OP_STORER8_MEMBASE_REG;
9880 		g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a LOAD*_MEMBASE (base register + offset) opcode to the
 * corresponding absolute-address LOAD*_MEM opcode, used when the
 * effective address is a known constant.  Only implemented for
 * x86/amd64; 8-byte loads only when registers are 64 bit.
 * The elided fall-through presumably returns -1 for "no mapping" —
 * TODO confirm, the callers of the other op_to_op_* helpers test for -1.
 */
9887 mono_load_membase_to_load_mem (int opcode)
9889 	// FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
9890 #if defined(TARGET_X86) || defined(TARGET_AMD64)
9892 	case OP_LOAD_MEMBASE:
9894 	case OP_LOADU1_MEMBASE:
9895 		return OP_LOADU1_MEM;
9896 	case OP_LOADU2_MEMBASE:
9897 		return OP_LOADU2_MEM;
9898 	case OP_LOADI4_MEMBASE:
9899 		return OP_LOADI4_MEM;
9900 	case OP_LOADU4_MEMBASE:
9901 		return OP_LOADU4_MEM;
9902 #if SIZEOF_REGISTER == 8
9903 	case OP_LOADI8_MEMBASE:
9904 		return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given the store opcode that would spill an instruction's dreg and the
 * instruction's opcode, return a fused read-modify-write _MEMBASE opcode
 * that operates directly on the variable's stack slot, or (per the
 * callers, which test for -1) -1 if no fusion is possible.  x86 fuses
 * only 32-bit/pointer stores; amd64 also fuses 64-bit stores.
 */
9913 op_to_op_dest_membase (int store_opcode, int opcode)
9915 #if defined(TARGET_X86)
	/* x86: only pointer-sized/int32 destinations can be fused */
9916 	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
9921 		return OP_X86_ADD_MEMBASE_REG;
9923 		return OP_X86_SUB_MEMBASE_REG;
9925 		return OP_X86_AND_MEMBASE_REG;
9927 		return OP_X86_OR_MEMBASE_REG;
9929 		return OP_X86_XOR_MEMBASE_REG;
9932 		return OP_X86_ADD_MEMBASE_IMM;
9935 		return OP_X86_SUB_MEMBASE_IMM;
9938 		return OP_X86_AND_MEMBASE_IMM;
9941 		return OP_X86_OR_MEMBASE_IMM;
9944 		return OP_X86_XOR_MEMBASE_IMM;
9950 #if defined(TARGET_AMD64)
	/* amd64: 64-bit destinations can be fused as well */
9951 	if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
9956 		return OP_X86_ADD_MEMBASE_REG;
9958 		return OP_X86_SUB_MEMBASE_REG;
9960 		return OP_X86_AND_MEMBASE_REG;
9962 		return OP_X86_OR_MEMBASE_REG;
9964 		return OP_X86_XOR_MEMBASE_REG;
9966 		return OP_X86_ADD_MEMBASE_IMM;
9968 		return OP_X86_SUB_MEMBASE_IMM;
9970 		return OP_X86_AND_MEMBASE_IMM;
9972 		return OP_X86_OR_MEMBASE_IMM;
9974 		return OP_X86_XOR_MEMBASE_IMM;
	/* 64-bit (long) variants */
9976 		return OP_AMD64_ADD_MEMBASE_REG;
9978 		return OP_AMD64_SUB_MEMBASE_REG;
9980 		return OP_AMD64_AND_MEMBASE_REG;
9982 		return OP_AMD64_OR_MEMBASE_REG;
9984 		return OP_AMD64_XOR_MEMBASE_REG;
9987 		return OP_AMD64_ADD_MEMBASE_IMM;
9990 		return OP_AMD64_SUB_MEMBASE_IMM;
9993 		return OP_AMD64_AND_MEMBASE_IMM;
9996 		return OP_AMD64_OR_MEMBASE_IMM;
9999 		return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a setcc-style opcode with the store of its 1-byte result,
 * producing an x86 SET<cc>_MEMBASE that writes the flag directly to the
 * variable's stack slot.  Only byte stores qualify; the callers treat a
 * -1 result (elided fall-through here) as "no fusion possible".
 */
10009 op_to_op_store_membase (int store_opcode, int opcode)
10011 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10014 		if (store_opcode == OP_STOREI1_MEMBASE_REG)
10015 			return OP_X86_SETEQ_MEMBASE;
10017 		if (store_opcode == OP_STOREI1_MEMBASE_REG)
10018 			return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Given the load opcode that would fill an instruction's sreg1 from a
 * stack slot, return a fused opcode that reads sreg1 directly from
 * memory (push/compare _MEMBASE forms), or -1 (per callers) when the
 * load width doesn't permit it.  x86/amd64 only.
 */
10026 op_to_op_src1_membase (int load_opcode, int opcode)
10029 	/* FIXME: This has sign extension issues */
	/* A byte-sized compare-immediate can read the byte slot directly */
10031 	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10032 		return OP_X86_COMPARE_MEMBASE8_IMM;
	/* Otherwise only pointer/int32 loads can be fused on x86 */
10035 	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10040 		return OP_X86_PUSH_MEMBASE;
10041 	case OP_COMPARE_IMM:
10042 	case OP_ICOMPARE_IMM:
10043 		return OP_X86_COMPARE_MEMBASE_IMM;
10046 		return OP_X86_COMPARE_MEMBASE_REG;
10050 #ifdef TARGET_AMD64
10051 	/* FIXME: This has sign extension issues */
10053 	if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10054 		return OP_X86_COMPARE_MEMBASE8_IMM;
10059 		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10060 			return OP_X86_PUSH_MEMBASE;
	/* Commented out: immediate compares are unsafe for 64-bit immediates */
10062 	/* FIXME: This only works for 32 bit immediates
10063 	case OP_COMPARE_IMM:
10064 	case OP_LCOMPARE_IMM:
10065 		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10066 			return OP_AMD64_COMPARE_MEMBASE_IMM;
10068 	case OP_ICOMPARE_IMM:
10069 		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10070 			return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10074 		if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10075 			return OP_AMD64_COMPARE_MEMBASE_REG;
10078 		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10079 			return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Same as op_to_op_src1_membase, but fuses the load feeding sreg2:
 * returns a REG_MEMBASE opcode whose second operand is read directly
 * from the variable's stack slot, or -1 (per callers) when the load
 * width doesn't match.  On amd64 each op checks the load width itself
 * to pick the 32-bit (X86_) or 64-bit (AMD64_) form.
 */
10088 op_to_op_src2_membase (int load_opcode, int opcode)
	/* x86: only pointer/int32 loads can be fused */
10091 	if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10097 		return OP_X86_COMPARE_REG_MEMBASE;
10099 		return OP_X86_ADD_REG_MEMBASE;
10101 		return OP_X86_SUB_REG_MEMBASE;
10103 		return OP_X86_AND_REG_MEMBASE;
10105 		return OP_X86_OR_REG_MEMBASE;
10107 		return OP_X86_XOR_REG_MEMBASE;
10111 #ifdef TARGET_AMD64
10114 		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10115 			return OP_AMD64_ICOMPARE_REG_MEMBASE;
10119 		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10120 			return OP_AMD64_COMPARE_REG_MEMBASE;
10123 		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10124 			return OP_X86_ADD_REG_MEMBASE;
10126 		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10127 			return OP_X86_SUB_REG_MEMBASE;
10129 		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10130 			return OP_X86_AND_REG_MEMBASE;
10132 		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10133 			return OP_X86_OR_REG_MEMBASE;
10135 		if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10136 			return OP_X86_XOR_REG_MEMBASE;
10138 		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10139 			return OP_AMD64_ADD_REG_MEMBASE;
10141 		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10142 			return OP_AMD64_SUB_REG_MEMBASE;
10144 		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10145 			return OP_AMD64_AND_REG_MEMBASE;
10147 		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10148 			return OP_AMD64_OR_REG_MEMBASE;
10150 		if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10151 			return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm, but refuses (via the elided cases under the
 * #ifs) to convert opcodes that are software-emulated on this target —
 * long shifts on 32-bit targets and mul/div where the arch emulates
 * them — since the emulation path needs the register form.
 * Everything else is delegated to mono_op_to_op_imm.
 */
10159 mono_op_to_op_imm_noemul (int opcode)
10162 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
10167 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10175 		return mono_op_to_op_imm (opcode);
10179 #ifndef DISABLE_JIT
10182  * mono_handle_global_vregs:
10184  *   Make vregs used in more than one bblock 'global', i.e. allocate a variable
 * for them; conversely, demote variables that turn out to be used in a
 * single bblock back to plain local vregs, then compact the varinfo/vars
 * tables.  Runs once per method after IR generation.
10188 mono_handle_global_vregs (MonoCompile *cfg)
10190 	gint32 *vreg_to_bb;
10191 	MonoBasicBlock *bb;
	/*
	 * NOTE(review): two suspected bugs in this allocation:
	 * (1) element size is sizeof (gint32*) but the array holds gint32 —
	 *     harmless over-allocation on LP64, under-specifies intent;
	 * (2) precedence: "* cfg->next_vreg + 1" adds one BYTE, not one
	 *     element — presumably "sizeof (gint32) * (cfg->next_vreg + 1)"
	 *     was meant.  Confirm whether vreg_to_bb [cfg->next_vreg] is
	 *     ever indexed before fixing.
	 */
10194 	vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
10196 #ifdef MONO_ARCH_SIMD_INTRINSICS
10197 	if (cfg->uses_simd_intrinsics)
10198 		mono_simd_simplify_indirection (cfg);
10201 	/* Find local vregs used in more than one bb */
10202 	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10203 		MonoInst *ins = bb->code;
10204 		int block_num = bb->block_num;
10206 		if (cfg->verbose_level > 2)
10207 			printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
10210 		for (; ins; ins = ins->next) {
10211 			const char *spec = INS_INFO (ins->opcode);
10212 			int regtype = 0, regindex;
10215 			if (G_UNLIKELY (cfg->verbose_level > 2))
10216 				mono_print_ins (ins);
	/* By this point all CIL-level opcodes must have been lowered to IR */
10218 			g_assert (ins->opcode >= MONO_CEE_LAST);
	/* Visit dreg (0), then sreg1..sreg3 (1..3) of the instruction */
10220 			for (regindex = 0; regindex < 4; regindex ++) {
10223 				if (regindex == 0) {
10224 					regtype = spec [MONO_INST_DEST];
10225 					if (regtype == ' ')
10228 				} else if (regindex == 1) {
10229 					regtype = spec [MONO_INST_SRC1];
10230 					if (regtype == ' ')
10233 				} else if (regindex == 2) {
10234 					regtype = spec [MONO_INST_SRC2];
10235 					if (regtype == ' ')
10238 				} else if (regindex == 3) {
10239 					regtype = spec [MONO_INST_SRC3];
10240 					if (regtype == ' ')
10245 #if SIZEOF_REGISTER == 4
10246 				/* In the LLVM case, the long opcodes are not decomposed */
10247 				if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
10249 					 * Since some instructions reference the original long vreg,
10250 					 * and some reference the two component vregs, it is quite hard
10251 					 * to determine when it needs to be global. So be conservative.
10253 					if (!get_vreg_to_inst (cfg, vreg)) {
10254 						mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10256 						if (cfg->verbose_level > 2)
10257 							printf ("LONG VREG R%d made global.\n", vreg);
10261 					 * Make the component vregs volatile since the optimizations can
10262 					 * get confused otherwise.
	/* vreg+1 / vreg+2 are the low/high 32-bit halves of the long vreg */
10264 					get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
10265 					get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
10269 				g_assert (vreg != -1);
10271 				prev_bb = vreg_to_bb [vreg];
10272 				if (prev_bb == 0) {
10273 					/* 0 is a valid block num */
10274 					vreg_to_bb [vreg] = block_num + 1;
10275 				} else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
	/* Hardware registers are never turned into variables */
10276 					if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
10279 					if (!get_vreg_to_inst (cfg, vreg)) {
10280 						if (G_UNLIKELY (cfg->verbose_level > 2))
10281 							printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
	/* Allocate a variable of the matching machine type per regtype */
10285 						mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
10288 						mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10291 						mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
10294 						mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
10297 						g_assert_not_reached ();
10301 					/* Flag as having been used in more than one bb */
10302 					vreg_to_bb [vreg] = -1;
10308 	/* If a variable is used in only one bblock, convert it into a local vreg */
10309 	for (i = 0; i < cfg->num_varinfo; i++) {
10310 		MonoInst *var = cfg->varinfo [i];
10311 		MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
10313 		switch (var->type) {
10319 #if SIZEOF_REGISTER == 8
10322 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
10323 			/* Enabling this screws up the fp stack on x86 */
10326 			/* Arguments are implicitly global */
10327 			/* Putting R4 vars into registers doesn't work currently */
10328 			if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
10330 				 * Make sure that the variable's liveness interval doesn't contain a call, since
10331 				 * that would cause the lvreg to be spilled, making the whole optimization
10334 				/* This is too slow for JIT compilation */
10336 				if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
10338 					int def_index, call_index, ins_index;
10339 					gboolean spilled = FALSE;
10344 					for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
10345 						const char *spec = INS_INFO (ins->opcode);
10347 						if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
10348 							def_index = ins_index;
	/*
	 * NOTE(review): both sides of this || test SRC1/sreg1 — the second
	 * disjunct is a verbatim duplicate and almost certainly was meant to
	 * test spec [MONO_INST_SRC2] / ins->sreg2.  As written, uses of the
	 * variable through sreg2 are never seen by this spill check.
	 */
10350 						if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
10351 							((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
10352 							if (call_index > def_index) {
10358 						if (MONO_IS_CALL (ins))
10359 							call_index = ins_index;
10369 				if (G_UNLIKELY (cfg->verbose_level > 2))
10370 					printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
	/* Demote: the variable is dead, the raw vreg is used directly */
10371 				var->flags |= MONO_INST_IS_DEAD;
10372 				cfg->vreg_to_inst [var->dreg] = NULL;
10379 	 * Compress the varinfo and vars tables so the liveness computation is faster and
10380 	 * takes up less space.
10383 	for (i = 0; i < cfg->num_varinfo; ++i) {
10384 		MonoInst *var = cfg->varinfo [i];
10385 		if (pos < i && cfg->locals_start == i)
10386 			cfg->locals_start = pos;
10387 		if (!(var->flags & MONO_INST_IS_DEAD)) {
10389 			cfg->varinfo [pos] = cfg->varinfo [i];
10390 			cfg->varinfo [pos]->inst_c0 = pos;
10391 			memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
10392 			cfg->vars [pos].idx = pos;
10393 #if SIZEOF_REGISTER == 4
10394 			if (cfg->varinfo [pos]->type == STACK_I8) {
10395 				/* Modify the two component vars too */
10398 				var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
10399 				var1->inst_c0 = pos;
10400 				var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
10401 				var1->inst_c0 = pos;
10408 	cfg->num_varinfo = pos;
10409 	if (cfg->locals_start > cfg->num_varinfo)
10410 		cfg->locals_start = cfg->num_varinfo;
10414  * mono_spill_global_vars:
10416  *   Generate spill code for variables which are not allocated to registers,
10417  * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10418  * code is generated which could be optimized by the local optimization passes.
 *
 * Also records per-vreg live range start/end instructions for precise
 * debug info, and fuses loads/stores into _MEMBASE opcodes where the
 * target supports it.
10421 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
10423 	MonoBasicBlock *bb;
10425 	int orig_next_vreg;
10426 	guint32 *vreg_to_lvreg;
10428 	guint32 i, lvregs_len;
10429 	gboolean dest_has_lvreg = FALSE;
10430 	guint32 stacktypes [128];
10431 	MonoInst **live_range_start, **live_range_end;
10432 	MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
10434 	*need_local_opts = FALSE;
10436 	memset (spec2, 0, sizeof (spec2));
10438 	/* FIXME: Move this function to mini.c */
	/* Map INS_INFO regtype chars to stack types for alloc_dreg () */
10439 	stacktypes ['i'] = STACK_PTR;
10440 	stacktypes ['l'] = STACK_I8;
10441 	stacktypes ['f'] = STACK_R8;
10442 #ifdef MONO_ARCH_SIMD_INTRINSICS
10443 	stacktypes ['x'] = STACK_VTYPE;
10446 #if SIZEOF_REGISTER == 4
10447 	/* Create MonoInsts for longs */
10448 	for (i = 0; i < cfg->num_varinfo; i++) {
10449 		MonoInst *ins = cfg->varinfo [i];
10451 		if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
10452 			switch (ins->type) {
10457 				if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
10460 				g_assert (ins->opcode == OP_REGOFFSET);
	/* dreg+1 / dreg+2 are the low/high word component vregs of a long */
10462 				tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10464 				tree->opcode = OP_REGOFFSET;
10465 				tree->inst_basereg = ins->inst_basereg;
10466 				tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10468 				tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10470 				tree->opcode = OP_REGOFFSET;
10471 				tree->inst_basereg = ins->inst_basereg;
10472 				tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10482 	/* FIXME: widening and truncation */
10485 	 * As an optimization, when a variable allocated to the stack is first loaded into
10486 	 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10487 	 * the variable again.
10489 	orig_next_vreg = cfg->next_vreg;
10490 	vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
	/* NOTE(review): fixed capacity of 1024, guarded only by g_assert
	 * (lvregs_len < 1024) below — confirm no pathological method can
	 * exceed it in release builds where g_assert may be compiled out. */
10491 	lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10495 	 * These arrays contain the first and last instructions accessing a given
10497 	 * Since we emit bblocks in the same order we process them here, and we
10498 	 * don't split live ranges, these will precisely describe the live range of
10499 	 * the variable, i.e. the instruction range where a valid value can be found
10500 	 * in the variables location.
10501 	 * The live range is computed using the liveness info computed by the liveness pass.
10502 	 * We can't use vmv->range, since that is an abstract live range, and we need
10503 	 * one which is instruction precise.
10504 	 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
10506 	/* FIXME: Only do this if debugging info is requested */
10507 	live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
10508 	live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
10509 	live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10510 	live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10512 	/* Add spill loads/stores */
10513 	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10516 		if (cfg->verbose_level > 2)
10517 			printf ("\nSPILL BLOCK %d:\n", bb->block_num);
10519 		/* Clear vreg_to_lvreg array */
10520 		for (i = 0; i < lvregs_len; i++)
10521 			vreg_to_lvreg [lvregs [i]] = 0;
10525 		MONO_BB_FOR_EACH_INS (bb, ins) {
10526 			const char *spec = INS_INFO (ins->opcode);
10527 			int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
10528 			gboolean store, no_lvreg;
10529 			int sregs [MONO_MAX_SRC_REGS];
10531 			if (G_UNLIKELY (cfg->verbose_level > 2))
10532 				mono_print_ins (ins);
10534 			if (ins->opcode == OP_NOP)
10538 			 * We handle LDADDR here as well, since it can only be decomposed
10539 			 * when variable addresses are known.
10541 			if (ins->opcode == OP_LDADDR) {
10542 				MonoInst *var = ins->inst_p0;
10544 				if (var->opcode == OP_VTARG_ADDR) {
10545 					/* Happens on SPARC/S390 where vtypes are passed by reference */
10546 					MonoInst *vtaddr = var->inst_left;
10547 					if (vtaddr->opcode == OP_REGVAR) {
10548 						ins->opcode = OP_MOVE;
10549 						ins->sreg1 = vtaddr->dreg;
10551 					else if (var->inst_left->opcode == OP_REGOFFSET) {
10552 						ins->opcode = OP_LOAD_MEMBASE;
10553 						ins->inst_basereg = vtaddr->inst_basereg;
10554 						ins->inst_offset = vtaddr->inst_offset;
10558 					g_assert (var->opcode == OP_REGOFFSET);
	/* address-of a stack slot == base register + constant offset */
10560 					ins->opcode = OP_ADD_IMM;
10561 					ins->sreg1 = var->inst_basereg;
10562 					ins->inst_imm = var->inst_offset;
10565 				*need_local_opts = TRUE;
10566 				spec = INS_INFO (ins->opcode);
	/* CIL-level opcodes must not survive to this pass */
10569 			if (ins->opcode < MONO_CEE_LAST) {
10570 				mono_print_ins (ins);
10571 				g_assert_not_reached ();
10575 			 * Store opcodes have destbasereg in the dreg, but in reality, it is an
	/* ... source: swap dreg/sreg2 and use spec2 so the generic
	 * dreg/sreg processing below treats the base reg as a source;
	 * the swap is undone further down before moving on. */
10579 			if (MONO_IS_STORE_MEMBASE (ins)) {
10580 				tmp_reg = ins->dreg;
10581 				ins->dreg = ins->sreg2;
10582 				ins->sreg2 = tmp_reg;
10585 				spec2 [MONO_INST_DEST] = ' ';
10586 				spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10587 				spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10588 				spec2 [MONO_INST_SRC3] = ' ';
10590 			} else if (MONO_IS_STORE_MEMINDEX (ins))
10591 				g_assert_not_reached ();
10596 			if (G_UNLIKELY (cfg->verbose_level > 2)) {
10597 				printf ("\t %.3s %d", spec, ins->dreg);
10598 				num_sregs = mono_inst_get_src_registers (ins, sregs);
10599 				for (srcindex = 0; srcindex < 3; ++srcindex)
10600 					printf (" %d", sregs [srcindex]);
	/***************/
	/*    DREG     */
	/***************/
10607 			regtype = spec [MONO_INST_DEST];
10608 			g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
10611 			if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
10612 				MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
10613 				MonoInst *store_ins;
10615 				MonoInst *def_ins = ins;
10616 				int dreg = ins->dreg; /* The original vreg */
10618 				store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
10620 				if (var->opcode == OP_REGVAR) {
10621 					ins->dreg = var->dreg;
10622 				} else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
10624 					 * Instead of emitting a load+store, use a _membase opcode.
10626 					g_assert (var->opcode == OP_REGOFFSET);
10627 					if (ins->opcode == OP_MOVE) {
10631 						ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
10632 						ins->inst_basereg = var->inst_basereg;
10633 						ins->inst_offset = var->inst_offset;
10636 					spec = INS_INFO (ins->opcode);
10640 					g_assert (var->opcode == OP_REGOFFSET);
10642 					prev_dreg = ins->dreg;
10644 					/* Invalidate any previous lvreg for this vreg */
10645 					vreg_to_lvreg [ins->dreg] = 0;
	/* soft-float: an R8 slot is physically stored as a 64-bit int */
10649 					if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
10651 						store_opcode = OP_STOREI8_MEMBASE_REG;
10654 					ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
10656 					if (regtype == 'l') {
10657 						NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
10658 						mono_bblock_insert_after_ins (bb, ins, store_ins);
10659 						NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
10660 						mono_bblock_insert_after_ins (bb, ins, store_ins);
10661 						def_ins = store_ins;
10664 						g_assert (store_opcode != OP_STOREV_MEMBASE);
10666 						/* Try to fuse the store into the instruction itself */
10667 						/* FIXME: Add more instructions */
10668 						if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
10669 							ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
10670 							ins->inst_imm = ins->inst_c0;
10671 							ins->inst_destbasereg = var->inst_basereg;
10672 							ins->inst_offset = var->inst_offset;
10673 							spec = INS_INFO (ins->opcode);
10674 						} else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
10675 							ins->opcode = store_opcode;
10676 							ins->inst_destbasereg = var->inst_basereg;
10677 							ins->inst_offset = var->inst_offset;
	/* The instruction became a store: redo the dreg/sreg2 swap + spec2 */
10681 							tmp_reg = ins->dreg;
10682 							ins->dreg = ins->sreg2;
10683 							ins->sreg2 = tmp_reg;
10686 							spec2 [MONO_INST_DEST] = ' ';
10687 							spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10688 							spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10689 							spec2 [MONO_INST_SRC3] = ' ';
10691 						} else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
10692 							// FIXME: The backends expect the base reg to be in inst_basereg
10693 							ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
10695 							ins->inst_basereg = var->inst_basereg;
10696 							ins->inst_offset = var->inst_offset;
10697 							spec = INS_INFO (ins->opcode);
10699 							/* printf ("INS: "); mono_print_ins (ins); */
10700 							/* Create a store instruction */
10701 							NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
10703 							/* Insert it after the instruction */
10704 							mono_bblock_insert_after_ins (bb, ins, store_ins);
10706 							def_ins = store_ins;
10709 							 * We can't assign ins->dreg to var->dreg here, since the
10710 							 * sregs could use it. So set a flag, and do it after
10713 							if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
10714 								dest_has_lvreg = TRUE;
10719 				if (def_ins && !live_range_start [dreg]) {
10720 					live_range_start [dreg] = def_ins;
10721 					live_range_start_bb [dreg] = bb;
	/************/
	/*  SREGS   */
	/************/
10728 			num_sregs = mono_inst_get_src_registers (ins, sregs);
10729 			for (srcindex = 0; srcindex < 3; ++srcindex) {
10730 				regtype = spec [MONO_INST_SRC1 + srcindex];
10731 				sreg = sregs [srcindex];
10733 				g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
10734 				if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
10735 					MonoInst *var = get_vreg_to_inst (cfg, sreg);
10736 					MonoInst *use_ins = ins;
10737 					MonoInst *load_ins;
10738 					guint32 load_opcode;
10740 					if (var->opcode == OP_REGVAR) {
10741 						sregs [srcindex] = var->dreg;
10742 						//mono_inst_set_src_registers (ins, sregs);
10743 						live_range_end [sreg] = use_ins;
10744 						live_range_end_bb [sreg] = bb;
10748 					g_assert (var->opcode == OP_REGOFFSET);
10750 					load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
10752 					g_assert (load_opcode != OP_LOADV_MEMBASE);
10754 					if (vreg_to_lvreg [sreg]) {
10755 						g_assert (vreg_to_lvreg [sreg] != -1);
10757 						/* The variable is already loaded to an lvreg */
10758 						if (G_UNLIKELY (cfg->verbose_level > 2))
10759 							printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
10760 						sregs [srcindex] = vreg_to_lvreg [sreg];
10761 						//mono_inst_set_src_registers (ins, sregs);
10765 					/* Try to fuse the load into the instruction */
10766 					if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
10767 						ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
10768 						sregs [0] = var->inst_basereg;
10769 						//mono_inst_set_src_registers (ins, sregs);
10770 						ins->inst_offset = var->inst_offset;
10771 					} else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
10772 						ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
10773 						sregs [1] = var->inst_basereg;
10774 						//mono_inst_set_src_registers (ins, sregs);
10775 						ins->inst_offset = var->inst_offset;
10777 						if (MONO_IS_REAL_MOVE (ins)) {
10778 							ins->opcode = OP_NOP;
10781 							//printf ("%d ", srcindex); mono_print_ins (ins);
10783 							sreg = alloc_dreg (cfg, stacktypes [regtype]);
	/* Remember the lvreg so later uses of the same vreg skip the reload */
10785 							if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
10786 								if (var->dreg == prev_dreg) {
10788 									 * sreg refers to the value loaded by the load
10789 									 * emitted below, but we need to use ins->dreg
10790 									 * since it refers to the store emitted earlier.
10794 								g_assert (sreg != -1);
10795 								vreg_to_lvreg [var->dreg] = sreg;
10796 								g_assert (lvregs_len < 1024);
10797 								lvregs [lvregs_len ++] = var->dreg;
10801 						sregs [srcindex] = sreg;
10802 						//mono_inst_set_src_registers (ins, sregs);
10804 						if (regtype == 'l') {
10805 							NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
10806 							mono_bblock_insert_before_ins (bb, ins, load_ins);
10807 							NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
10808 							mono_bblock_insert_before_ins (bb, ins, load_ins);
10809 							use_ins = load_ins;
10812 #if SIZEOF_REGISTER == 4
10813 							g_assert (load_opcode != OP_LOADI8_MEMBASE);
10815 							NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
10816 							mono_bblock_insert_before_ins (bb, ins, load_ins);
10817 							use_ins = load_ins;
10821 					if (var->dreg < orig_next_vreg) {
10822 						live_range_end [var->dreg] = use_ins;
10823 						live_range_end_bb [var->dreg] = bb;
10827 			mono_inst_set_src_registers (ins, sregs);
	/* Deferred from the DREG section: publish the dreg lvreg now that
	 * the sregs have been processed */
10829 			if (dest_has_lvreg) {
10830 				g_assert (ins->dreg != -1);
10831 				vreg_to_lvreg [prev_dreg] = ins->dreg;
10832 				g_assert (lvregs_len < 1024);
10833 				lvregs [lvregs_len ++] = prev_dreg;
10834 				dest_has_lvreg = FALSE;
	/* Undo the dreg/sreg2 swap performed for store opcodes above */
10838 				tmp_reg = ins->dreg;
10839 				ins->dreg = ins->sreg2;
10840 				ins->sreg2 = tmp_reg;
	/* Calls clobber the lvreg cache: everything must be reloaded after */
10843 			if (MONO_IS_CALL (ins)) {
10844 				/* Clear vreg_to_lvreg array */
10845 				for (i = 0; i < lvregs_len; i++)
10846 					vreg_to_lvreg [lvregs [i]] = 0;
10848 			} else if (ins->opcode == OP_NOP) {
10850 				MONO_INST_NULLIFY_SREGS (ins);
10853 			if (cfg->verbose_level > 2)
10854 				mono_print_ins_index (1, ins);
10857 		/* Extend the live range based on the liveness info */
10858 		if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
10859 			for (i = 0; i < cfg->num_varinfo; i ++) {
10860 				MonoMethodVar *vi = MONO_VARINFO (cfg, i);
10862 				if (vreg_is_volatile (cfg, vi->vreg))
10863 					/* The liveness info is incomplete */
10866 				if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
10867 					/* Live from at least the first ins of this bb */
10868 					live_range_start [vi->vreg] = bb->code;
10869 					live_range_start_bb [vi->vreg] = bb;
10872 				if (mono_bitset_test_fast (bb->live_out_set, i)) {
10873 					/* Live at least until the last ins of this bb */
10874 					live_range_end [vi->vreg] = bb->last_ins;
10875 					live_range_end_bb [vi->vreg] = bb;
10881 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
10883 	 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
10884 	 * by storing the current native offset into MonoMethodVar->live_range_start/end.
10886 	if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
10887 		for (i = 0; i < cfg->num_varinfo; ++i) {
10888 			int vreg = MONO_VARINFO (cfg, i)->vreg;
10891 			if (live_range_start [vreg]) {
10892 				MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
10894 				ins->inst_c1 = vreg;
10895 				mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
10897 			if (live_range_end [vreg]) {
10898 				MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
10900 				ins->inst_c1 = vreg;
10901 				if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
10902 					mono_add_ins_to_end (live_range_end_bb [vreg], ins);
10904 					mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
	/* These were g_new0/g_new allocated (not mempool), so free explicitly */
10910 	g_free (live_range_start);
10911 	g_free (live_range_end);
10912 	g_free (live_range_start_bb);
10913 	g_free (live_range_end_bb);
10918 * - use 'iadd' instead of 'int_add'
10919 * - handling ovf opcodes: decompose in method_to_ir.
10920 * - unify iregs/fregs
10921 * -> partly done, the missing parts are:
10922 * - a more complete unification would involve unifying the hregs as well, so
10923 * code wouldn't need if (fp) all over the place. but that would mean the hregs
10924 * would no longer map to the machine hregs, so the code generators would need to
10925 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
10926 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
10927 * fp/non-fp branches speeds it up by about 15%.
10928 * - use sext/zext opcodes instead of shifts
10930 * - get rid of TEMPLOADs if possible and use vregs instead
10931 * - clean up usage of OP_P/OP_ opcodes
10932 * - cleanup usage of DUMMY_USE
10933 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
10935 * - set the stack type and allocate a dreg in the EMIT_NEW macros
10936 * - get rid of all the <foo>2 stuff when the new JIT is ready.
10937 * - make sure handle_stack_args () is called before the branch is emitted
10938 * - when the new IR is done, get rid of all unused stuff
10939 * - COMPARE/BEQ as separate instructions or unify them ?
10940 * - keeping them separate allows specialized compare instructions like
10941 * compare_imm, compare_membase
10942 * - most back ends unify fp compare+branch, fp compare+ceq
10943 * - integrate mono_save_args into inline_method
10944 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
10945 * - handle long shift opts on 32 bit platforms somehow: they require
10946 * 3 sregs (2 for arg1 and 1 for arg2)
10947 * - make byref a 'normal' type.
10948 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
10949 * variable if needed.
10950 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
10951 * like inline_method.
10952 * - remove inlining restrictions
10953 * - fix LNEG and enable cfold of INEG
10954 * - generalize x86 optimizations like ldelema as a peephole optimization
10955 * - add store_mem_imm for amd64
10956 * - optimize the loading of the interruption flag in the managed->native wrappers
10957 * - avoid special handling of OP_NOP in passes
10958 * - move code inserting instructions into one function/macro.
10959 * - try a coalescing phase after liveness analysis
10960 * - add float -> vreg conversion + local optimizations on !x86
10961 * - figure out how to handle decomposed branches during optimizations, ie.
10962 * compare+branch, op_jump_table+op_br etc.
10963 * - promote RuntimeXHandles to vregs
10964 * - vtype cleanups:
10965 * - add a NEW_VARLOADA_VREG macro
10966 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
10967 * accessing vtype fields.
10968 * - get rid of I8CONST on 64 bit platforms
10969 * - dealing with the increase in code size due to branches created during opcode
10971 * - use extended basic blocks
10972 * - all parts of the JIT
10973 * - handle_global_vregs () && local regalloc
10974 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
10975 * - sources of increase in code size:
10978 * - isinst and castclass
10979 * - lvregs not allocated to global registers even if used multiple times
10980 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
10982 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
10983 * - add all micro optimizations from the old JIT
10984 * - put tree optimizations into the deadce pass
10985 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
10986 * specific function.
10987 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
10988 * fcompare + branchCC.
10989 * - create a helper function for allocating a stack slot, taking into account
10990 * MONO_CFG_HAS_SPILLUP.
10992 * - merge the ia64 switch changes.
10993 * - optimize mono_regstate2_alloc_int/float.
10994 * - fix the pessimistic handling of variables accessed in exception handler blocks.
10995 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
10996 * parts of the tree could be separated by other instructions, killing the tree
10997 * arguments, or stores killing loads etc. Also, should we fold loads into other
10998 * instructions if the result of the load is used multiple times ?
10999 * - make the REM_IMM optimization in mini-x86.c arch-independent.
11000 * - LAST MERGE: 108395.
11001 * - when returning vtypes in registers, generate IR and append it to the end of the
11002 * last bb instead of doing it in the epilog.
11003 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
11011 - When to decompose opcodes:
11012 - earlier: this makes some optimizations hard to implement, since the low level IR
11013 no longer contains the necessary information. But it is easier to do.
11014 - later: harder to implement, enables more optimizations.
11015 - Branches inside bblocks:
11016 - created when decomposing complex opcodes.
11017 - branches to another bblock: harmless, but not tracked by the branch
11018 optimizations, so need to branch to a label at the start of the bblock.
11019 - branches to inside the same bblock: very problematic, trips up the local
11020 reg allocator. Can be fixed by splitting the current bblock, but that is a
11021 complex operation, since some local vregs can become global vregs etc.
11022 - Local/global vregs:
11023 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11024 local register allocator.
11025 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11026 structure, created by mono_create_var (). Assigned to hregs or the stack by
11027 the global register allocator.
11028 - When to do optimizations like alu->alu_imm:
11029 - earlier -> saves work later on since the IR will be smaller/simpler
11030 - later -> can work on more instructions
11031 - Handling of valuetypes:
11032 - When a vtype is pushed on the stack, a new temporary is created, an
11033 instruction computing its address (LDADDR) is emitted and pushed on
11034 the stack. Need to optimize cases when the vtype is used immediately as in
11035 argument passing, stloc etc.
11036 - Instead of the to_end stuff in the old JIT, simply call the function handling
11037 the values on the stack before emitting the last instruction of the bb.
11040 #endif /* DISABLE_JIT */