2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
30 #include <mono/utils/memcheck.h>
32 #include <mono/metadata/assembly.h>
33 #include <mono/metadata/loader.h>
34 #include <mono/metadata/tabledefs.h>
35 #include <mono/metadata/class.h>
36 #include <mono/metadata/object.h>
37 #include <mono/metadata/exception.h>
38 #include <mono/metadata/opcodes.h>
39 #include <mono/metadata/mono-endian.h>
40 #include <mono/metadata/tokentype.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/marshal.h>
43 #include <mono/metadata/debug-helpers.h>
44 #include <mono/metadata/mono-debug.h>
45 #include <mono/metadata/gc-internal.h>
46 #include <mono/metadata/security-manager.h>
47 #include <mono/metadata/threads-types.h>
48 #include <mono/metadata/security-core-clr.h>
49 #include <mono/metadata/monitor.h>
50 #include <mono/metadata/profiler-private.h>
51 #include <mono/metadata/profiler.h>
52 #include <mono/utils/mono-compiler.h>
59 #include "jit-icalls.h"
60 #include "debugger-agent.h"
/* Relative cost of a branch, used by the inliner's size heuristics. */
62 #define BRANCH_COST 100
/* Maximum IL size (in bytes) of a callee considered for inlining. */
63 #define INLINE_LENGTH_LIMIT 20
/* NOTE(review): the macros below are '\'-continued and this listing is
 * missing several continuation lines (e.g. the closing "} while (0)");
 * the descriptions rely only on the visible fragments. */
64 #define INLINE_FAILURE do {\
65 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
68 #define CHECK_CFG_EXCEPTION do {\
69 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
72 #define METHOD_ACCESS_FAILURE do { \
73 char *method_fname = mono_method_full_name (method, TRUE); \
74 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
75 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
76 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
77 g_free (method_fname); \
78 g_free (cil_method_fname); \
79 goto exception_exit; \
81 #define FIELD_ACCESS_FAILURE do { \
82 char *method_fname = mono_method_full_name (method, TRUE); \
83 char *field_fname = mono_field_full_name (field); \
84 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
85 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
86 g_free (method_fname); \
87 g_free (field_fname); \
88 goto exception_exit; \
90 #define GENERIC_SHARING_FAILURE(opcode) do { \
91 if (cfg->generic_sharing_context) { \
92 if (cfg->verbose_level > 2) \
93 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
94 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
95 goto exception_exit; \
99 /* Determine whenever 'ins' represents a load of the 'this' argument */
100 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations: map the generic ldind/stind opcodes to the
 * corresponding *_MEMBASE load/store opcodes (defined later in this file). */
102 static int ldind_to_load_membase (int opcode);
103 static int stind_to_store_membase (int opcode);
/* Map a generic opcode to its immediate-operand variant; the _noemul
 * flavor presumably skips software-emulated opcodes — TODO confirm. */
105 int mono_op_to_op_imm (int opcode);
106 int mono_op_to_op_imm_noemul (int opcode);
/* IR-emission helpers shared with other JIT files. */
108 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
109 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
110 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
112 /* helper methods signature */
/* Shared signatures for the runtime trampolines; defined in another
 * translation unit (extern). */
113 extern MonoMethodSignature *helper_sig_class_init_trampoline;
114 extern MonoMethodSignature *helper_sig_domain_get;
115 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
116 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
117 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
118 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
121 * Instruction metadata
/* First expansion of MINI_OP/MINI_OP3: each opcode contributes its
 * dest/src1/src2(/src3) register-kind characters to a metadata table
 * built by including mini-ops.h below. */
129 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
130 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
136 #if SIZEOF_REGISTER == 8
141 /* keep in sync with the enum in mini.h */
144 #include "mini-ops.h"
/* Second expansion: count how many source registers each opcode uses
 * (index of the last used sreg, see the comment below). */
149 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
150 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
152 * This should contain the index of the last sreg + 1. This is not the same
153 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
155 const gint8 ins_sreg_counts[] = {
156 #include "mini-ops.h"
/* Hash mapping JIT icall names to their info; defined elsewhere. */
161 extern GHashTable *jit_icall_name_hash;
/* MONO_INIT_VARINFO: mark the variable's first-use position as unset (0xffff). */
163 #define MONO_INIT_VARINFO(vi,id) do { \
164 (vi)->range.first_use.pos.bid = 0xffff; \
170 mono_inst_set_src_registers (MonoInst *ins, int *regs)
172 ins->sreg1 = regs [0];
173 ins->sreg2 = regs [1];
174 ins->sreg3 = regs [2];
178 mono_alloc_ireg (MonoCompile *cfg)
180 return alloc_ireg (cfg);
184 mono_alloc_freg (MonoCompile *cfg)
186 return alloc_freg (cfg);
190 mono_alloc_preg (MonoCompile *cfg)
192 return alloc_preg (cfg);
196 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
198 return alloc_dreg (cfg, stack_type);
/*
 * mono_type_to_regmove:
 *
 *   Select the register-to-register move variant appropriate for TYPE.
 * NOTE(review): the return type, the byref fast path and the OP_* return
 * statements are missing from this listing; comments cover only the
 * visible fragments.
 */
202 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
208 switch (type->type) {
211 case MONO_TYPE_BOOLEAN:
223 case MONO_TYPE_FNPTR:
225 case MONO_TYPE_CLASS:
226 case MONO_TYPE_STRING:
227 case MONO_TYPE_OBJECT:
228 case MONO_TYPE_SZARRAY:
229 case MONO_TYPE_ARRAY:
233 #if SIZEOF_REGISTER == 8
242 case MONO_TYPE_VALUETYPE:
/* An enum is moved like its underlying integral type. */
243 if (type->data.klass->enumtype) {
244 type = mono_class_enum_basetype (type->data.klass);
/* SIMD value types are special-cased. */
247 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
250 case MONO_TYPE_TYPEDBYREF:
/* Generic instances: retry with the container class' byval type. */
252 case MONO_TYPE_GENERICINST:
253 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables can only appear under generic sharing. */
257 g_assert (cfg->generic_sharing_context);
260 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb:
 *
 *   Debug helper: print BB's block number, its incoming and outgoing
 * edges (block number plus dfn), and every instruction in its code list,
 * prefixed by MSG.
 */
266 mono_print_bb (MonoBasicBlock *bb, const char *msg)
271 printf ("\n%s %d: [IN: ", msg, bb->block_num);
272 for (i = 0; i < bb->in_count; ++i)
273 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
275 for (i = 0; i < bb->out_count; ++i)
276 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
/* Dump the instruction list; -1 presumably suppresses the index prefix
 * in mono_print_ins_index — TODO confirm. */
278 for (tree = bb->code; tree; tree = tree->next)
279 mono_print_ins_index (-1, tree);
283 * Can't put this at the beginning, since other files reference stuff from this
/* Mark the method as unverified: break into the debugger when the
 * break_on_unverified debug option is set, otherwise jump to the local
 * 'unverified' label. */
288 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
/* Look up the basic block starting at IL offset IP in the
 * cil_offset_to_bb map, lazily creating and registering it; out-of-range
 * IPs are unverifiable. */
290 #define GET_BBLOCK(cfg,tblock,ip) do { \
291 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
293 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
294 NEW_BBLOCK (cfg, (tblock)); \
295 (tblock)->cil_code = (ip); \
296 ADD_BBLOCK (cfg, (tblock)); \
300 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Emit an x86/amd64 LEA instruction: dreg = sr1 + (sr2 << shift) + imm. */
301 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
302 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
303 (dest)->dreg = alloc_preg ((cfg)); \
304 (dest)->sreg1 = (sr1); \
305 (dest)->sreg2 = (sr2); \
306 (dest)->inst_imm = (imm); \
307 (dest)->backend.shift_amount = (shift); \
308 MONO_ADD_INS ((cfg)->cbb, (dest)); \
312 #if SIZEOF_REGISTER == 8
/* On 64-bit targets: sign-extend a 32-bit operand that is combined with
 * a native pointer; expands to nothing on 32-bit targets (below). */
313 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
314 /* FIXME: Need to add many more cases */ \
315 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
317 int dr = alloc_preg (cfg); \
318 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
319 (ins)->sreg2 = widen->dreg; \
323 #define ADD_WIDEN_OP(ins, arg1, arg2)
/* Used by the CIL decode loop ('sp' is the eval-stack pointer): build a
 * binary op from the two top-of-stack values, type-specialize it via
 * type_from_op (), widen if needed, and push the decomposed result. */
326 #define ADD_BINOP(op) do { \
327 MONO_INST_NEW (cfg, ins, (op)); \
329 ins->sreg1 = sp [0]->dreg; \
330 ins->sreg2 = sp [1]->dreg; \
331 type_from_op (ins, sp [0], sp [1]); \
333 /* Have to insert a widening op */ \
334 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
335 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
336 MONO_ADD_INS ((cfg)->cbb, (ins)); \
337 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
340 #define ADD_UNOP(op) do { \
341 MONO_INST_NEW (cfg, ins, (op)); \
343 ins->sreg1 = sp [0]->dreg; \
344 type_from_op (ins, sp [0], NULL); \
346 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
347 MONO_ADD_INS ((cfg)->cbb, (ins)); \
348 *sp++ = mono_decompose_opcode (cfg, ins); \
351 #define ADD_BINCOND(next_block) do { \
354 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
355 cmp->sreg1 = sp [0]->dreg; \
356 cmp->sreg2 = sp [1]->dreg; \
357 type_from_op (cmp, sp [0], sp [1]); \
359 type_from_op (ins, sp [0], sp [1]); \
360 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
361 GET_BBLOCK (cfg, tblock, target); \
362 link_bblock (cfg, bblock, tblock); \
363 ins->inst_true_bb = tblock; \
364 if ((next_block)) { \
365 link_bblock (cfg, bblock, (next_block)); \
366 ins->inst_false_bb = (next_block); \
367 start_new_bblock = 1; \
369 GET_BBLOCK (cfg, tblock, ip); \
370 link_bblock (cfg, bblock, tblock); \
371 ins->inst_false_bb = tblock; \
372 start_new_bblock = 2; \
374 if (sp != stack_start) { \
375 handle_stack_args (cfg, stack_start, sp - stack_start); \
376 CHECK_UNVERIFIABLE (cfg); \
378 MONO_ADD_INS (bblock, cmp); \
379 MONO_ADD_INS (bblock, ins); \
383 * link_bblock: Links two basic blocks
385 * links two basic blocks in the control flow graph, the 'from'
386 * argument is the starting block and the 'to' argument is the block
387 * the control flow ends to after 'from'.
390 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
392 MonoBasicBlock **newa;
/* Verbose edge tracing.  NOTE(review): the verbosity guard and some
 * branch lines around these printfs are missing from this listing. */
396 if (from->cil_code) {
398 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
400 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
403 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
405 printf ("edge from entry to exit\n");
/* If the out-edge already exists, the insertion below is skipped. */
410 for (i = 0; i < from->out_count; ++i) {
411 if (to == from->out_bb [i]) {
/* Grow from->out_bb by one; the replacement array comes from the
 * compile mempool. */
417 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
418 for (i = 0; i < from->out_count; ++i) {
419 newa [i] = from->out_bb [i];
/* Symmetric update of the in-edge list of 'to'. */
427 for (i = 0; i < to->in_count; ++i) {
428 if (from == to->in_bb [i]) {
434 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
435 for (i = 0; i < to->in_count; ++i) {
436 newa [i] = to->in_bb [i];
445 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
447 link_bblock (cfg, from, to);
451 * mono_find_block_region:
453 * We mark each basic block with a region ID. We use that to avoid BB
454 * optimizations when blocks are in different regions.
457 * A region token that encodes where this region is, and information
458 * about the clause owner for this block.
460 * The region encodes the try/catch/filter clause that owns this block
461 * as well as the type. -1 is a special value that represents a block
462 * that is in none of try/catch/filter.
465 mono_find_block_region (MonoCompile *cfg, int offset)
467 MonoMethod *method = cfg->method;
468 MonoMethodHeader *header = mono_method_get_header (method);
469 MonoExceptionClause *clause;
/* Walk every EH clause; the first one containing OFFSET decides the
 * region.  The visible token layout is ((clause_index + 1) << 8) |
 * region_kind | clause_flags. */
472 for (i = 0; i < header->num_clauses; ++i) {
473 clause = &header->clauses [i];
/* Inside the filter block of a filter clause? */
474 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
475 (offset < (clause->handler_offset)))
476 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
/* Inside the handler: finally/fault/catch each have a distinct kind. */
478 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
479 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
480 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
481 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
482 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
484 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Inside the protected (try) range of the clause. */
487 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
488 return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block:
 *
 *   Collect into a GList the handler basic blocks of every EH clause of
 * kind TYPE whose protected range contains IP but not TARGET — i.e. the
 * handlers that must run when control transfers from IP to TARGET.
 */
495 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
497 MonoMethod *method = cfg->method;
498 MonoMethodHeader *header = mono_method_get_header (method);
499 MonoExceptionClause *clause;
500 MonoBasicBlock *handler;
504 for (i = 0; i < header->num_clauses; ++i) {
505 clause = &header->clauses [i];
/* Leaving this clause's protected range on the way to TARGET? */
506 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
507 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
508 if (clause->flags == type) {
509 handler = cfg->cil_offset_to_bb [clause->handler_offset];
511 res = g_list_append (res, handler);
/*
 * mono_create_spvar_for_region:
 *
 *   Return the spvar associated with REGION, creating it and caching it
 * in cfg->spvars on first use.  The variable is flagged MONO_INST_INDIRECT
 * so the register allocator leaves it on the stack.
 */
519 mono_create_spvar_for_region (MonoCompile *cfg, int region)
523 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
527 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
528 /* prevent it from being register allocated */
529 var->flags |= MONO_INST_INDIRECT;
531 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
535 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
537 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/*
 * mono_create_exvar_for_offset:
 *
 *   Return the exception object variable for IL offset OFFSET, creating
 * and caching it in cfg->exvars on first use.  The variable is flagged
 * MONO_INST_INDIRECT so it stays stack allocated.
 */
541 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
545 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
549 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
550 /* prevent it from being register allocated */
551 var->flags |= MONO_INST_INDIRECT;
553 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
559 * Returns the type used in the eval stack when @type is loaded.
560 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
563 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
/* Managed pointers (byref) go on the stack as STACK_MP.
 * NOTE(review): the byref guard line itself is missing from this listing. */
567 inst->klass = klass = mono_class_from_mono_type (type);
569 inst->type = STACK_MP;
574 switch (type->type) {
576 inst->type = STACK_INV;
580 case MONO_TYPE_BOOLEAN:
586 inst->type = STACK_I4;
591 case MONO_TYPE_FNPTR:
592 inst->type = STACK_PTR;
594 case MONO_TYPE_CLASS:
595 case MONO_TYPE_STRING:
596 case MONO_TYPE_OBJECT:
597 case MONO_TYPE_SZARRAY:
598 case MONO_TYPE_ARRAY:
599 inst->type = STACK_OBJ;
603 inst->type = STACK_I8;
607 inst->type = STACK_R8;
609 case MONO_TYPE_VALUETYPE:
/* Enums are treated as their underlying integral type. */
610 if (type->data.klass->enumtype) {
611 type = mono_class_enum_basetype (type->data.klass);
615 inst->type = STACK_VTYPE;
618 case MONO_TYPE_TYPEDBYREF:
619 inst->klass = mono_defaults.typed_reference_class;
620 inst->type = STACK_VTYPE;
/* Generic instances: retry with the container class' byval type. */
622 case MONO_TYPE_GENERICINST:
623 type = &type->data.generic_class->container_class->byval_arg;
626 case MONO_TYPE_MVAR :
627 /* FIXME: all the arguments must be references for now,
628 * later look inside cfg and see if the arg num is
631 g_assert (cfg->generic_sharing_context);
632 inst->type = STACK_OBJ;
635 g_error ("unknown type 0x%02x in eval stack type", type->type);
640 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Result stack type of a numeric binop, indexed [src1->type][src2->type];
 * STACK_INV marks an invalid operand combination. */
643 bin_num_table [STACK_MAX] [STACK_MAX] = {
644 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
645 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
646 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
647 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
648 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
649 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
650 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
651 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Result type of a negation per operand stack type (used as neg_table in
 * type_from_op).  NOTE(review): the declarator line for this array is
 * missing from this listing. */
656 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
659 /* reduce the size of this table */
/* Result stack type for integer-only binops, indexed [src1][src2]. */
661 bin_int_table [STACK_MAX] [STACK_MAX] = {
662 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
663 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
664 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
665 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
666 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
667 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
668 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
669 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparability of two stack types; 0 = invalid, nonzero = allowed
 * (type_from_op masks with & 1 for the inequality comparisons, so values
 * 2/3/4 presumably encode "equality-only" combinations — TODO confirm). */
673 bin_comp_table [STACK_MAX] [STACK_MAX] = {
674 /* Inv i L p F & O vt */
676 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
677 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
678 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
679 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
680 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
681 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
682 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
685 /* reduce the size of this table */
/* Result stack type for shift operations, indexed [value][shift-amount]. */
687 shift_table [STACK_MAX] [STACK_MAX] = {
688 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
689 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
690 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
691 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
692 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
693 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
694 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
695 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
699 * Tables to map from the non-specific opcode to the matching
700 * type-specific opcode.
/* Each *_op_map entry is an offset added to the generic CEE_*/OP_*
 * opcode to obtain the type-specialized OP_* opcode; indexed by the
 * operand's (or result's) stack type. */
702 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
704 binops_op_map [STACK_MAX] = {
705 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
708 /* handles from CEE_NEG to CEE_CONV_U8 */
710 unops_op_map [STACK_MAX] = {
711 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
714 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
716 ovfops_op_map [STACK_MAX] = {
717 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
720 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
722 ovf2ops_op_map [STACK_MAX] = {
723 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
726 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
728 ovf3ops_op_map [STACK_MAX] = {
729 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
732 /* handles from CEE_BEQ to CEE_BLT_UN */
734 beqops_op_map [STACK_MAX] = {
735 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
738 /* handles from CEE_CEQ to CEE_CLT_UN */
740 ceqops_op_map [STACK_MAX] = {
741 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
745 * Sets ins->type (the type on the eval stack) according to the
746 * type of the opcode and the arguments to it.
747 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
749 * FIXME: this function sets ins->type unconditionally in some cases, but
750 * it should set it to invalid for some types (a conv.x on an object)
/* NOTE(review): many case labels and break statements are missing from
 * this listing; the added comments describe only the visible fragments. */
753 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
755 switch (ins->opcode) {
/* Numeric binops: validate via bin_num_table, then specialize. */
762 /* FIXME: check unverifiable args for STACK_MP */
763 ins->type = bin_num_table [src1->type] [src2->type];
764 ins->opcode += binops_op_map [ins->type];
/* Integer-only binops. */
771 ins->type = bin_int_table [src1->type] [src2->type];
772 ins->opcode += binops_op_map [ins->type];
/* Shifts. */
777 ins->type = shift_table [src1->type] [src2->type];
778 ins->opcode += binops_op_map [ins->type];
/* Compares: pick L/F/I variant from the first operand's width. */
783 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
784 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
785 ins->opcode = OP_LCOMPARE;
786 else if (src1->type == STACK_R8)
787 ins->opcode = OP_FCOMPARE;
789 ins->opcode = OP_ICOMPARE;
791 case OP_ICOMPARE_IMM:
/* Compare-with-immediate has a single stack operand, hence the
 * [src1][src1] lookup. */
792 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
793 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
794 ins->opcode = OP_LCOMPARE_IMM;
/* Conditional branches / set-on-compare opcodes. */
806 ins->opcode += beqops_op_map [src1->type];
809 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
810 ins->opcode += ceqops_op_map [src1->type];
/* Inequality compares: only table entries with bit 0 set are valid. */
816 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
817 ins->opcode += ceqops_op_map [src1->type];
/* Unops (neg/not). */
821 ins->type = neg_table [src1->type];
822 ins->opcode += unops_op_map [ins->type];
825 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
826 ins->type = src1->type;
828 ins->type = STACK_INV;
829 ins->opcode += unops_op_map [ins->type];
/* Narrowing conversions to 32-bit. */
835 ins->type = STACK_I4;
836 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: unsigned integer to floating point. */
839 ins->type = STACK_R8;
840 switch (src1->type) {
843 ins->opcode = OP_ICONV_TO_R_UN;
846 ins->opcode = OP_LCONV_TO_R_UN;
/* Overflow-checked conversions to 32-bit results. */
850 case CEE_CONV_OVF_I1:
851 case CEE_CONV_OVF_U1:
852 case CEE_CONV_OVF_I2:
853 case CEE_CONV_OVF_U2:
854 case CEE_CONV_OVF_I4:
855 case CEE_CONV_OVF_U4:
856 ins->type = STACK_I4;
857 ins->opcode += ovf3ops_op_map [src1->type];
859 case CEE_CONV_OVF_I_UN:
860 case CEE_CONV_OVF_U_UN:
861 ins->type = STACK_PTR;
862 ins->opcode += ovf2ops_op_map [src1->type];
864 case CEE_CONV_OVF_I1_UN:
865 case CEE_CONV_OVF_I2_UN:
866 case CEE_CONV_OVF_I4_UN:
867 case CEE_CONV_OVF_U1_UN:
868 case CEE_CONV_OVF_U2_UN:
869 case CEE_CONV_OVF_U4_UN:
870 ins->type = STACK_I4;
871 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: to native unsigned int; variant depends on source width
 * and on the register size of the target. */
874 ins->type = STACK_PTR;
875 switch (src1->type) {
877 ins->opcode = OP_ICONV_TO_U;
881 #if SIZEOF_REGISTER == 8
882 ins->opcode = OP_LCONV_TO_U;
884 ins->opcode = OP_MOVE;
888 ins->opcode = OP_LCONV_TO_U;
891 ins->opcode = OP_FCONV_TO_U;
/* Conversions to 64-bit. */
897 ins->type = STACK_I8;
898 ins->opcode += unops_op_map [src1->type];
900 case CEE_CONV_OVF_I8:
901 case CEE_CONV_OVF_U8:
902 ins->type = STACK_I8;
903 ins->opcode += ovf3ops_op_map [src1->type];
905 case CEE_CONV_OVF_U8_UN:
906 case CEE_CONV_OVF_I8_UN:
907 ins->type = STACK_I8;
908 ins->opcode += ovf2ops_op_map [src1->type];
/* Conversions to floating point. */
912 ins->type = STACK_R8;
913 ins->opcode += unops_op_map [src1->type];
916 ins->type = STACK_R8;
920 ins->type = STACK_I4;
921 ins->opcode += ovfops_op_map [src1->type];
926 ins->type = STACK_PTR;
927 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic: not defined for floats. */
935 ins->type = bin_num_table [src1->type] [src2->type];
936 ins->opcode += ovfops_op_map [src1->type];
937 if (ins->type == STACK_R8)
938 ins->type = STACK_INV;
/* Loads: result type follows the loaded width. */
940 case OP_LOAD_MEMBASE:
941 ins->type = STACK_PTR;
943 case OP_LOADI1_MEMBASE:
944 case OP_LOADU1_MEMBASE:
945 case OP_LOADI2_MEMBASE:
946 case OP_LOADU2_MEMBASE:
947 case OP_LOADI4_MEMBASE:
948 case OP_LOADU4_MEMBASE:
949 ins->type = STACK_PTR;
951 case OP_LOADI8_MEMBASE:
952 ins->type = STACK_I8;
954 case OP_LOADR4_MEMBASE:
955 case OP_LOADR8_MEMBASE:
956 ins->type = STACK_R8;
959 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers default to object class metadata. */
963 if (ins->type == STACK_MP)
964 ins->klass = mono_defaults.object_class;
/* NOTE(review): the declarator lines for the next two tables are missing
 * from this listing; only their initializer fragments are visible. */
969 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
975 param_table [STACK_MAX] [STACK_MAX] = {
/* Sanity-check that the argument instructions ARGS match signature SIG
 * (byref-ness and basic type kinds).  NOTE(review): the return statements
 * and several case labels are missing from this listing. */
980 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
984 switch (args->type) {
994 for (i = 0; i < sig->param_count; ++i) {
995 switch (args [i].type) {
/* A managed pointer argument requires a byref parameter, and vice versa. */
999 if (!sig->params [i]->byref)
1003 if (sig->params [i]->byref)
1005 switch (sig->params [i]->type) {
1006 case MONO_TYPE_CLASS:
1007 case MONO_TYPE_STRING:
1008 case MONO_TYPE_OBJECT:
1009 case MONO_TYPE_SZARRAY:
1010 case MONO_TYPE_ARRAY:
/* Floating-point arguments must map to R4/R8 parameters. */
1017 if (sig->params [i]->byref)
1019 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1028 /*if (!param_table [args [i].type] [sig->params [i]->type])
1036 * When we need a pointer to the current domain many times in a method, we
1037 * call mono_domain_get() once and we store the result in a local variable.
1038 * This function returns the variable that represents the MonoDomain*.
1040 inline static MonoInst *
1041 mono_get_domainvar (MonoCompile *cfg)
1043 if (!cfg->domainvar)
1044 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1045 return cfg->domainvar;
1049 * The got_var contains the address of the Global Offset Table when AOT
/* Return (lazily creating) the variable that holds the GOT address; only
 * relevant when MONO_ARCH_NEED_GOT_VAR is defined and compiling AOT.
 * NOTE(review): the non-AOT early-return and #else/#endif lines are
 * missing from this listing. */
1053 mono_get_got_var (MonoCompile *cfg)
1055 #ifdef MONO_ARCH_NEED_GOT_VAR
1056 if (!cfg->compile_aot)
1058 if (!cfg->got_var) {
1059 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1061 return cfg->got_var;
1068 mono_get_vtable_var (MonoCompile *cfg)
1070 g_assert (cfg->generic_sharing_context);
1072 if (!cfg->rgctx_var) {
1073 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1074 /* force the var to be stack allocated */
1075 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1078 return cfg->rgctx_var;
/* Map an eval-stack type back to the corresponding MonoType*.
 * NOTE(review): the return-type line and the case labels for the
 * &ins->klass->this_arg branch (presumably STACK_MP) and the g_error
 * default are missing from this listing. */
1082 type_from_stack_type (MonoInst *ins) {
1083 switch (ins->type) {
1084 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1085 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1086 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1087 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1089 return &ins->klass->this_arg;
1090 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1091 case STACK_VTYPE: return &ins->klass->byval_arg;
1093 g_error ("stack type %d to monotype not handled\n", ins->type);
/* Map a MonoType to its eval-stack type (STACK_*).  NOTE(review): most
 * return statements and many case labels are missing from this listing. */
1098 static G_GNUC_UNUSED int
1099 type_to_stack_type (MonoType *t)
/* Strip enum wrappers etc. before classifying. */
1101 t = mono_type_get_underlying_type (t);
1105 case MONO_TYPE_BOOLEAN:
1108 case MONO_TYPE_CHAR:
1115 case MONO_TYPE_FNPTR:
1117 case MONO_TYPE_CLASS:
1118 case MONO_TYPE_STRING:
1119 case MONO_TYPE_OBJECT:
1120 case MONO_TYPE_SZARRAY:
1121 case MONO_TYPE_ARRAY:
1129 case MONO_TYPE_VALUETYPE:
1130 case MONO_TYPE_TYPEDBYREF:
/* Generic instances: value types vs. reference types differ. */
1132 case MONO_TYPE_GENERICINST:
1133 if (mono_type_generic_inst_is_valuetype (t))
1139 g_assert_not_reached ();
/* Map a CEE_LDELEM_*/CEE_STELEM_* opcode to the element MonoClass.
 * NOTE(review): most case labels are missing from this listing; only the
 * returned classes and the *_REF cases are visible. */
1146 array_access_to_klass (int opcode)
1150 return mono_defaults.byte_class;
1152 return mono_defaults.uint16_class;
1155 return mono_defaults.int_class;
1158 return mono_defaults.sbyte_class;
1161 return mono_defaults.int16_class;
1164 return mono_defaults.int32_class;
1166 return mono_defaults.uint32_class;
1169 return mono_defaults.int64_class;
1172 return mono_defaults.single_class;
1175 return mono_defaults.double_class;
1176 case CEE_LDELEM_REF:
1177 case CEE_STELEM_REF:
1178 return mono_defaults.object_class;
1180 g_assert_not_reached ();
1186 * We try to share variables when possible
/* Return a local variable to hold the value at eval-stack slot SLOT with
 * the stack type of INS, reusing a cached one (cfg->intvars) when the
 * slot/type pair was seen before. */
1189 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1194 /* inlining can result in deeper stacks */
1195 if (slot >= mono_method_get_header (cfg->method)->max_stack)
1196 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Cache key: (stack type, slot). */
1198 pos = ins->type - 1 + slot * STACK_MAX;
1200 switch (ins->type) {
1207 if ((vnum = cfg->intvars [pos]))
1208 return cfg->varinfo [vnum];
1209 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1210 cfg->intvars [pos] = res->inst_c0;
/* Non-cacheable types fall through to a fresh variable. */
1213 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Record an image/token pair used by the method in cfg->token_info_hash
 * so the AOT compiler can later resolve KEY back to its metadata token. */
1219 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1222 * Don't use this if a generic_context is set, since that means AOT can't
1223 * look up the method using just the image+token.
1224 * table == 0 means this is a reference made from a wrapper.
1226 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1227 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1228 jump_info_token->image = image;
1229 jump_info_token->token = token;
1230 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1235 * This function is called to handle items that are left on the evaluation stack
1236 * at basic block boundaries. What happens is that we save the values to local variables
1237 * and we reload them later when first entering the target basic block (with the
1238 * handle_loaded_temps () function).
1239 * A single joint point will use the same variables (stored in the array bb->out_stack or
1240 * bb->in_stack, if the basic block is before or after the joint point).
1242 * This function needs to be called _before_ emitting the last instruction of
1243 * the bb (i.e. before emitting a branch).
1244 * If the stack merge fails at a join point, cfg->unverifiable is set.
1247 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1250 MonoBasicBlock *bb = cfg->cbb;
1251 MonoBasicBlock *outb;
1252 MonoInst *inst, **locals;
1257 if (cfg->verbose_level > 3)
1258 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First time out of this bblock: pick (or allocate) the out_stack
 * variables, preferring an existing successor's in_stack. */
1259 if (!bb->out_scount) {
1260 bb->out_scount = count;
1261 //printf ("bblock %d has out:", bb->block_num);
1263 for (i = 0; i < bb->out_count; ++i) {
1264 outb = bb->out_bb [i];
1265 /* exception handlers are linked, but they should not be considered for stack args */
1266 if (outb->flags & BB_EXCEPTION_HANDLER)
1268 //printf (" %d", outb->block_num);
1269 if (outb->in_stack) {
1271 bb->out_stack = outb->in_stack;
/* No successor had an in_stack: allocate fresh variables. */
1277 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1278 for (i = 0; i < count; ++i) {
1280 * try to reuse temps already allocated for this purpouse, if they occupy the same
1281 * stack slot and if they are of the same type.
1282 * This won't cause conflicts since if 'local' is used to
1283 * store one of the values in the in_stack of a bblock, then
1284 * the same variable will be used for the same outgoing stack
1286 * This doesn't work when inlining methods, since the bblocks
1287 * in the inlined methods do not inherit their in_stack from
1288 * the bblock they are inlined to. See bug #58863 for an
1291 if (cfg->inlined_method)
1292 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1294 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate the out_stack to every successor's in_stack; mismatched
 * depths at a join point make the method unverifiable. */
1299 for (i = 0; i < bb->out_count; ++i) {
1300 outb = bb->out_bb [i];
1301 /* exception handlers are linked, but they should not be considered for stack args */
1302 if (outb->flags & BB_EXCEPTION_HANDLER)
1304 if (outb->in_scount) {
1305 if (outb->in_scount != bb->out_scount) {
1306 cfg->unverifiable = TRUE;
1309 continue; /* check they are the same locals */
1311 outb->in_scount = count;
1312 outb->in_stack = bb->out_stack;
/* Store the current stack values into the chosen temporaries. */
1315 locals = bb->out_stack;
1317 for (i = 0; i < count; ++i) {
1318 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1319 inst->cil_code = sp [i]->cil_code;
1320 sp [i] = locals [i];
1321 if (cfg->verbose_level > 3)
1322 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1326 * It is possible that the out bblocks already have in_stack assigned, and
1327 * the in_stacks differ. In this case, we will store to all the different
1334 /* Find a bblock which has a different in_stack */
1336 while (bindex < bb->out_count) {
1337 outb = bb->out_bb [bindex];
1338 /* exception handlers are linked, but they should not be considered for stack args */
1339 if (outb->flags & BB_EXCEPTION_HANDLER) {
1343 if (outb->in_stack != locals) {
1344 for (i = 0; i < count; ++i) {
1345 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1346 inst->cil_code = sp [i]->cil_code;
1347 sp [i] = locals [i];
1348 if (cfg->verbose_level > 3)
1349 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1351 locals = outb->in_stack;
1360 /* Emit code which loads interface_offsets [klass->interface_id]
1361 * The array is stored in memory before vtable.
/*
 * Loads into intf_reg the per-interface entry for "klass", read from the
 * table that lives immediately *before* the vtable pointed to by vtable_reg
 * (hence the negative membase offset in the non-AOT path).
 */
1364 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1366 if (cfg->compile_aot) {
/* AOT: the interface id is only known at load time, so it is materialized
 * via an AOT constant patch. ADJUSTED_IID appears to be pre-scaled so it can
 * be added directly to the vtable pointer — TODO confirm against the AOT
 * patch-resolution code. */
1367 int ioffset_reg = alloc_preg (cfg);
1368 int iid_reg = alloc_preg (cfg);
1370 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1371 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1372 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT: interface_id is a compile-time constant; index the table at a fixed
 * negative offset of (interface_id + 1) pointer-sized slots before the vtable. */
1375 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
1380 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1381 * stored in "klass_reg" implements the interface "klass".
/*
 * Implementation: tests bit (interface_id) of the class' interface_bitmap.
 * The byte index is interface_id >> 3 and the bit within it interface_id & 7.
 */
1384 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1386 int ibitmap_reg = alloc_preg (cfg);
1387 int ibitmap_byte_reg = alloc_preg (cfg);
/* Load the pointer to the bitmap from MonoClass. */
1389 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap));
1391 if (cfg->compile_aot) {
/* AOT: interface_id is unknown at compile time, so compute byte index and
 * bit mask at run time from the patched IID constant. */
1392 int iid_reg = alloc_preg (cfg);
1393 int shifted_iid_reg = alloc_preg (cfg);
1394 int ibitmap_byte_address_reg = alloc_preg (cfg);
1395 int masked_iid_reg = alloc_preg (cfg);
1396 int iid_one_bit_reg = alloc_preg (cfg);
1397 int iid_bit_reg = alloc_preg (cfg);
1398 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1399 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1400 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1401 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1402 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1403 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1404 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1405 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: byte offset and bit mask are compile-time constants. */
1407 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1408 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1413 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1414 * stored in "vtable_reg" implements the interface "klass".
/*
 * Same bitmap test as mini_emit_load_intf_bit_reg_class, but the bitmap is
 * read from MonoVTable rather than MonoClass.
 */
1417 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1419 int ibitmap_reg = alloc_preg (cfg);
1420 int ibitmap_byte_reg = alloc_preg (cfg);
1422 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap));
1424 if (cfg->compile_aot) {
/* AOT: compute byte index (iid >> 3) and bit mask (1 << (iid & 7)) at run
 * time from the patched IID constant. */
1425 int iid_reg = alloc_preg (cfg);
1426 int shifted_iid_reg = alloc_preg (cfg);
1427 int ibitmap_byte_address_reg = alloc_preg (cfg);
1428 int masked_iid_reg = alloc_preg (cfg);
1429 int iid_one_bit_reg = alloc_preg (cfg);
1430 int iid_bit_reg = alloc_preg (cfg);
1431 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1432 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, shifted_iid_reg, iid_reg, 3);
1433 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1434 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1435 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, masked_iid_reg, iid_reg, 7);
1436 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1437 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1438 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: constant byte offset and mask. */
1440 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1441 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1446 * Emit code which checks whether the interface id of @klass is smaller than
1447 * the value given by max_iid_reg.
/*
 * Guards the interface-bitmap lookup: if max_iid_reg < klass's interface id,
 * the bitmap cannot contain the interface. On failure the emitted code either
 * branches to false_target or raises InvalidCastException (presumably chosen
 * by whether false_target is NULL — the selecting `if` is outside this view;
 * TODO confirm).
 */
1450 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1451 MonoBasicBlock *false_target)
1453 if (cfg->compile_aot) {
/* AOT: interface id comes from a patched constant. */
1454 int iid_reg = alloc_preg (cfg);
1455 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1456 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1459 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
/* Unsigned less-than: fails when max_iid_reg < interface_id. */
1461 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1463 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1466 /* Same as above, but obtains max_iid from a vtable */
1468 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1469 MonoBasicBlock *false_target)
1471 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is a 16-bit field, hence the unsigned 2-byte load. */
1473 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1474 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1477 /* Same as above, but obtains max_iid from a klass */
1479 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1480 MonoBasicBlock *false_target)
1482 int max_iid_reg = alloc_preg (cfg);
/* Same 16-bit max_interface_id field, read from MonoClass instead of the vtable. */
1484 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1485 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * Emit an isinst-style subtype test: branch to true_target if the class in
 * klass_reg derives from @klass, using the supertypes table indexed by
 * klass->idepth - 1. The false_target handling on depth mismatch is visible;
 * behavior when neither branch is taken falls through to code outside this
 * function's view.
 */
1489 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1491 int idepth_reg = alloc_preg (cfg);
1492 int stypes_reg = alloc_preg (cfg);
1493 int stype = alloc_preg (cfg);
/* Only check the inheritance depth when it can exceed the statically-sized
 * part of the supertable; shallower depths are always present. */
1495 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1496 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1497 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1498 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
/* supertypes [idepth - 1] holds the ancestor at @klass's depth; a match
 * means klass_reg's class is @klass or a subclass of it. */
1500 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1501 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1502 if (cfg->compile_aot) {
1503 int const_reg = alloc_preg (cfg);
1504 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1505 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
/* JIT: the MonoClass pointer is baked into the compare as an immediate. */
1507 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1509 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/*
 * Emit an interface cast check against the vtable in vtable_reg: first the
 * max-iid range guard, then the interface-bitmap bit test. On success either
 * branches to true_target or throws InvalidCastException on failure
 * (presumably selected by whether true_target is NULL — the selecting `if`
 * is outside this view; TODO confirm).
 */
1513 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1515 int intf_reg = alloc_preg (cfg);
1517 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1518 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
/* Nonzero bit => interface implemented. */
1519 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1521 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1523 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1527 * Variant of the above that takes a register to the class, not the vtable.
1530 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1532 int intf_bit_reg = alloc_preg (cfg);
/* Range guard then bitmap test, mirroring mini_emit_iface_cast but reading
 * from MonoClass. */
1534 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1535 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1536 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1538 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1540 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * Emit an exact class-identity check: throw InvalidCastException unless the
 * MonoClass in klass_reg equals @klass.
 */
1544 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1546 if (cfg->compile_aot) {
/* AOT: class pointer must come from a patchable constant. */
1547 int const_reg = alloc_preg (cfg);
1548 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1549 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
/* JIT: compare against the class pointer as an immediate. */
1551 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1553 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/*
 * Same comparison as mini_emit_class_check, but instead of throwing it emits
 * a conditional branch (branch_op) to @target based on the compare result.
 */
1557 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1559 if (cfg->compile_aot) {
1560 int const_reg = alloc_preg (cfg);
1561 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1562 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1564 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1566 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/*
 * Emit a castclass check of the class in klass_reg against @klass, throwing
 * InvalidCastException on mismatch. Handles array classes (rank + element
 * class checks, recursing for arrays of arrays) and the enum/object special
 * cases around cast_class; for non-arrays it walks the supertypes table.
 * obj_reg == -1 suppresses the object-level vector check (see recursion).
 */
1570 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1573 int rank_reg = alloc_preg (cfg);
1574 int eclass_reg = alloc_preg (cfg);
/* Array path: ranks must match exactly. */
1576 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1577 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1578 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1579 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1580 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Element-class dispatch. The enum/System.Enum/object cases below encode
 * CLI array-covariance rules for enums and their underlying types; the
 * exact rationale for each branch predates this view — treat as verified
 * behavior, not obvious logic. */
1581 if (klass->cast_class == mono_defaults.object_class) {
1582 int parent_reg = alloc_preg (cfg);
1583 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1584 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1585 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1586 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1587 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1588 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1589 } else if (klass->cast_class == mono_defaults.enum_class) {
1590 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1591 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1592 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1594 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1595 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
/* SZARRAY (vector) targets additionally require the object to have no
 * bounds array, i.e. be a true zero-based one-dimensional array. */
1598 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1599 /* Check that the object is a vector too */
1600 int bounds_reg = alloc_preg (cfg);
1601 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1602 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1603 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: supertypes-table subtype check, throwing on mismatch. */
1606 int idepth_reg = alloc_preg (cfg);
1607 int stypes_reg = alloc_preg (cfg);
1608 int stype = alloc_preg (cfg);
1610 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1611 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1612 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1613 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1615 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1616 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1617 mini_emit_class_check (cfg, stype, klass);
/*
 * Emit inline code that sets @size bytes at destreg+offset to @val.
 * Small aligned sizes use a single store-immediate; larger sizes loop over
 * register-sized stores (the surrounding size/alignment dispatch is partly
 * outside this view). Note the g_assert below: at least one path only
 * supports val == 0.
 */
1622 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1626 g_assert (val == 0);
/* Fast path: the whole region fits in one naturally-aligned store. */
1631 if ((size <= 4) && (size <= align)) {
1634 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1637 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1640 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1642 #if SIZEOF_REGISTER == 8
1644 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General path: materialize val in a register once, then store repeatedly. */
1650 val_reg = alloc_preg (cfg);
1652 if (SIZEOF_REGISTER == 8)
1653 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1655 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned destination: fall back to byte stores. */
1658 /* This could be optimized further if neccesary */
1660 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1667 #if !NO_UNALIGNED_ACCESS
/* On 64-bit targets, peel a 4-byte store to reach 8-byte granularity,
 * then emit 8-byte stores for the bulk. */
1668 if (SIZEOF_REGISTER == 8) {
1670 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1675 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* Remaining tail: 4-, 2-, then 1-byte stores. */
1683 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1688 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1693 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1699 #endif /* DISABLE_JIT */
/*
 * Emit inline code copying @size bytes from srcreg+soffset to destreg+doffset.
 * Uses the widest loads/stores the alignment allows, with a byte-copy
 * fallback; the loop/offset-advance statements between the emitted pairs are
 * outside this view.
 */
1702 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1709 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1710 g_assert (size < 10000);
/* Unaligned case: copy one byte at a time. */
1713 /* This could be optimized further if neccesary */
1715 cur_reg = alloc_preg (cfg);
1716 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1717 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1724 #if !NO_UNALIGNED_ACCESS
/* 64-bit bulk: 8-byte load/store pairs. */
1725 if (SIZEOF_REGISTER == 8) {
1727 cur_reg = alloc_preg (cfg);
1728 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1729 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Tail: 4-, 2-, then 1-byte copies. */
1738 cur_reg = alloc_preg (cfg);
1739 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1740 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1746 cur_reg = alloc_preg (cfg);
1747 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1748 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1754 cur_reg = alloc_preg (cfg);
1755 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1756 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * Map a method's return type to the IR call opcode family, specialized for
 * indirect (calli) and virtual calls. The GENERICINST and enum-valuetype
 * cases rewrite `type` and fall back through the switch.
 */
1766 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
/* byref returns are handled as plain pointer-sized values (early return —
 * the guarding condition is above this line, outside the sampled view). */
1769 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
/* Resolve generic-sharing type variables to their basic representation. */
1772 type = mini_get_basic_type_from_generic (gsctx, type);
1773 switch (type->type) {
1774 case MONO_TYPE_VOID:
1775 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1778 case MONO_TYPE_BOOLEAN:
1781 case MONO_TYPE_CHAR:
1784 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1788 case MONO_TYPE_FNPTR:
1789 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1790 case MONO_TYPE_CLASS:
1791 case MONO_TYPE_STRING:
1792 case MONO_TYPE_OBJECT:
1793 case MONO_TYPE_SZARRAY:
1794 case MONO_TYPE_ARRAY:
1795 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1798 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1801 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1802 case MONO_TYPE_VALUETYPE:
/* Enums are lowered to their underlying integral type and re-dispatched. */
1803 if (type->data.klass->enumtype) {
1804 type = mono_class_enum_basetype (type->data.klass);
1807 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1808 case MONO_TYPE_TYPEDBYREF:
1809 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1810 case MONO_TYPE_GENERICINST:
/* Re-dispatch on the generic type definition. */
1811 type = &type->data.generic_class->container_class->byval_arg;
1814 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1820 * target_type_is_incompatible:
1821 * @cfg: MonoCompile context
1823 * Check that the item @arg on the evaluation stack can be stored
1824 * in the target type (can be a local, or field, etc).
1825 * The cfg arg can be used to check if we need verification or just
1828 * Returns: non-0 value if arg can't be stored on a target.
1831 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1833 MonoType *simple_type;
1836 if (target->byref) {
1837 /* FIXME: check that the pointed to types match */
/* Managed pointer on the stack: the pointee class must match the target. */
1838 if (arg->type == STACK_MP)
1839 return arg->klass != mono_class_from_mono_type (target);
1840 if (arg->type == STACK_PTR)
/* Strip enum/custom-modifier wrappers before dispatching on the base type. */
1845 simple_type = mono_type_get_underlying_type (target);
1846 switch (simple_type->type) {
1847 case MONO_TYPE_VOID:
1851 case MONO_TYPE_BOOLEAN:
1854 case MONO_TYPE_CHAR:
/* Small integral targets accept int32 or native-int stack entries. */
1857 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1861 /* STACK_MP is needed when setting pinned locals */
1862 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1867 case MONO_TYPE_FNPTR:
1868 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1871 case MONO_TYPE_CLASS:
1872 case MONO_TYPE_STRING:
1873 case MONO_TYPE_OBJECT:
1874 case MONO_TYPE_SZARRAY:
1875 case MONO_TYPE_ARRAY:
1876 if (arg->type != STACK_OBJ)
1878 /* FIXME: check type compatibility */
1882 if (arg->type != STACK_I8)
1887 if (arg->type != STACK_R8)
1890 case MONO_TYPE_VALUETYPE:
/* Value types must match both the stack kind and the exact class. */
1891 if (arg->type != STACK_VTYPE)
1893 klass = mono_class_from_mono_type (simple_type);
1894 if (klass != arg->klass)
1897 case MONO_TYPE_TYPEDBYREF:
1898 if (arg->type != STACK_VTYPE)
1900 klass = mono_class_from_mono_type (simple_type);
1901 if (klass != arg->klass)
1904 case MONO_TYPE_GENERICINST:
1905 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1906 if (arg->type != STACK_VTYPE)
1908 klass = mono_class_from_mono_type (simple_type);
1909 if (klass != arg->klass)
1913 if (arg->type != STACK_OBJ)
1915 /* FIXME: check type compatibility */
1919 case MONO_TYPE_MVAR:
1920 /* FIXME: all the arguments must be references for now,
1921 * later look inside cfg and see if the arg num is
1922 * really a reference
1924 g_assert (cfg->generic_sharing_context);
1925 if (arg->type != STACK_OBJ)
1929 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1935 * Prepare arguments for passing to a function call.
1936 * Return a non-zero value if the arguments can't be passed to the given
1938 * The type checks are not yet complete and some conversions may need
1939 * casts on 32 or 64 bit architectures.
1941 * FIXME: implement this using target_type_is_incompatible ()
1944 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1946 MonoType *simple_type;
/* `this` (args [0]) must be an object reference, managed pointer, or
 * native pointer (guarded by a hasthis check outside this view). */
1950 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1954 for (i = 0; i < sig->param_count; ++i) {
1955 if (sig->params [i]->byref) {
1956 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
/* By-value parameter: resolve generic-sharing variables, then match the
 * stack kind against the declared base type. */
1960 simple_type = sig->params [i];
1961 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1963 switch (simple_type->type) {
1964 case MONO_TYPE_VOID:
1969 case MONO_TYPE_BOOLEAN:
1972 case MONO_TYPE_CHAR:
1975 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
1981 case MONO_TYPE_FNPTR:
1982 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
1985 case MONO_TYPE_CLASS:
1986 case MONO_TYPE_STRING:
1987 case MONO_TYPE_OBJECT:
1988 case MONO_TYPE_SZARRAY:
1989 case MONO_TYPE_ARRAY:
1990 if (args [i]->type != STACK_OBJ)
1995 if (args [i]->type != STACK_I8)
2000 if (args [i]->type != STACK_R8)
2003 case MONO_TYPE_VALUETYPE:
/* Enums reduce to their underlying type and re-enter the switch. */
2004 if (simple_type->data.klass->enumtype) {
2005 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2008 if (args [i]->type != STACK_VTYPE)
2011 case MONO_TYPE_TYPEDBYREF:
2012 if (args [i]->type != STACK_VTYPE)
2015 case MONO_TYPE_GENERICINST:
2016 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2020 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * Map an OP_*CALLVIRT opcode to its direct-call counterpart (used when a
 * virtual call can be statically devirtualized). Aborts on unknown opcodes.
 */
2028 callvirt_to_call (int opcode)
2033 case OP_VOIDCALLVIRT:
2042 g_assert_not_reached ();
/*
 * Map an OP_*CALLVIRT opcode to the corresponding *CALL_MEMBASE opcode,
 * i.e. a call through a vtable/IMT slot loaded via basereg+offset.
 */
2049 callvirt_to_call_membase (int opcode)
2053 return OP_CALL_MEMBASE;
2054 case OP_VOIDCALLVIRT:
2055 return OP_VOIDCALL_MEMBASE;
2057 return OP_FCALL_MEMBASE;
2059 return OP_LCALL_MEMBASE;
2061 return OP_VCALL_MEMBASE;
2063 g_assert_not_reached ();
#ifdef MONO_ARCH_HAVE_IMT
/*
 * Arrange for the IMT (interface method table) key to be passed to an
 * interface call: either the caller-supplied imt_arg, an AOT method-constant,
 * or the raw MonoMethod pointer. On targets with a dedicated IMT register it
 * is attached as an out-arg; otherwise the arch backend emits it.
 */
2071 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2073 #ifdef MONO_ARCH_IMT_REG
2074 int method_reg = alloc_preg (cfg);
2077 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2078 } else if (cfg->compile_aot) {
2079 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
/* JIT: embed the MonoMethod pointer directly. */
2082 MONO_INST_NEW (cfg, ins, OP_PCONST);
2083 ins->inst_p0 = call->method;
2084 ins->dreg = method_reg;
2085 MONO_ADD_INS (cfg->cbb, ins);
2088 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* No IMT register on this target: delegate to the arch backend. */
2090 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/*
 * Allocate a MonoJumpInfo patch record from @mp and fill in its type and
 * target. Memory is mempool-owned; callers must not free it individually.
 */
2095 static MonoJumpInfo *
2096 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2098 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2102 ji->data.target = target;
2107 inline static MonoInst*
2108 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args);
/*
 * Build a MonoCallInst for a call with signature @sig and arguments @args.
 * Selects the opcode (tail/calli/virtual), wires up the return value
 * (including the vret_var / OP_OUTARG_VTRETADDR machinery for struct
 * returns), performs the soft-float r8->r4 argument fixup, and lets the
 * backend (LLVM or native arch) lower the argument passing. The caller is
 * responsible for adding the returned instruction to a basic block.
 */
2110 inline static MonoCallInst *
2111 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2112 MonoInst **args, int calli, int virtual, int tail)
2115 #ifdef MONO_ARCH_SOFT_FLOAT
2120 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2122 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2125 call->signature = sig;
2127 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
/* Struct return, first variant: the return buffer is the method's own
 * vret_addr (selected by a condition outside this sampled view). */
2130 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2131 call->vret_var = cfg->vret_addr;
2132 //g_assert_not_reached ();
/* Struct return, second variant: allocate a temporary for the result. */
2134 } else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2135 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2138 temp->backend.is_pinvoke = sig->pinvoke;
2141 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2142 * address of return value to increase optimization opportunities.
2143 * Before vtype decomposition, the dreg of the call ins itself represents the
2144 * fact the call modifies the return value. After decomposition, the call will
2145 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2146 * will be transformed into an LDADDR.
2148 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2149 loada->dreg = alloc_preg (cfg);
2150 loada->inst_p0 = temp;
2151 /* We reference the call too since call->dreg could change during optimization */
2152 loada->inst_p1 = call;
2153 MONO_ADD_INS (cfg->cbb, loada);
2155 call->inst.dreg = temp->dreg;
2157 call->vret_var = loada;
2158 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2159 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2161 #ifdef MONO_ARCH_SOFT_FLOAT
2163 * If the call has a float argument, we would need to do an r8->r4 conversion using
2164 * an icall, but that cannot be done during the call sequence since it would clobber
2165 * the call registers + the stack. So we do it before emitting the call.
2167 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2169 MonoInst *in = call->args [i];
/* Index 0 may be the implicit `this`, which is a native int. */
2171 if (i >= sig->hasthis)
2172 t = sig->params [i - sig->hasthis];
2174 t = &mono_defaults.int_class->byval_arg;
2175 t = mono_type_get_underlying_type (t);
2177 if (!t->byref && t->type == MONO_TYPE_R4) {
2178 MonoInst *iargs [1];
2182 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2184 /* The result will be in an int vreg */
2185 call->args [i] = conv;
/* Backend lowering of the argument-passing conventions. */
2191 if (COMPILE_LLVM (cfg))
2192 mono_llvm_emit_call (cfg, call);
2194 mono_arch_emit_call (cfg, call);
2196 mono_arch_emit_call (cfg, call);
/* Track the maximum outgoing parameter area across all calls. */
2199 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2200 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * Emit an indirect call through the address in @addr with signature @sig.
 * The call instruction is appended to the current basic block.
 */
2205 inline static MonoInst*
2206 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2208 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE);
/* The target address goes in sreg1 of the CALL_REG-family opcode. */
2210 call->inst.sreg1 = addr->dreg;
2212 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2214 return (MonoInst*)call;
/*
 * Like mono_emit_calli, but additionally passes the runtime-generic-context
 * value @rgctx_arg in the architecture's dedicated RGCTX register. Only
 * available on targets defining MONO_ARCH_RGCTX_REG; otherwise unreachable.
 */
2217 inline static MonoInst*
2218 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2220 #ifdef MONO_ARCH_RGCTX_REG
/* Copy the rgctx value into a fresh vreg pinned to MONO_ARCH_RGCTX_REG. */
2225 rgctx_reg = mono_alloc_preg (cfg);
2226 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2228 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2230 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2231 cfg->uses_rgctx_reg = TRUE;
2232 call->rgctx_reg = TRUE;
2234 return (MonoInst*)call;
2236 g_assert_not_reached ();
2242 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
2244 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, int rgctx_type);
/*
 * Emit a call to @method with signature @sig and arguments @args.
 * `this` non-NULL makes the call virtual; imt_arg supplies an explicit IMT
 * key for interface calls. Handles: string ctors (signature rewrite),
 * remoting proxies (wrapper or rgctx indirect call), delegate Invoke fast
 * path, devirtualization of non-virtual/sealed targets, and finally generic
 * vtable/IMT-slot dispatch via a *CALL_MEMBASE opcode.
 */
2247 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2248 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2250 gboolean might_be_remote;
2251 gboolean virtual = this != NULL;
2252 gboolean enable_for_aot = TRUE;
2256 if (method->string_ctor) {
2257 /* Create the real signature */
2258 /* FIXME: Cache these */
/* String ctors actually return the string, not void. */
2259 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2260 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
/* A MarshalByRef (or object) receiver may be a transparent proxy; such
 * calls must go through the remoting-invoke-with-check wrapper. */
2265 might_be_remote = this && sig->hasthis &&
2266 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2267 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this);
2269 context_used = mono_method_check_context_used (method);
2270 if (might_be_remote && context_used) {
/* Gshared methods can't have wrappers, so fetch the remoting-check entry
 * point from the rgctx and call indirectly. */
2273 g_assert (cfg->generic_sharing_context);
2275 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2277 return mono_emit_calli (cfg, sig, args, addr);
2280 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE);
2282 if (might_be_remote)
2283 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2285 call->method = method;
2286 call->inst.flags |= MONO_INST_HAS_METHOD;
2287 call->inst.inst_left = this;
2290 int vtable_reg, slot_reg, this_reg;
2292 this_reg = this->dreg;
2294 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2296 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
/* Delegate Invoke fast path: call through delegate->invoke_impl directly. */
2297 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2298 /* Make a call to delegate->invoke_impl */
2299 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2300 call->inst.inst_basereg = this_reg;
2301 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2302 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2304 return (MonoInst*)call;
/* Devirtualization: non-virtual methods, and sealed virtuals, can be
 * called directly once `this` is known non-null. */
2308 if ((!cfg->compile_aot || enable_for_aot) &&
2309 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2310 (MONO_METHOD_IS_FINAL (method) &&
2311 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2312 !(method->klass->marshalbyref && context_used)) {
2314 * the method is not virtual, we just need to ensure this is not null
2315 * and then we can call the method directly.
2317 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2319 * The check above ensures method is not gshared, this is needed since
2320 * gshared methods can't have wrappers.
2322 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2325 if (!method->string_ctor)
2326 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2328 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2330 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2332 return (MonoInst*)call;
2335 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2337 * the method is virtual, but we can statically dispatch since either
2338 * it's class or the method itself are sealed.
2339 * But first we need to ensure it's not a null reference.
2341 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2343 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2344 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2346 return (MonoInst*)call;
/* True virtual dispatch: call through a slot loaded off the vtable. */
2349 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2351 vtable_reg = alloc_preg (cfg);
2352 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2353 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2355 #ifdef MONO_ARCH_HAVE_IMT
/* IMT dispatch: interface slots live at negative offsets before the vtable. */
2357 guint32 imt_slot = mono_method_get_imt_slot (method);
2358 emit_imt_argument (cfg, call, imt_arg);
2359 slot_reg = vtable_reg;
2360 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
/* Non-IMT fallback: resolve the interface table pointer at run time. */
2363 if (slot_reg == -1) {
2364 slot_reg = alloc_preg (cfg);
2365 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2366 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
/* Class virtual call: index directly into the vtable's method array. */
2369 slot_reg = vtable_reg;
2370 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2371 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2372 #ifdef MONO_ARCH_HAVE_IMT
/* imt_arg on a class call implies generic virtual method dispatch. */
2374 g_assert (mono_method_signature (method)->generic_param_count);
2375 emit_imt_argument (cfg, call, imt_arg);
2380 call->inst.sreg1 = slot_reg;
2381 call->virtual = TRUE;
2384 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2386 return (MonoInst*)call;
/*
 * Like mono_emit_method_call_full, but additionally passes @vtable_arg (the
 * runtime generic context / vtable) in the dedicated RGCTX register on
 * targets that define MONO_ARCH_RGCTX_REG.
 */
2390 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2391 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
2393 #ifdef MONO_ARCH_RGCTX_REG
2400 #ifdef MONO_ARCH_RGCTX_REG
/* Pin the rgctx value to MONO_ARCH_RGCTX_REG before emitting the call. */
2401 rgctx_reg = mono_alloc_preg (cfg);
2402 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2407 ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2409 call = (MonoCallInst*)ins;
2411 #ifdef MONO_ARCH_RGCTX_REG
2412 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2413 cfg->uses_rgctx_reg = TRUE;
2414 call->rgctx_reg = TRUE;
/* Convenience wrapper: call @method with its own metadata signature and no
 * explicit IMT argument. */
2423 static inline MonoInst*
2424 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2426 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
/*
 * Emit a direct call to the native function @func with signature @sig and
 * append it to the current basic block.
 */
2430 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2437 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE);
2440 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2442 return (MonoInst*)call;
/*
 * Emit a call to a registered JIT icall identified by its function address:
 * look up the icall info and call through its wrapper with the icall's
 * registered signature.
 */
2445 inline static MonoInst*
2446 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2448 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2452 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2456 * mono_emit_abs_call:
2458 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2460 inline static MonoInst*
2461 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2462 MonoMethodSignature *sig, MonoInst **args)
2464 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2468 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Register ji in abs_patches so the patch resolver can recognize the fake
 * "address" and substitute the real target later. */
2471 if (cfg->abs_patches == NULL)
2472 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2473 g_hash_table_insert (cfg->abs_patches, ji, ji);
2474 ins = mono_emit_native_call (cfg, ji, sig, args);
2475 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * Widen a call's small-integer result to full register width when needed.
 * Applies to pinvoke (and LLVM) calls, since native code may leave the upper
 * bits of sub-register return values uninitialized. Returns the (possibly
 * replaced) result instruction.
 */
2480 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2482 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2483 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2487 * Native code might return non register sized integers
2488 * without initializing the upper bits.
/* Pick the sign/zero-extension matching the return type's load width. */
2490 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2491 case OP_LOADI1_MEMBASE:
2492 widen_op = OP_ICONV_TO_I1;
2494 case OP_LOADU1_MEMBASE:
2495 widen_op = OP_ICONV_TO_U1;
2497 case OP_LOADI2_MEMBASE:
2498 widen_op = OP_ICONV_TO_I2;
2500 case OP_LOADU2_MEMBASE:
2501 widen_op = OP_ICONV_TO_U2;
2507 if (widen_op != -1) {
2508 int dreg = alloc_preg (cfg);
2511 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2512 widen->type = ins->type;
/* Lazily look up and cache the 3-argument "memcpy" helper method on
 * corlib's String class; aborts if the installed corlib lacks it. */
2522 get_memcpy_method (void)
2524 static MonoMethod *memcpy_method = NULL;
2525 if (!memcpy_method) {
2526 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2528 g_error ("Old corlib found. Install a new one");
2530 return memcpy_method;
2534 * Emit code to copy a valuetype of type @klass whose address is stored in
2535 * @src->dreg to memory whose address is stored at @dest->dreg.
2538 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2540 MonoInst *iargs [3];
2543 MonoMethod *memcpy_method;
2547 * This check breaks with spilled vars... need to handle it during verification anyway.
2548 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* Size (and alignment) depend on whether the native or managed layout is wanted. */
2552 n = mono_class_native_size (klass, &align);
2554 n = mono_class_value_size (klass, &align);
2556 #if HAVE_WRITE_BARRIERS
2557 /* if native is true there should be no references in the struct */
2558 if (klass->has_references && !native) {
2559 /* Avoid barriers when storing to the stack */
2560 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2561 (dest->opcode == OP_LDADDR))) {
2562 int context_used = 0;
2567 if (cfg->generic_sharing_context)
2568 context_used = mono_class_check_context_used (klass);
/* Under generic sharing the klass argument comes from the rgctx,
 * otherwise it can be a compile-time constant. */
2570 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2572 EMIT_NEW_PCONST (cfg, iargs [2], klass);
2573 mono_class_compute_gc_descriptor (klass);
2576 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* Small copies are inlined; larger ones call the managed memcpy helper. */
2581 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2582 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2583 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2587 EMIT_NEW_ICONST (cfg, iargs [2], n);
2589 memcpy_method = get_memcpy_method ();
2590 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/* Lazily look up and cache the 3-argument "memset" helper method on
 * corlib's String class; aborts if the installed corlib lacks it. */
2595 get_memset_method (void)
2597 static MonoMethod *memset_method = NULL;
2598 if (!memset_method) {
2599 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2601 g_error ("Old corlib found. Install a new one");
2603 return memset_method;
/* Emit code to zero-initialize a valuetype of type @klass at the address in
 * @dest->dreg: inlined memset for small sizes, managed memset helper otherwise. */
2607 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2609 MonoInst *iargs [3];
2612 MonoMethod *memset_method;
2614 /* FIXME: Optimize this for the case when dest is an LDADDR */
2616 mono_class_init (klass);
2617 n = mono_class_value_size (klass, &align);
2619 if (n <= sizeof (gpointer) * 5) {
2620 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2623 memset_method = get_memset_method ();
2625 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2626 EMIT_NEW_ICONST (cfg, iargs [2], n);
2627 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * Emit IR to load the runtime generic context for METHOD. The source
 * depends on how the method is shared: the method rgctx variable, the
 * vtable variable, or the 'this' argument's vtable.
 */
2632 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2634 MonoInst *this = NULL;
2636 g_assert (cfg->generic_sharing_context);
/* Non-static, non-valuetype methods that don't use the method context
 * can read the context from the 'this' argument. */
2638 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2639 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2640 !method->klass->valuetype)
2641 EMIT_NEW_ARGLOAD (cfg, this, 0);
2643 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2644 MonoInst *mrgctx_loc, *mrgctx_var;
2647 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2649 mrgctx_loc = mono_get_vtable_var (cfg);
2650 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2653 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2654 MonoInst *vtable_loc, *vtable_var;
2658 vtable_loc = mono_get_vtable_var (cfg);
2659 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
/* The variable actually holds an mrgctx here; load the vtable out of it. */
2661 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2662 MonoInst *mrgctx_var = vtable_var;
2665 vtable_reg = alloc_preg (cfg);
2666 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2667 vtable_var->type = STACK_PTR;
/* Fallback: load the vtable from the 'this' object. */
2673 int vtable_reg, res_reg;
2675 vtable_reg = alloc_preg (cfg);
2676 res_reg = alloc_preg (cfg);
2677 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Allocate (from MP) and fill an rgctx-entry patch descriptor wrapping the
 * given patch type/data, for use by the lazy-fetch trampoline. */
2682 static MonoJumpInfoRgctxEntry *
2683 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2685 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2686 res->method = method;
2687 res->in_mrgctx = in_mrgctx;
2688 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2689 res->data->type = patch_type;
2690 res->data->data.target = patch_data;
2691 res->info_type = info_type;
/* Emit a call to the rgctx lazy-fetch trampoline which resolves ENTRY
 * against the given rgctx at run time. */
2696 static inline MonoInst*
2697 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2699 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/* Emit IR to fetch the RGCTX_TYPE info of KLASS from the runtime generic
 * context of the current method. */
2703 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2704 MonoClass *klass, int rgctx_type)
2706 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2707 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2709 return emit_rgctx_fetch (cfg, rgctx, entry);
2713 * emit_get_rgctx_method:
2715 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2716 * normal constants, else emit a load from the rgctx.
2719 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2720 MonoMethod *cmethod, int rgctx_type)
2722 if (!context_used) {
2725 switch (rgctx_type) {
2726 case MONO_RGCTX_INFO_METHOD:
2727 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
2729 case MONO_RGCTX_INFO_METHOD_RGCTX:
2730 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
/* Any other rgctx_type is unsupported in the constant path. */
2733 g_assert_not_reached ();
2736 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2737 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2739 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit IR to fetch the RGCTX_TYPE info of FIELD from the runtime generic
 * context of the current method. */
2744 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2745 MonoClassField *field, int rgctx_type)
2747 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2748 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2750 return emit_rgctx_fetch (cfg, rgctx, entry);
2754 * On return the caller must check @klass for load errors.
/* Emit a call to the generic class-init trampoline for KLASS, passing the
 * vtable (from the rgctx when shared, else as a constant). */
2757 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
2759 MonoInst *vtable_arg;
2761 int context_used = 0;
2763 if (cfg->generic_sharing_context)
2764 context_used = mono_class_check_context_used (klass);
2767 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
2768 klass, MONO_RGCTX_INFO_VTABLE);
2770 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2774 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
2777 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
/* On architectures with a dedicated vtable register, pass the vtable there. */
2778 #ifdef MONO_ARCH_VTABLE_REG
2779 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
2780 cfg->uses_vtable_reg = TRUE;
2787 * On return the caller must check @array_class for load errors
/* Emit a run-time check that OBJ is an instance of ARRAY_CLASS, throwing
 * ArrayTypeMismatchException otherwise. The comparison strategy depends on
 * MONO_OPT_SHARED, generic sharing, and AOT compilation. */
2790 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
2792 int vtable_reg = alloc_preg (cfg);
2793 int context_used = 0;
2795 if (cfg->generic_sharing_context)
2796 context_used = mono_class_check_context_used (array_class);
2798 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Shared code compares MonoClass pointers instead of vtables. */
2800 if (cfg->opt & MONO_OPT_SHARED) {
2801 int class_reg = alloc_preg (cfg);
2802 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2803 if (cfg->compile_aot) {
2804 int klass_reg = alloc_preg (cfg);
2805 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
2806 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
2808 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
/* Generic sharing: fetch the expected vtable from the rgctx. */
2810 } else if (context_used) {
2811 MonoInst *vtable_ins;
2813 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
2814 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
2816 if (cfg->compile_aot) {
2820 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
2822 vt_reg = alloc_preg (cfg);
2823 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
2824 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
2827 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
2829 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
2833 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
/* When --debug=casts is enabled, record the source and target classes of a
 * cast in TLS so a failing cast can produce a detailed error message. */
2837 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
2839 if (mini_get_debug_options ()->better_cast_details) {
2840 int to_klass_reg = alloc_preg (cfg);
2841 int vtable_reg = alloc_preg (cfg);
2842 int klass_reg = alloc_preg (cfg);
2843 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2846 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
2850 MONO_ADD_INS (cfg->cbb, tls_get);
/* obj -> vtable -> klass: the class we are casting *from*. */
2851 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2852 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2854 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
2855 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
2856 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/* Clear the TLS cast-details recorded by save_cast_details () once the
 * cast has succeeded. */
2861 reset_cast_details (MonoCompile *cfg)
2863 /* Reset the variables holding the cast details */
2864 if (mini_get_debug_options ()->better_cast_details) {
2865 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2867 MONO_ADD_INS (cfg->cbb, tls_get);
2868 /* It is enough to reset the from field */
2869 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
2874 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
2875 * generic code is generated.
2878 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
2880 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
2883 MonoInst *rgctx, *addr;
2885 /* FIXME: What if the class is shared? We might not
2886 have to get the address of the method from the
/* Shared path: resolve Nullable<T>.Unbox's code address via the rgctx
 * and do an indirect call. */
2888 addr = emit_get_rgctx_method (cfg, context_used, method,
2889 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2891 rgctx = emit_get_rgctx (cfg, method, context_used);
2893 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
/* Non-shared path: direct call. */
2895 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * Emit IR to unbox sp [0] as a valuetype of type KLASS: check the object's
 * rank and element class, throw InvalidCastException on mismatch, and
 * return the address of the payload past the MonoObject header.
 */
2900 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
2904 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
2905 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
2906 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
2907 int rank_reg = alloc_dreg (cfg ,STACK_I4);
2909 obj_reg = sp [0]->dreg;
2910 MONO_EMIT_NULL_CHECK (cfg, obj_reg);
2911 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2912 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2914 /* FIXME: generics */
2915 g_assert (klass->rank == 0);
/* The boxed object must not be an array (rank 0). */
2918 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
2919 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2921 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2922 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
/* Shared path: compare against the element class fetched from the rgctx. */
2925 MonoInst *element_class;
2927 /* This assertion is from the unboxcast insn */
2928 g_assert (klass->rank == 0);
2930 element_class = emit_get_rgctx_klass (cfg, context_used,
2931 klass->element_class, MONO_RGCTX_INFO_KLASS);
2933 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
2934 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2936 save_cast_details (cfg, klass->element_class, obj_reg);
2937 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
2938 reset_cast_details (cfg);
/* Result: address of the valuetype payload inside the boxed object. */
2941 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
2942 MONO_ADD_INS (cfg->cbb, add);
2943 add->type = STACK_MP;
2950 * Returns NULL and set the cfg exception on error.
/* Emit IR allocating an object of type KLASS, choosing between the shared
 * mono_object_new path, a specialized mscorlib AOT helper, a managed GC
 * allocator, and the generic allocation function. */
2953 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box)
2955 MonoInst *iargs [2];
2958 if (cfg->opt & MONO_OPT_SHARED) {
2959 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2960 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
2962 alloc_ftn = mono_object_new;
2963 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
2964 /* This happens often in argument checking code, eg. throw new FooException... */
2965 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
2966 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
2967 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
2969 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2970 MonoMethod *managed_alloc = NULL;
/* vtable lookup failed: report a TypeLoadException via the cfg. */
2974 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
2975 cfg->exception_ptr = klass;
2979 #ifndef MONO_CROSS_COMPILE
2980 managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2983 if (managed_alloc) {
2984 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2985 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
2987 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocators also take the instance size in pointer-sized words. */
2989 guint32 lw = vtable->klass->instance_size;
2990 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
2991 EMIT_NEW_ICONST (cfg, iargs [0], lw);
2992 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
2995 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2999 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Like handle_alloc (), but the vtable comes from DATA_INST (an rgctx
 * fetch) instead of a compile-time constant — used under generic sharing. */
3003 handle_alloc_from_inst (MonoCompile *cfg, MonoClass *klass, MonoInst *data_inst,
3006 MonoInst *iargs [2];
3007 MonoMethod *managed_alloc = NULL;
3011 FIXME: we cannot get managed_alloc here because we can't get
3012 the class's vtable (because it's not a closed class)
3014 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3015 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3018 if (cfg->opt & MONO_OPT_SHARED) {
3019 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3020 iargs [1] = data_inst;
3021 alloc_ftn = mono_object_new;
3023 if (managed_alloc) {
3024 iargs [0] = data_inst;
3025 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3028 iargs [0] = data_inst;
3029 alloc_ftn = mono_object_new_specific;
3032 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3036 * Returns NULL and set the cfg exception on error.
/* Emit IR to box VAL as type KLASS: Nullable<T> goes through its managed
 * Box method, everything else allocates and stores the value. */
3039 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass)
3041 MonoInst *alloc, *ins;
3043 if (mono_class_is_nullable (klass)) {
3044 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3045 return mono_emit_method_call (cfg, method, &val, NULL);
3048 alloc = handle_alloc (cfg, klass, TRUE);
/* Copy the value into the freshly allocated object, past the header. */
3052 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/* Generic-sharing variant of handle_box (): the allocation data comes from
 * DATA_INST, and Nullable<T>.Box is called indirectly via the rgctx. */
3058 handle_box_from_inst (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoInst *data_inst)
3060 MonoInst *alloc, *ins;
3062 if (mono_class_is_nullable (klass)) {
3063 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3064 /* FIXME: What if the class is shared? We might not
3065 have to get the method address from the RGCTX. */
3066 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3067 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3068 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3070 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3072 alloc = handle_alloc_from_inst (cfg, klass, data_inst, TRUE);
3074 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3081 * Returns NULL and set the cfg exception on error.
/* Emit IR for the castclass opcode: null passes through, interfaces use
 * the iface-cast helper, sealed classes can compare the vtable/class
 * directly, everything else goes through mini_emit_castclass (). */
3084 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3086 MonoBasicBlock *is_null_bb;
3087 int obj_reg = src->dreg;
3088 int vtable_reg = alloc_preg (cfg);
3090 NEW_BBLOCK (cfg, is_null_bb);
/* A null reference always casts successfully. */
3092 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3093 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3095 save_cast_details (cfg, klass, obj_reg);
3097 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3098 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3099 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3101 int klass_reg = alloc_preg (cfg);
3103 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3105 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3106 /* the remoting code is broken, access the class for now */
3107 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3108 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3110 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3111 cfg->exception_ptr = klass;
3114 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3116 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3117 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3119 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3121 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3122 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, is_null_bb);
3126 MONO_START_BB (cfg, is_null_bb);
3128 reset_cast_details (cfg);
3134 * Returns NULL and set the cfg exception on error.
/* Emit IR for the isinst opcode: result register holds the object on
 * success or NULL on failure. Handles interfaces, arrays (including the
 * enum/object element-class special cases), nullables and sealed classes. */
3137 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3140 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3141 int obj_reg = src->dreg;
3142 int vtable_reg = alloc_preg (cfg);
3143 int res_reg = alloc_preg (cfg);
3145 NEW_BBLOCK (cfg, is_null_bb);
3146 NEW_BBLOCK (cfg, false_bb);
3147 NEW_BBLOCK (cfg, end_bb);
3149 /* Do the assignment at the beginning, so the other assignment can be if converted */
3150 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3151 ins->type = STACK_OBJ;
/* isinst of null yields null (the copied input register). */
3154 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3155 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3157 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3158 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3159 /* the is_null_bb target simply copies the input register to the output */
3160 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3162 int klass_reg = alloc_preg (cfg);
3164 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Array case: first the ranks must match, then the element classes. */
3167 int rank_reg = alloc_preg (cfg);
3168 int eclass_reg = alloc_preg (cfg);
3170 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3171 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3172 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3173 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3174 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
3175 if (klass->cast_class == mono_defaults.object_class) {
3176 int parent_reg = alloc_preg (cfg);
3177 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3178 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3179 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3180 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3181 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3182 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3183 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3184 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3185 } else if (klass->cast_class == mono_defaults.enum_class) {
3186 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3187 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3188 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3189 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3191 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3192 /* Check that the object is a vector too */
3193 int bounds_reg = alloc_preg (cfg);
3194 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3195 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3196 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3199 /* the is_null_bb target simply copies the input register to the output */
3200 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3202 } else if (mono_class_is_nullable (klass)) {
3203 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3204 /* the is_null_bb target simply copies the input register to the output */
3205 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
3207 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3208 /* the remoting code is broken, access the class for now */
3209 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3210 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3212 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3213 cfg->exception_ptr = klass;
3216 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3218 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3219 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3221 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3222 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3224 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3225 /* the is_null_bb target simply copies the input register to the output */
3226 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, is_null_bb);
/* Failure path: produce NULL in the result register. */
3231 MONO_START_BB (cfg, false_bb);
3233 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3234 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3236 MONO_START_BB (cfg, is_null_bb);
3238 MONO_START_BB (cfg, end_bb);
/* Emit IR for the remoting-aware isinst check; see the result encoding in
 * the comment below (0 = instance, 1 = not, 2 = undecidable proxy). */
3244 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3246 /* This opcode takes as input an object reference and a class, and returns:
3247 0) if the object is an instance of the class,
3248 1) if the object is not instance of the class,
3249 2) if the object is a proxy whose type cannot be determined */
3252 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3253 int obj_reg = src->dreg;
3254 int dreg = alloc_ireg (cfg);
3256 int klass_reg = alloc_preg (cfg);
3258 NEW_BBLOCK (cfg, true_bb);
3259 NEW_BBLOCK (cfg, false_bb);
3260 NEW_BBLOCK (cfg, false2_bb);
3261 NEW_BBLOCK (cfg, end_bb);
3262 NEW_BBLOCK (cfg, no_proxy_bb);
/* A null reference is "not an instance". */
3264 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3265 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3267 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3268 NEW_BBLOCK (cfg, interface_fail_bb);
3270 tmp_reg = alloc_preg (cfg);
3271 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3272 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
/* Interface check failed: only a transparent proxy with no custom type
 * info can still yield result 2. */
3273 MONO_START_BB (cfg, interface_fail_bb);
3274 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3276 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3278 tmp_reg = alloc_preg (cfg);
3279 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3280 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3281 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
/* Non-interface case: distinguish transparent proxies from plain objects. */
3283 tmp_reg = alloc_preg (cfg);
3284 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3285 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3287 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3288 tmp_reg = alloc_preg (cfg);
3289 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3290 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3292 tmp_reg = alloc_preg (cfg);
3293 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3294 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3295 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3297 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3298 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3300 MONO_START_BB (cfg, no_proxy_bb);
3302 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Materialize the three possible results and join at end_bb. */
3305 MONO_START_BB (cfg, false_bb);
3307 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3308 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3310 MONO_START_BB (cfg, false2_bb);
3312 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3313 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3315 MONO_START_BB (cfg, true_bb);
3317 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3319 MONO_START_BB (cfg, end_bb);
3322 MONO_INST_NEW (cfg, ins, OP_ICONST);
3324 ins->type = STACK_I4;
/* Emit IR for the remoting-aware castclass check; see the result encoding
 * in the comment below (0 = instance, 1 = undecidable proxy, else throw). */
3330 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3332 /* This opcode takes as input an object reference and a class, and returns:
3333 0) if the object is an instance of the class,
3334 1) if the object is a proxy whose type cannot be determined
3335 an InvalidCastException exception is thrown otherwhise*/
3338 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3339 int obj_reg = src->dreg;
3340 int dreg = alloc_ireg (cfg);
3341 int tmp_reg = alloc_preg (cfg);
3342 int klass_reg = alloc_preg (cfg);
3344 NEW_BBLOCK (cfg, end_bb);
3345 NEW_BBLOCK (cfg, ok_result_bb);
/* A null reference casts successfully (result 0). */
3347 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3348 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
3350 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3351 NEW_BBLOCK (cfg, interface_fail_bb);
3353 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3354 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
/* Interface check failed: must be a transparent proxy without custom
 * type info to return 1, otherwise InvalidCastException. */
3355 MONO_START_BB (cfg, interface_fail_bb);
3356 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3358 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3360 tmp_reg = alloc_preg (cfg);
3361 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3362 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3363 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
3365 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3366 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3369 NEW_BBLOCK (cfg, no_proxy_bb);
/* Non-interface case: distinguish transparent proxies from plain objects. */
3371 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3372 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3373 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3375 tmp_reg = alloc_preg (cfg);
3376 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3377 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3379 tmp_reg = alloc_preg (cfg);
3380 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3381 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3382 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3384 NEW_BBLOCK (cfg, fail_1_bb);
3386 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3388 MONO_START_BB (cfg, fail_1_bb);
3390 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3391 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3393 MONO_START_BB (cfg, no_proxy_bb);
3395 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3398 MONO_START_BB (cfg, ok_result_bb);
3400 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3402 MONO_START_BB (cfg, end_bb);
3405 MONO_INST_NEW (cfg, ins, OP_ICONST);
3407 ins->type = STACK_I4;
3413 * Returns NULL and set the cfg exception on error.
/* Emit IR that inlines the delegate constructor: allocate the delegate,
 * fill in target/method/method_code/invoke_impl. */
3415 static G_GNUC_UNUSED MonoInst*
3416 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
3418 gpointer *trampoline;
3419 MonoInst *obj, *method_ins, *tramp_ins;
3423 obj = handle_alloc (cfg, klass, FALSE);
3427 /* Inline the contents of mono_delegate_ctor */
3429 /* Set target field */
3430 /* Optimize away setting of NULL target */
3431 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
3432 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3434 /* Set method field */
3435 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
3436 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3439 * To avoid looking up the compiled code belonging to the target method
3440 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3441 * store it, and we fill it after the method has been compiled.
3443 if (!cfg->compile_aot && !method->dynamic) {
3444 MonoInst *code_slot_ins;
3447 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Look up (or lazily create) the per-domain code slot under the domain lock. */
3449 domain = mono_domain_get ();
3450 mono_domain_lock (domain);
3451 if (!domain_jit_info (domain)->method_code_hash)
3452 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3453 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3455 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3456 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3458 mono_domain_unlock (domain);
3460 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3462 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3465 /* Set invoke_impl field */
3466 if (cfg->compile_aot) {
3467 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3469 trampoline = mono_create_delegate_trampoline (klass);
3470 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3472 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3474 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
3480 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
/*
 * Emit a call to the mono_array_new_va () icall to create a RANK-dimensional
 * array; the dimension arguments are in SP. Returns the call instruction.
 */
3482 MonoJitICallInfo *info;
3484 /* Need to register the icall so it gets an icall wrapper */
3485 info = mono_get_array_new_va_icall (rank);
3487 cfg->flags |= MONO_CFG_HAS_VARARGS;
3489 /* mono_array_new_va () needs a vararg calling convention */
3490 cfg->disable_llvm = TRUE;
3492 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3493 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
3497 mono_emit_load_got_addr (MonoCompile *cfg)
/*
 * Emit the OP_LOAD_GOTADDR instruction which materializes the GOT address
 * into cfg->got_var, placing it at the very start of the entry bblock.
 * No-op if there is no got_var or it was already allocated.
 */
3499 MonoInst *getaddr, *dummy_use;
3501 if (!cfg->got_var || cfg->got_var_allocated)
3504 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3505 getaddr->dreg = cfg->got_var->dreg;
3507 /* Add it to the start of the first bblock */
/* Prepend manually when code already exists; otherwise MONO_ADD_INS appends
 * to the (empty) entry bblock, which is equivalent. */
3508 if (cfg->bb_entry->code) {
3509 getaddr->next = cfg->bb_entry->code;
3510 cfg->bb_entry->code = getaddr;
3513 MONO_ADD_INS (cfg->bb_entry, getaddr);
3515 cfg->got_var_allocated = TRUE;
3518 * Add a dummy use to keep the got_var alive, since real uses might
3519 * only be generated by the back ends.
3520 * Add it to end_bblock, so the variable's lifetime covers the whole
3522 * It would be better to make the usage of the got var explicit in all
3523 * cases when the backend needs it (i.e. calls, throw etc.), so this
3524 * wouldn't be needed.
3526 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3527 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Cached IL-size limit for inlining; initialized once from the
 * MONO_INLINELIMIT environment variable (or INLINE_LENGTH_LIMIT)
 * by mono_method_check_inlining (). */
3530 static int inline_limit;
3531 static gboolean inline_limit_inited;
3534 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
/*
 * Decide whether METHOD may be inlined into the method being compiled in CFG.
 * Rejects: generic sharing, deep inline nesting, runtime/icall/noinline/
 * synchronized/pinvoke methods, MarshalByRef classes, methods with exception
 * clauses, oversized IL bodies, classes whose cctor cannot be run eagerly,
 * methods with declarative security, and (on soft-float targets) methods with
 * R4 parameters or return values.
 * NOTE(review): the sample omits the declarations of 'vtable' and several
 * return statements; the return values noted below are inferred from the
 * surrounding comments and should be confirmed against the full source.
 */
3536 MonoMethodHeader *header;
3538 #ifdef MONO_ARCH_SOFT_FLOAT
3539 MonoMethodSignature *sig = mono_method_signature (method);
3543 if (cfg->generic_sharing_context)
3546 if (cfg->inline_depth > 10)
3549 #ifdef MONO_ARCH_HAVE_LMF_OPS
3550 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3551 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3552 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3556 if (method->is_inflated)
3557 /* Avoid inflating the header */
3558 header = mono_method_get_header (((MonoMethodInflated*)method)->declaring);
3560 header = mono_method_get_header (method);
3562 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME) ||
3563 (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3564 (method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3565 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3566 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) ||
3567 (method->klass->marshalbyref) ||
3568 !header || header->num_clauses)
3571 /* also consider num_locals? */
3572 /* Do the size check early to avoid creating vtables */
3573 if (!inline_limit_inited) {
3574 if (getenv ("MONO_INLINELIMIT"))
3575 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3577 inline_limit = INLINE_LENGTH_LIMIT;
3578 inline_limit_inited = TRUE;
3580 if (header->code_size >= inline_limit)
3584 * if we can initialize the class of the method right away, we do,
3585 * otherwise we don't allow inlining if the class needs initialization,
3586 * since it would mean inserting a call to mono_runtime_class_init()
3587 * inside the inlined code
3589 if (!(cfg->opt & MONO_OPT_SHARED)) {
3590 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3591 if (cfg->run_cctors && method->klass->has_cctor) {
3592 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
3593 if (!method->klass->runtime_info)
3594 /* No vtable created yet */
3596 vtable = mono_class_vtable (cfg->domain, method->klass);
3599 /* This makes so that inline cannot trigger */
3600 /* .cctors: too many apps depend on them */
3601 /* running with a specific order... */
3602 if (! vtable->initialized)
3604 mono_runtime_class_init (vtable);
3606 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3607 if (!method->klass->runtime_info)
3608 /* No vtable created yet */
3610 vtable = mono_class_vtable (cfg->domain, method->klass);
3613 if (!vtable->initialized)
3618 * If we're compiling for shared code
3619 * the cctor will need to be run at aot method load time, for example,
3620 * or at the end of the compilation of the inlining method.
3622 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3627 * CAS - do not inline methods with declarative security
3628 * Note: this has to be before any possible return TRUE;
3630 if (mono_method_has_declsec (method))
3633 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float targets cannot inline methods taking/returning R4. */
3635 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3637 for (i = 0; i < sig->param_count; ++i)
3638 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
3646 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
/*
 * Return whether a static-field access in METHOD must be guarded by a
 * class-initialization check for VTABLE's class. Already-initialized classes
 * (outside AOT), beforefieldinit classes, classes with no pending cctor, and
 * instance methods of the class itself (cctor already ran before the call)
 * need no check.
 */
3648 if (vtable->initialized && !cfg->compile_aot)
3651 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3654 if (!mono_class_needs_cctor_run (vtable->klass, method))
3657 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3658 /* The initialization is already done before the method is called */
3665 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index)
/*
 * Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR with element class KLASS, including a bounds check.
 * Returns the instruction whose dreg holds the element address.
 */
3669 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3671 mono_class_init (klass);
3672 size = mono_class_array_element_size (klass);
3674 mult_reg = alloc_preg (cfg);
3675 array_reg = arr->dreg;
3676 index_reg = index->dreg;
3678 #if SIZEOF_REGISTER == 8
3679 /* The array reg is 64 bits but the index reg is only 32 */
3680 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
3683 if (index->type == STACK_I8) {
3684 index2_reg = alloc_preg (cfg);
3685 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
3687 index2_reg = index_reg;
3691 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3693 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Fast path: power-of-two element sizes can use a single LEA with a
 * scaled index on x86/amd64. */
3694 if (size == 1 || size == 2 || size == 4 || size == 8) {
3695 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3697 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3698 ins->type = STACK_PTR;
/* Generic path: addr = arr + index * size + offsetof (MonoArray, vector). */
3704 add_reg = alloc_preg (cfg);
3706 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3707 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3708 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3709 ins->type = STACK_PTR;
3710 MONO_ADD_INS (cfg->cbb, ins);
3715 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3717 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
/*
 * Emit IR computing the address of element [i1, i2] of the two-dimensional
 * array ARR with element class KLASS. Range-checks both indices against the
 * array's MonoArrayBounds (adjusting for lower bounds), then computes
 * addr = arr + ((i1' * len2) + i2') * size + offsetof (MonoArray, vector).
 * Only compiled when the target does not emulate mul/div
 * (guarded by #ifndef MONO_ARCH_EMULATE_MUL_DIV above).
 */
3719 int bounds_reg = alloc_preg (cfg);
3720 int add_reg = alloc_preg (cfg);
3721 int mult_reg = alloc_preg (cfg);
3722 int mult2_reg = alloc_preg (cfg);
3723 int low1_reg = alloc_preg (cfg);
3724 int low2_reg = alloc_preg (cfg);
3725 int high1_reg = alloc_preg (cfg);
3726 int high2_reg = alloc_preg (cfg);
3727 int realidx1_reg = alloc_preg (cfg);
3728 int realidx2_reg = alloc_preg (cfg);
3729 int sum_reg = alloc_preg (cfg);
3734 mono_class_init (klass);
3735 size = mono_class_array_element_size (klass);
3737 index1 = index_ins1->dreg;
3738 index2 = index_ins2->dreg;
3740 /* range checking */
3741 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
3742 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* First dimension: realidx1 = index1 - lower_bound; unsigned compare against
 * length catches both negative and too-large indices in one branch. */
3744 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
3745 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3746 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
3747 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
3748 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
3749 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
3750 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Second dimension: bounds entry lives sizeof (MonoArrayBounds) past the first. */
3752 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
3753 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3754 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
3755 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
3756 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
3757 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
3758 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Linearize the 2-D index and scale by the element size. */
3760 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
3761 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
3762 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
3763 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
3764 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3766 ins->type = STACK_MP;
3768 MONO_ADD_INS (cfg->cbb, ins);
3775 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
/*
 * Emit the address computation for an array Get/Set/Address accessor
 * CMETHOD. Rank-1 and (with MONO_OPT_INTRINS) rank-2 arrays get inline
 * address code; higher ranks fall back to a call to the marshalled
 * Address () helper method.
 */
3779 MonoMethod *addr_method;
/* For setters, the last signature parameter is the value, not an index. */
3782 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3785 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1]);
3787 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3788 /* emit_ldelema_2 depends on OP_LMUL */
3789 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3790 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
3794 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3795 addr_method = mono_marshal_get_array_address (rank, element_size);
3796 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
3802 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
/*
 * Intrinsic expansion: recognize well-known corlib methods (String, Object,
 * Array, RuntimeHelpers, Thread, Monitor, Interlocked, Math, Debugger,
 * Environment) and emit inline IR for them instead of a call.
 * Returns the result instruction, or falls through to SIMD intrinsics and
 * finally the arch-specific mono_arch_emit_inst_for_method ().
 * NOTE(review): this excerpt is sampled — several 'return ins;'/'return NULL;'
 * lines and some #else/#endif lines are not visible here.
 */
3804 MonoInst *ins = NULL;
3806 static MonoClass *runtime_helpers_class = NULL;
3807 if (! runtime_helpers_class)
3808 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
3809 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* --- System.String intrinsics --- */
3811 if (cmethod->klass == mono_defaults.string_class) {
3812 if (strcmp (cmethod->name, "get_Chars") == 0) {
3813 int dreg = alloc_ireg (cfg);
3814 int index_reg = alloc_preg (cfg);
3815 int mult_reg = alloc_preg (cfg);
3816 int add_reg = alloc_preg (cfg);
3818 #if SIZEOF_REGISTER == 8
3819 /* The array reg is 64 bits but the index reg is only 32 */
3820 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
3822 index_reg = args [1]->dreg;
3824 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
3826 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* x86/amd64: one LEA with scale 2 (UTF-16 chars) instead of shift+add. */
3827 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
3828 add_reg = ins->dreg;
3829 /* Avoid a warning */
3831 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3834 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
3835 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3836 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3837 add_reg, G_STRUCT_OFFSET (MonoString, chars));
3839 type_from_op (ins, NULL, NULL);
3841 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3842 int dreg = alloc_ireg (cfg);
3843 /* Decompose later to allow more optimizations */
3844 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
3845 ins->type = STACK_I4;
3846 cfg->cbb->has_array_access = TRUE;
3847 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
3850 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
3851 int mult_reg = alloc_preg (cfg);
3852 int add_reg = alloc_preg (cfg);
3854 /* The corlib functions check for oob already. */
3855 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
3856 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3857 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
/* --- System.Object intrinsics --- */
3860 } else if (cmethod->klass == mono_defaults.object_class) {
3862 if (strcmp (cmethod->name, "GetType") == 0) {
3863 int dreg = alloc_preg (cfg);
3864 int vt_reg = alloc_preg (cfg);
3865 MONO_EMIT_NULL_CHECK (cfg, args [0]->dreg);
3866 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3867 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
3868 type_from_op (ins, NULL, NULL);
3871 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
/* Hash the (non-moving) object address; 2654435761 is the Knuth
 * multiplicative-hash constant. Invalid with a moving GC. */
3872 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
3873 int dreg = alloc_ireg (cfg);
3874 int t1 = alloc_ireg (cfg);
3876 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
3877 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
3878 ins->type = STACK_I4;
3882 } else if (strcmp (cmethod->name, ".ctor") == 0) {
3883 MONO_INST_NEW (cfg, ins, OP_NOP);
3884 MONO_ADD_INS (cfg->cbb, ins);
/* --- System.Array intrinsics --- */
3888 } else if (cmethod->klass == mono_defaults.array_class) {
3889 if (cmethod->name [0] != 'g')
3892 if (strcmp (cmethod->name, "get_Rank") == 0) {
3893 int dreg = alloc_ireg (cfg);
3894 int vtable_reg = alloc_preg (cfg);
3895 MONO_EMIT_NULL_CHECK (cfg, args [0]->dreg);
3896 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, vtable_reg,
3897 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3898 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
3899 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3900 type_from_op (ins, NULL, NULL);
3903 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3904 int dreg = alloc_ireg (cfg);
3906 MONO_EMIT_NULL_CHECK (cfg, args [0]->dreg);
3907 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
3908 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
3909 type_from_op (ins, NULL, NULL);
/* --- RuntimeHelpers --- */
3914 } else if (cmethod->klass == runtime_helpers_class) {
3916 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
3917 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
/* --- System.Threading.Thread --- */
3921 } else if (cmethod->klass == mono_defaults.thread_class) {
3922 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
3923 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
3924 MONO_ADD_INS (cfg->cbb, ins);
3926 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
3927 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
3928 MONO_ADD_INS (cfg->cbb, ins);
/* --- System.Threading.Monitor fast paths --- */
3931 } else if (cmethod->klass == mono_defaults.monitor_class) {
3932 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
3933 if (strcmp (cmethod->name, "Enter") == 0) {
3936 if (COMPILE_LLVM (cfg)) {
3938 * Pass the argument normally, the LLVM backend will handle the
3939 * calling convention problems.
3941 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
3943 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
3944 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
/* Non-LLVM path: the trampoline takes the object in a fixed register. */
3945 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
3946 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
3949 return (MonoInst*)call;
3950 } else if (strcmp (cmethod->name, "Exit") == 0) {
3953 if (COMPILE_LLVM (cfg)) {
3954 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
3956 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
3957 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
3958 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
3959 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
3962 return (MonoInst*)call;
3964 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
3965 MonoMethod *fast_method = NULL;
3967 /* Avoid infinite recursion */
3968 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
3969 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
3970 strcmp (cfg->method->name, "FastMonitorExit") == 0))
3973 if (strcmp (cmethod->name, "Enter") == 0 ||
3974 strcmp (cmethod->name, "Exit") == 0)
3975 fast_method = mono_monitor_get_fast_path (cmethod);
3979 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
/* --- System.Array.GetGenericValueImpl: inline element load/store --- */
3981 } else if (mini_class_is_system_array (cmethod->klass) &&
3982 strcmp (cmethod->name, "GetGenericValueImpl") == 0) {
3983 MonoInst *addr, *store, *load;
3984 MonoClass *eklass = mono_class_from_mono_type (fsig->params [1]);
3986 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1]);
3987 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
3988 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* --- System.Threading.Interlocked intrinsics --- */
3990 } else if (cmethod->klass->image == mono_defaults.corlib &&
3991 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
3992 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
3995 #if SIZEOF_REGISTER == 8
3996 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
3997 /* 64 bit reads are already atomic */
3998 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
3999 ins->dreg = mono_alloc_preg (cfg);
4000 ins->inst_basereg = args [0]->dreg;
4001 ins->inst_offset = 0;
4002 MONO_ADD_INS (cfg->cbb, ins);
4006 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
/* Increment/Decrement: atomic add of +1/-1 via OP_ATOMIC_ADD_NEW_*. */
4007 if (strcmp (cmethod->name, "Increment") == 0) {
4008 MonoInst *ins_iconst;
4011 if (fsig->params [0]->type == MONO_TYPE_I4)
4012 opcode = OP_ATOMIC_ADD_NEW_I4;
4013 #if SIZEOF_REGISTER == 8
4014 else if (fsig->params [0]->type == MONO_TYPE_I8)
4015 opcode = OP_ATOMIC_ADD_NEW_I8;
4018 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4019 ins_iconst->inst_c0 = 1;
4020 ins_iconst->dreg = mono_alloc_ireg (cfg);
4021 MONO_ADD_INS (cfg->cbb, ins_iconst);
4023 MONO_INST_NEW (cfg, ins, opcode);
4024 ins->dreg = mono_alloc_ireg (cfg);
4025 ins->inst_basereg = args [0]->dreg;
4026 ins->inst_offset = 0;
4027 ins->sreg2 = ins_iconst->dreg;
4028 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4029 MONO_ADD_INS (cfg->cbb, ins);
4031 } else if (strcmp (cmethod->name, "Decrement") == 0) {
4032 MonoInst *ins_iconst;
4035 if (fsig->params [0]->type == MONO_TYPE_I4)
4036 opcode = OP_ATOMIC_ADD_NEW_I4;
4037 #if SIZEOF_REGISTER == 8
4038 else if (fsig->params [0]->type == MONO_TYPE_I8)
4039 opcode = OP_ATOMIC_ADD_NEW_I8;
4042 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4043 ins_iconst->inst_c0 = -1;
4044 ins_iconst->dreg = mono_alloc_ireg (cfg);
4045 MONO_ADD_INS (cfg->cbb, ins_iconst);
4047 MONO_INST_NEW (cfg, ins, opcode);
4048 ins->dreg = mono_alloc_ireg (cfg);
4049 ins->inst_basereg = args [0]->dreg;
4050 ins->inst_offset = 0;
4051 ins->sreg2 = ins_iconst->dreg;
4052 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4053 MONO_ADD_INS (cfg->cbb, ins);
4055 } else if (strcmp (cmethod->name, "Add") == 0) {
4058 if (fsig->params [0]->type == MONO_TYPE_I4)
4059 opcode = OP_ATOMIC_ADD_NEW_I4;
4060 #if SIZEOF_REGISTER == 8
4061 else if (fsig->params [0]->type == MONO_TYPE_I8)
4062 opcode = OP_ATOMIC_ADD_NEW_I8;
4066 MONO_INST_NEW (cfg, ins, opcode);
4067 ins->dreg = mono_alloc_ireg (cfg);
4068 ins->inst_basereg = args [0]->dreg;
4069 ins->inst_offset = 0;
4070 ins->sreg2 = args [1]->dreg;
4071 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4072 MONO_ADD_INS (cfg->cbb, ins);
4075 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
4077 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
4078 if (strcmp (cmethod->name, "Exchange") == 0) {
4080 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
4082 if (fsig->params [0]->type == MONO_TYPE_I4)
4083 opcode = OP_ATOMIC_EXCHANGE_I4;
4084 #if SIZEOF_REGISTER == 8
4085 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
4086 (fsig->params [0]->type == MONO_TYPE_I))
4087 opcode = OP_ATOMIC_EXCHANGE_I8;
4089 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
4090 opcode = OP_ATOMIC_EXCHANGE_I4;
4095 MONO_INST_NEW (cfg, ins, opcode);
4096 ins->dreg = mono_alloc_ireg (cfg);
4097 ins->inst_basereg = args [0]->dreg;
4098 ins->inst_offset = 0;
4099 ins->sreg2 = args [1]->dreg;
4100 MONO_ADD_INS (cfg->cbb, ins);
4102 switch (fsig->params [0]->type) {
4104 ins->type = STACK_I4;
4108 ins->type = STACK_I8;
4110 case MONO_TYPE_OBJECT:
4111 ins->type = STACK_OBJ;
4114 g_assert_not_reached ();
4117 #if HAVE_WRITE_BARRIERS
/* Exchanging an object reference needs a GC write barrier. */
4119 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
4120 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
4124 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4126 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
4127 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
4129 gboolean is_ref = MONO_TYPE_IS_REFERENCE (fsig->params [1]);
4130 if (fsig->params [1]->type == MONO_TYPE_I4)
4132 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
4133 size = sizeof (gpointer);
4134 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I4)
4137 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
4138 ins->dreg = alloc_ireg (cfg);
4139 ins->sreg1 = args [0]->dreg;
4140 ins->sreg2 = args [1]->dreg;
4141 ins->sreg3 = args [2]->dreg;
4142 ins->type = STACK_I4;
4143 MONO_ADD_INS (cfg->cbb, ins);
4144 } else if (size == 8) {
4145 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
4146 ins->dreg = alloc_ireg (cfg);
4147 ins->sreg1 = args [0]->dreg;
4148 ins->sreg2 = args [1]->dreg;
4149 ins->sreg3 = args [2]->dreg;
4150 ins->type = STACK_I8;
4151 MONO_ADD_INS (cfg->cbb, ins);
4153 /* g_assert_not_reached (); */
4155 #if HAVE_WRITE_BARRIERS
4157 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
4158 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
4162 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
/* --- other corlib intrinsics --- */
4166 } else if (cmethod->klass->image == mono_defaults.corlib) {
4167 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
4168 && strcmp (cmethod->klass->name, "Debugger") == 0) {
4169 MONO_INST_NEW (cfg, ins, OP_BREAK);
4170 MONO_ADD_INS (cfg->cbb, ins);
4173 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
4174 && strcmp (cmethod->klass->name, "Environment") == 0) {
/* Platform is known at JIT time, so this property folds to a constant. */
4176 EMIT_NEW_ICONST (cfg, ins, 1);
4178 EMIT_NEW_ICONST (cfg, ins, 0);
4182 } else if (cmethod->klass == mono_defaults.math_class) {
4184 * There is general branches code for Min/Max, but it does not work for
4186 * http://everything2.com/?node_id=1051618
4190 #ifdef MONO_ARCH_SIMD_INTRINSICS
4191 if (cfg->opt & MONO_OPT_SIMD) {
4192 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/* Fall back to arch-specific intrinsics (e.g. Math.* on x86). */
4198 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
4202 * This entry point could be used later for arbitrary method
4202 * This entry point could be used later for arbitrary method
4205 inline static MonoInst*
4206 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4207 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
/*
 * Redirect selected runtime-internal calls to faster equivalents.
 * Currently: String.InternalAllocateStr -> the GC's managed allocator,
 * unless string-allocation profiling is active.
 * NOTE(review): the fall-through 'return NULL;' is not visible in this
 * sampled excerpt.
 */
4209 if (method->klass == mono_defaults.string_class) {
4210 /* managed string allocation support */
4211 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_STRING_ALLOC)) {
4212 MonoInst *iargs [2];
4213 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4214 MonoMethod *managed_alloc = NULL;
4216 g_assert (vtable); /*Should not fail since it System.String*/
4217 #ifndef MONO_CROSS_COMPILE
4218 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
4222 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4223 iargs [1] = args [0];
4224 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
4231 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
/*
 * Before inlining a call, create a local variable for every argument
 * (including 'this') and emit stores of the stack values SP into them;
 * the inlined body then accesses arguments through cfg->args as usual.
 */
4233 MonoInst *store, *temp;
4236 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* 'this' has no entry in sig->params; derive its type from the stack slot. */
4237 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4240 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4241 * would be different than the MonoInst's used to represent arguments, and
4242 * the ldelema implementation can't deal with that.
4243 * Solution: When ldelema is used on an inline argument, create a var for
4244 * it, emit ldelema on that var, and emit the saving code below in
4245 * inline_method () if needed.
4247 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4248 cfg->args [i] = temp;
4249 /* This uses cfg->args [i] which is set by the preceding line */
4250 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4251 store->cil_code = sp [0]->cil_code;
4256 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4257 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4259 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4261 check_inline_called_method_name_limit (MonoMethod *called_method)
/*
 * Debug aid: only allow inlining of callees whose full name starts with the
 * prefix given in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT environment
 * variable. The env value is cached in a function-local static on first use;
 * an empty/unset value disables the filter.
 */
4264 static char *limit = NULL;
4266 if (limit == NULL) {
4267 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4269 if (limit_string != NULL)
4270 limit = limit_string;
4272 limit = (char *) "";
4275 if (limit [0] != '\0') {
4276 char *called_method_name = mono_method_full_name (called_method, TRUE);
4278 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4279 g_free (called_method_name);
4281 //return (strncmp_result <= 0);
4282 return (strncmp_result == 0);
4289 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4291 check_inline_caller_method_name_limit (MonoMethod *caller_method)
/*
 * Debug aid: only allow inlining into callers whose full name starts with
 * the prefix given in MONO_INLINE_CALLER_METHOD_NAME_LIMIT. Mirrors
 * check_inline_called_method_name_limit () for the caller side.
 */
4294 static char *limit = NULL;
4296 if (limit == NULL) {
4297 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4298 if (limit_string != NULL) {
4299 limit = limit_string;
4301 limit = (char *) "";
4305 if (limit [0] != '\0') {
4306 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
4308 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4309 g_free (caller_method_name);
4311 //return (strncmp_result <= 0);
4312 return (strncmp_result == 0);
4320 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4321 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
/*
 * Inline CMETHOD at the current point of CFG: save the per-method compiler
 * state, recursively run mono_method_to_ir () on the callee between fresh
 * start/end bblocks, then either splice the result in (merging bblocks where
 * possible) or roll back on failure. Returns the cost on success; the
 * failure-path return is outside this sampled excerpt.
 * The save/restore sequence of cfg fields below is order-critical: every
 * 'prev_*' must be restored exactly, or the outer compilation is corrupted.
 */
4323 MonoInst *ins, *rvar = NULL;
4324 MonoMethodHeader *cheader;
4325 MonoBasicBlock *ebblock, *sbblock;
4327 MonoMethod *prev_inlined_method;
4328 MonoInst **prev_locals, **prev_args;
4329 MonoType **prev_arg_types;
4330 guint prev_real_offset;
4331 GHashTable *prev_cbb_hash;
4332 MonoBasicBlock **prev_cil_offset_to_bb;
4333 MonoBasicBlock *prev_cbb;
4334 unsigned char* prev_cil_start;
4335 guint32 prev_cil_offset_to_bb_len;
4336 MonoMethod *prev_current_method;
4337 MonoGenericContext *prev_generic_context;
4338 gboolean ret_var_set, prev_ret_var_set;
4340 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
4342 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4343 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
4346 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4347 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
4351 if (cfg->verbose_level > 2)
4352 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4354 if (!cmethod->inline_info) {
4355 mono_jit_stats.inlineable_methods++;
4356 cmethod->inline_info = 1;
4358 /* allocate space to store the return value */
4359 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4360 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
4363 /* allocate local variables */
4364 cheader = mono_method_get_header (cmethod);
4365 prev_locals = cfg->locals;
4366 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4367 for (i = 0; i < cheader->num_locals; ++i)
4368 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4370 /* allocate start and end blocks */
4371 /* This is needed so if the inline is aborted, we can clean up */
4372 NEW_BBLOCK (cfg, sbblock);
4373 sbblock->real_offset = real_offset;
4375 NEW_BBLOCK (cfg, ebblock);
4376 ebblock->block_num = cfg->num_bblocks++;
4377 ebblock->real_offset = real_offset;
/* Save the outer method's compilation state before recursing. */
4379 prev_args = cfg->args;
4380 prev_arg_types = cfg->arg_types;
4381 prev_inlined_method = cfg->inlined_method;
4382 cfg->inlined_method = cmethod;
4383 cfg->ret_var_set = FALSE;
4384 cfg->inline_depth ++;
4385 prev_real_offset = cfg->real_offset;
4386 prev_cbb_hash = cfg->cbb_hash;
4387 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4388 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4389 prev_cil_start = cfg->cil_start;
4390 prev_cbb = cfg->cbb;
4391 prev_current_method = cfg->current_method;
4392 prev_generic_context = cfg->generic_context;
4393 prev_ret_var_set = cfg->ret_var_set;
4395 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4397 ret_var_set = cfg->ret_var_set;
/* Restore the outer method's state regardless of success. */
4399 cfg->inlined_method = prev_inlined_method;
4400 cfg->real_offset = prev_real_offset;
4401 cfg->cbb_hash = prev_cbb_hash;
4402 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4403 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4404 cfg->cil_start = prev_cil_start;
4405 cfg->locals = prev_locals;
4406 cfg->args = prev_args;
4407 cfg->arg_types = prev_arg_types;
4408 cfg->current_method = prev_current_method;
4409 cfg->generic_context = prev_generic_context;
4410 cfg->ret_var_set = prev_ret_var_set;
4411 cfg->inline_depth --;
/* Accept the inline if it was cheap enough or if it is mandatory. */
4413 if ((costs >= 0 && costs < 60) || inline_allways) {
4414 if (cfg->verbose_level > 2)
4415 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4417 mono_jit_stats.inlined_methods++;
4419 /* always add some code to avoid block split failures */
4420 MONO_INST_NEW (cfg, ins, OP_NOP);
4421 MONO_ADD_INS (prev_cbb, ins);
4423 prev_cbb->next_bb = sbblock;
4424 link_bblock (cfg, prev_cbb, sbblock);
4427 * Get rid of the begin and end bblocks if possible to aid local
4430 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4432 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4433 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4435 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4436 MonoBasicBlock *prev = ebblock->in_bb [0];
4437 mono_merge_basic_blocks (cfg, prev, ebblock);
4439 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4440 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4441 cfg->cbb = prev_cbb;
4449 * If the inlined method contains only a throw, then the ret var is not
4450 * set, so set it to a dummy value.
4453 static double r8_0 = 0.0;
4455 switch (rvar->type) {
4457 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4460 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4465 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4468 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4469 ins->type = STACK_R8;
4470 ins->inst_p0 = (void*)&r8_0;
4471 ins->dreg = rvar->dreg;
4472 MONO_ADD_INS (cfg->cbb, ins);
4475 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4478 g_assert_not_reached ();
4482 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
/* Inline rejected or failed: clear the error and roll back. */
4487 if (cfg->verbose_level > 2)
4488 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4489 cfg->exception_type = MONO_EXCEPTION_NONE;
4490 mono_loader_clear_error ();
4492 /* This gets rid of the newly added bblocks */
4493 cfg->cbb = prev_cbb;
4499 * Some of these comments may well be out-of-date.
4500 * Design decisions: we do a single pass over the IL code (and we do bblock
4501 * splitting/merging in the few cases when it's required: a back jump to an IL
4502 * address that was not already seen as bblock starting point).
4503 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4504 * Complex operations are decomposed in simpler ones right away. We need to let the
4505 * arch-specific code peek and poke inside this process somehow (except when the
4506 * optimizations can take advantage of the full semantic info of coarse opcodes).
4507 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4508 * MonoInst->opcode initially is the IL opcode or some simplification of that
4509 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4510 * opcode with value bigger than OP_LAST.
4511 * At this point the IR can be handed over to an interpreter, a dumb code generator
4512 * or to the optimizing code generator that will translate it to SSA form.
4514 * Profiling directed optimizations.
4515 * We may compile by default with few or no optimizations and instrument the code
4516 * or the user may indicate what methods to optimize the most either in a config file
4517 * or through repeated runs where the compiler applies offline the optimizations to
4518 * each method and then decides if it was worth it.
/* IL-verification helper macros: each one bails out of the current method
 * via UNVERIFIED when an IL invariant does not hold. */
4521 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
/* at least `num` values must be on the evaluation stack */
4522 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
/* pushing `num` more values must not exceed the declared max stack */
4523 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
/* argument/local indexes are range-checked with an unsigned compare, which
 * also rejects negative values in one test */
4524 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4525 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
/* the whole `size`-byte instruction must fit before the end of the IL body */
4526 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4527 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
/*
 * CHECK_TYPELOAD:
 *
 *   Abort compilation of the current method when KLASS is NULL or failed to
 * load: stash the class in cfg->exception_ptr and jump to the load_error
 * label (which raises the type-load failure).
 *   Fix: the body is wrapped in do { } while (0) so the macro expands to
 * exactly one statement. The previous bare `if (...) { ... }` form was not
 * statement-safe: used unbraced inside an `if`/`else` it either captured the
 * `else` (dangling-else) or left a stray `;` that broke the `else`. The
 * do/while idiom matches the file's other multi-statement macros
 * (INLINE_FAILURE, METHOD_ACCESS_FAILURE, ...), and all existing call sites
 * of the form `CHECK_TYPELOAD (klass);` are unaffected.
 */
#define CHECK_TYPELOAD(klass) do { if (!(klass) || (klass)->exception_type) { cfg->exception_ptr = (klass); goto load_error; } } while (0)
4530 /* offset from br.s -> br like opcodes */
/* i.e. the fixed distance between a short-form branch opcode value
 * (CEE_BR_S family, 1-byte displacement) and its long-form counterpart
 * (CEE_BR family, 4-byte displacement) */
4531 #define BIG_BRANCH_OFFSET 13
4534 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4536 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4538 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Pre-scan the IL stream [start, end), decoding each opcode with
 * mono_opcode_value (), and call GET_BBLOCK for every branch target (and for
 * the instruction following a branch) so basic-block boundaries exist before
 * the main IL-to-IR conversion runs.
 * NOTE(review): this extract has lines elided (the leading numbers are the
 * original line numbers); several case labels, `break`s and the ip-advance
 * code are not visible here.
 */
4542 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4544 unsigned char *ip = start;
4545 unsigned char *target;
4548 MonoBasicBlock *bblock;
4549 const MonoOpcode *opcode;
/* cli_addr is the IL offset of the opcode currently being decoded */
4552 cli_addr = ip - start;
4553 i = mono_opcode_value ((const guint8 **)&ip, end);
4556 opcode = &mono_opcodes [i];
/* dispatch on the operand kind to find targets / instruction length */
4557 switch (opcode->argument) {
4558 case MonoInlineNone:
4561 case MonoInlineString:
4562 case MonoInlineType:
4563 case MonoInlineField:
4564 case MonoInlineMethod:
4567 case MonoShortInlineR:
4574 case MonoShortInlineVar:
4575 case MonoShortInlineI:
/* short branch: 1-byte signed displacement relative to the next insn */
4578 case MonoShortInlineBrTarget:
4579 target = start + cli_addr + 2 + (signed char)ip [1];
4580 GET_BBLOCK (cfg, bblock, target);
/* the fall-through successor of a branch also starts a bblock */
4583 GET_BBLOCK (cfg, bblock, ip);
/* long branch: 4-byte signed displacement */
4585 case MonoInlineBrTarget:
4586 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4587 GET_BBLOCK (cfg, bblock, target);
4590 GET_BBLOCK (cfg, bblock, ip);
4592 case MonoInlineSwitch: {
4593 guint32 n = read32 (ip + 1);
/* the instruction after the n-entry jump table starts a bblock */
4596 cli_addr += 5 + 4 * n;
4597 target = start + cli_addr;
4598 GET_BBLOCK (cfg, bblock, target);
/* each table entry is a displacement relative to the end of the table */
4600 for (j = 0; j < n; ++j) {
4601 target = start + cli_addr + (gint32)read32 (ip);
4602 GET_BBLOCK (cfg, bblock, target);
4612 g_assert_not_reached ();
/* mark the bblock containing a throw as out-of-line — presumably so the
 * code layout pass can move it off the hot path; confirm against the
 * consumers of bblock->out_of_line */
4615 if (i == CEE_THROW) {
4616 unsigned char *bb_start = ip - 1;
4618 /* Find the start of the bblock containing the throw */
4620 while ((bb_start >= start) && !bblock) {
4621 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
4625 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN in the context of method M. For wrapper methods the token
 * is resolved against the wrapper's own data (mono_method_get_wrapper_data);
 * otherwise it is looked up as a metadata token in M's image. Unlike
 * mini_get_method (), open constructed generic methods are allowed through.
 */
4634 static inline MonoMethod *
4635 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4639 if (m->wrapper_type != MONO_WRAPPER_NONE)
4640 return mono_method_get_wrapper_data (m, token);
4642 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when no generic-sharing context
 * is active it additionally checks whether the resolved method's declaring
 * class is still an open constructed type (the rejection action taken on
 * that condition is elided in this extract).
 */
4647 static inline MonoMethod *
4648 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4650 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
4652 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve a type TOKEN to a MonoClass: wrapper methods resolve it against
 * their wrapper data, everything else through mono_class_get_full () in the
 * method's image; the class is then initialized before being returned.
 */
4658 static inline MonoClass*
4659 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4663 if (method->wrapper_type != MONO_WRAPPER_NONE)
4664 klass = mono_method_get_wrapper_data (method, token);
4666 klass = mono_class_get_full (method->klass->image, token, context);
4668 mono_class_init (klass);
4673 * Returns TRUE if the JIT should abort inlining because "callee"
4674 * is influenced by security attributes.
/*
 * check_linkdemand: CAS declarative-security handling for a caller->callee
 * reference. Evaluates the callee's link demands; on an ECMA link demand it
 * emits code throwing a SecurityException at the call site, otherwise a
 * failed demand is recorded on CFG as MONO_EXCEPTION_SECURITY_LINKDEMAND.
 */
4677 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
/* demands are only evaluated when not compiling the caller itself
 * (i.e. during inlining) and the callee carries declarative security */
4681 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
4685 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
4686 if (result == MONO_JIT_SECURITY_OK)
4689 if (result == MONO_JIT_LINKDEMAND_ECMA) {
4690 /* Generate code to throw a SecurityException before the actual call/link */
4691 MonoSecurityManager *secman = mono_security_manager_get_methods ();
/* NOTE(review): the constant 4 presumably encodes the security action
 * passed to the managed helper — confirm against its signature */
4694 NEW_ICONST (cfg, args [0], 4);
4695 NEW_METHODCONST (cfg, args [1], caller);
4696 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
4697 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
4698 /* don't hide previous results */
4699 cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
4700 cfg->exception_data = result;
/*
 * throw_exception:
 *
 *   Return (caching it in a function-local static) the managed
 * SecurityManager.ThrowException (exception) method used by
 * emit_throw_exception () below.
 * NOTE(review): the lazily-initialized static is not thread-safe on its
 * own — presumably callers run under the JIT's locking; confirm.
 */
4708 throw_exception (void)
4710 static MonoMethod *method = NULL;
4713 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4714 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit IR calling SecurityManager.ThrowException (EX) so the pre-built
 * exception object is thrown at runtime at the current emission point.
 */
4721 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
4723 MonoMethod *thrower = throw_exception ();
/* the exception object is embedded as a pointer constant argument */
4726 EMIT_NEW_PCONST (cfg, args [0], ex);
4727 mono_emit_method_call (cfg, thrower, args, NULL);
4731 * Return the original method if a wrapper is specified. We can only access
4732 * the custom attributes from the original method.
4735 get_original_method (MonoMethod *method)
/* non-wrappers are returned as-is (return elided in this extract) */
4737 if (method->wrapper_type == MONO_WRAPPER_NONE)
4740 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
4741 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
4744 /* in other cases we need to find the original method */
4745 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security: when FIELD belongs to a [SecurityCritical] class,
 * emit code throwing a FieldAccessException if the (unwrapped) CALLER is
 * Transparent; SafeCritical and Critical callers are allowed through.
 */
4749 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
4750 MonoBasicBlock *bblock, unsigned char *ip)
4752 /* there's no restriction to access Transparent or SafeCritical fields, so we only check calls to Critical methods */
4753 if (mono_security_core_clr_class_level (mono_field_get_parent (field)) != MONO_SECURITY_CORE_CLR_CRITICAL)
4756 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4757 caller = get_original_method (caller);
4761 /* caller is Critical! only SafeCritical and Critical callers can access the field, so we throw if caller is Transparent */
4762 if (mono_security_core_clr_method_level (caller, TRUE) == MONO_SECURITY_CORE_CLR_TRANSPARENT)
4763 emit_throw_exception (cfg, mono_get_exception_field_access ());
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security: when CALLEE is [SecurityCritical], emit code throwing
 * a MethodAccessException if the (unwrapped) CALLER is Transparent;
 * SafeCritical and Critical callers are allowed through.
 */
4767 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4768 MonoBasicBlock *bblock, unsigned char *ip)
4770 /* there's no restriction to call Transparent or SafeCritical code, so we only check calls to Critical methods */
4771 if (mono_security_core_clr_method_level (callee, TRUE) != MONO_SECURITY_CORE_CLR_CRITICAL)
4774 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4775 caller = get_original_method (caller);
4779 /* caller is Critical! only SafeCritical and Critical callers can call it, so we throw if the caller is Transparent */
4780 if (mono_security_core_clr_method_level (caller, TRUE) == MONO_SECURITY_CORE_CLR_TRANSPARENT)
4781 emit_throw_exception (cfg, mono_get_exception_method_access ());
4785 * Check that the IL instructions at ip are the array initialization
4786 * sequence and return the pointer to the data and the size.
4789 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
4792 * newarr[System.Int32]
4794 * ldtoken field valuetype ...
4795 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* match `dup; ldtoken <field>; call ...`: ip[5] is the high byte of the
 * ldtoken operand, 0x04 being the FieldDef token table */
4797 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
4798 guint32 token = read32 (ip + 7);
4799 guint32 field_token = read32 (ip + 2);
/* low 24 bits of a metadata token are the row index */
4800 guint32 field_index = field_token & 0xffffff;
4802 const char *data_ptr;
4804 MonoMethod *cmethod;
4805 MonoClass *dummy_class;
4806 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
4812 *out_field_token = field_token;
4814 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* only the corlib RuntimeHelpers.InitializeArray call qualifies */
4817 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
/* only element types whose raw blob layout matches the in-memory layout
 * are handled (multi-byte types only on little-endian targets) */
4819 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
4820 case MONO_TYPE_BOOLEAN:
4824 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
4825 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
4826 case MONO_TYPE_CHAR:
4836 return NULL; /* stupid ARM FP swapped format */
/* the initializer blob must be large enough for the array contents */
4846 if (size > mono_type_size (field->type, &dummy_align))
4849 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
4850 if (!method->klass->image->dynamic) {
4851 field_index = read32 (ip + 2) & 0xffffff;
/* for on-disk images, resolve the field's RVA and map it to memory */
4852 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
4853 data_ptr = mono_image_rva_map (method->klass->image, rva);
4854 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
4855 /* for aot code we do the lookup on load */
4856 if (aot && data_ptr)
4857 return GUINT_TO_POINTER (rva);
4859 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* dynamic (Reflection.Emit) images keep the data in the field itself */
4861 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG, formatting a message with the
 * full method name and a one-instruction disassembly at IP (or a
 * placeholder when the method body is empty).
 */
4869 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
4871 char *method_fname = mono_method_full_name (method, TRUE);
4874 if (mono_method_get_header (method)->code_size == 0)
4875 method_code = g_strdup ("method body is empty.");
4877 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
4878 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
4879 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
/* the formatted message owns copies; free the intermediates */
4880 g_free (method_fname);
4881 g_free (method_code);
/*
 * set_exception_object:
 *
 *   Make CFG fail with a pre-built managed exception object. The
 * exception_ptr slot is registered as a GC root so the object stays alive
 * until the failure is reported.
 */
4885 set_exception_object (MonoCompile *cfg, MonoException *exception)
4887 cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
4888 MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
4889 cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 *
 *   Return whether KLASS is a reference type, first resolving generic type
 * variables through the generic-sharing context when one is active.
 */
4893 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4897 if (cfg->generic_sharing_context)
4898 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
4900 type = &klass->byval_arg;
4901 return MONO_TYPE_IS_REFERENCE (type);
4905 * mono_decompose_array_access_opts:
4907 * Decompose array access opcodes.
4908 * This should be in decompose.c, but it emits calls so it has to stay here until
4909 * the old JIT is gone.
4912 mono_decompose_array_access_opts (MonoCompile *cfg)
4914 MonoBasicBlock *bb, *first_bb;
4917 * Unlike decompose_long_opts, this pass does not alter the CFG of the method so it
4918 * can be executed anytime. It should be run before decompose_long
4922 * Create a dummy bblock and emit code into it so we can use the normal
4923 * code generation macros.
4925 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4926 first_bb = cfg->cbb;
4928 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4930 MonoInst *prev = NULL;
4932 MonoInst *iargs [3];
/* skip bblocks with no array-access opcodes */
4935 if (!bb->has_array_access)
4938 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE DECOMPOSE-ARRAY-ACCESS-OPTS ");
4940 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4946 for (ins = bb->code; ins; ins = ins->next) {
4947 switch (ins->opcode) {
/* array length: null-check the array, then load MonoArray->max_length */
4949 MONO_EMIT_NULL_CHECK (cfg, ins->sreg1);
4950 NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg, ins->sreg1,
4951 G_STRUCT_OFFSET (MonoArray, max_length));
4952 MONO_ADD_INS (cfg->cbb, dest);
4954 case OP_BOUNDS_CHECK:
/* NOTE(review): the trailing `\` below looks like a leftover macro
 * line-continuation; it merely splices this line with the next. */
4955 MONO_EMIT_NULL_CHECK (cfg, ins->sreg1); \
4956 MONO_ARCH_EMIT_BOUNDS_CHECK (cfg, ins->sreg1, ins->inst_imm, ins->sreg2);
/* OP_NEWARR: in shared (domain-neutral) code go through the generic
 * mono_array_new icall with an explicit domain argument */
4959 if (cfg->opt & MONO_OPT_SHARED) {
4960 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4961 EMIT_NEW_CLASSCONST (cfg, iargs [1], ins->inst_newa_class);
4962 MONO_INST_NEW (cfg, iargs [2], OP_MOVE);
4963 iargs [2]->dreg = ins->sreg1;
4965 dest = mono_emit_jit_icall (cfg, mono_array_new, iargs);
4966 dest->dreg = ins->dreg;
/* otherwise use the vtable directly, preferring a GC-provided managed
 * allocator when one exists */
4968 MonoVTable *vtable = mono_class_vtable (cfg->domain, mono_array_class_get (ins->inst_newa_class, 1));
4969 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (vtable, 1);
4971 g_assert (vtable); /*This shall not fail since we check for this condition on OP_NEWARR creation*/
4972 NEW_VTABLECONST (cfg, iargs [0], vtable);
4973 MONO_ADD_INS (cfg->cbb, iargs [0]);
4974 MONO_INST_NEW (cfg, iargs [1], OP_MOVE);
4975 iargs [1]->dreg = ins->sreg1;
4978 dest = mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4980 dest = mono_emit_jit_icall (cfg, mono_array_new_specific, iargs);
4981 dest->dreg = ins->dreg;
/* string length: null-check, then load MonoString->length */
4985 MONO_EMIT_NULL_CHECK (cfg, ins->sreg1);
4986 NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg,
4987 ins->sreg1, G_STRUCT_OFFSET (MonoString, length));
4988 MONO_ADD_INS (cfg->cbb, dest);
4994 g_assert (cfg->cbb == first_bb);
/* if anything was emitted, splice it into BB in place of INS */
4996 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
4997 /* Replace the original instruction with the new code sequence */
4999 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
5000 first_bb->code = first_bb->last_ins = NULL;
5001 first_bb->in_count = first_bb->out_count = 0;
5002 cfg->cbb = first_bb;
5009 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER DECOMPOSE-ARRAY-ACCESS-OPTS ");
5019 #ifdef MONO_ARCH_SOFT_FLOAT
5022 * mono_decompose_soft_float:
5024 * Soft float support on ARM. We store each double value in a pair of integer vregs,
5025 * similar to long support on 32 bit platforms. 32 bit float values require special
5026 * handling when used as locals, arguments, and in calls.
5027 * One big problem with soft-float is that there are few r4 test cases in our test suite.
5030 mono_decompose_soft_float (MonoCompile *cfg)
5032 MonoBasicBlock *bb, *first_bb;
5035 * This pass creates long opcodes, so it should be run before decompose_long_opts ().
5039 * Create a dummy bblock and emit code into it so we can use the normal
5040 * code generation macros.
5042 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
5043 first_bb = cfg->cbb;
5045 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
5047 MonoInst *prev = NULL;
5050 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE HANDLE-SOFT-FLOAT ");
5052 cfg->cbb->code = cfg->cbb->last_ins = NULL;
5058 for (ins = bb->code; ins; ins = ins->next) {
5059 const char *spec = INS_INFO (ins->opcode);
5061 /* Most fp operations are handled automatically by opcode emulation */
5063 switch (ins->opcode) {
/* r8 constants become 64-bit integer constants via a double/long pun */
5066 d.vald = *(double*)ins->inst_p0;
5067 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
5072 /* We load the r8 value */
5073 d.vald = *(float*)ins->inst_p0;
5074 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
/* fp moves become long (vreg-pair) moves */
5078 ins->opcode = OP_LMOVE;
/* +1 / +2 select the low/high halves of the fp vreg pair */
5081 ins->opcode = OP_MOVE;
5082 ins->sreg1 = ins->sreg1 + 1;
5085 ins->opcode = OP_MOVE;
5086 ins->sreg1 = ins->sreg1 + 2;
5089 int reg = ins->sreg1;
5091 ins->opcode = OP_SETLRET;
5093 ins->sreg1 = reg + 1;
5094 ins->sreg2 = reg + 2;
/* r8 loads/stores are the same size as i8 loads/stores */
5097 case OP_LOADR8_MEMBASE:
5098 ins->opcode = OP_LOADI8_MEMBASE;
5100 case OP_STORER8_MEMBASE_REG:
5101 ins->opcode = OP_STOREI8_MEMBASE_REG;
/* r4 stores need an r8->r4 narrowing, done by the mono_fstore_r4 icall */
5103 case OP_STORER4_MEMBASE_REG: {
5104 MonoInst *iargs [2];
5107 /* Arg 1 is the double value */
5108 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
5109 iargs [0]->dreg = ins->sreg1;
5111 /* Arg 2 is the address to store to */
5112 addr_reg = mono_alloc_preg (cfg);
5113 EMIT_NEW_BIALU_IMM (cfg, iargs [1], OP_PADD_IMM, addr_reg, ins->inst_destbasereg, ins->inst_offset);
5114 mono_emit_jit_icall (cfg, mono_fstore_r4, iargs);
/* r4 loads widen to r8 via the mono_fload_r4 icall */
5118 case OP_LOADR4_MEMBASE: {
5119 MonoInst *iargs [1];
5123 addr_reg = mono_alloc_preg (cfg);
5124 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, addr_reg, ins->inst_basereg, ins->inst_offset);
5125 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
5126 conv->dreg = ins->dreg;
5131 case OP_FCALL_MEMBASE: {
5132 MonoCallInst *call = (MonoCallInst*)ins;
5133 if (call->signature->ret->type == MONO_TYPE_R4) {
5134 MonoCallInst *call2;
5135 MonoInst *iargs [1];
5138 /* Convert the call into a call returning an int */
5139 MONO_INST_NEW_CALL (cfg, call2, OP_CALL);
5140 memcpy (call2, call, sizeof (MonoCallInst));
5141 switch (ins->opcode) {
5143 call2->inst.opcode = OP_CALL;
5146 call2->inst.opcode = OP_CALL_REG;
5148 case OP_FCALL_MEMBASE:
5149 call2->inst.opcode = OP_CALL_MEMBASE;
5152 g_assert_not_reached ();
5154 call2->inst.dreg = mono_alloc_ireg (cfg);
5155 MONO_ADD_INS (cfg->cbb, (MonoInst*)call2);
5157 /* FIXME: Optimize this */
5159 /* Emit an r4->r8 conversion */
5160 EMIT_NEW_VARLOADA_VREG (cfg, iargs [0], call2->inst.dreg, &mono_defaults.int32_class->byval_arg);
5161 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
5162 conv->dreg = ins->dreg;
5164 /* The call sequence might include fp ins */
/* r8-returning calls simply become long-returning calls */
5167 switch (ins->opcode) {
5169 ins->opcode = OP_LCALL;
5172 ins->opcode = OP_LCALL_REG;
5174 case OP_FCALL_MEMBASE:
5175 ins->opcode = OP_LCALL_MEMBASE;
5178 g_assert_not_reached ();
5184 MonoJitICallInfo *info;
5185 MonoInst *iargs [2];
5186 MonoInst *call, *cmp, *br;
5188 /* Convert fcompare+fbcc to icall+icompare+beq */
/* the emulation icall is looked up from the *branch* opcode (ins->next) */
5190 info = mono_find_jit_opcode_emulation (ins->next->opcode);
5193 /* Create dummy MonoInst's for the arguments */
5194 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
5195 iargs [0]->dreg = ins->sreg1;
5196 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
5197 iargs [1]->dreg = ins->sreg2;
5199 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
5201 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
5202 cmp->sreg1 = call->dreg;
5204 MONO_ADD_INS (cfg->cbb, cmp);
/* branch on the icall's boolean result, to the original targets */
5206 MONO_INST_NEW (cfg, br, OP_IBNE_UN);
5207 br->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * 2);
5208 br->inst_true_bb = ins->next->inst_true_bb;
5209 br->inst_false_bb = ins->next->inst_false_bb;
5210 MONO_ADD_INS (cfg->cbb, br);
5212 /* The call sequence might include fp ins */
5215 /* Skip fbcc or fccc */
5216 NULLIFY_INS (ins->next);
5224 MonoJitICallInfo *info;
5225 MonoInst *iargs [2];
5228 /* Convert fccc to icall+icompare+iceq */
5230 info = mono_find_jit_opcode_emulation (ins->opcode);
5233 /* Create dummy MonoInst's for the arguments */
5234 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
5235 iargs [0]->dreg = ins->sreg1;
5236 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
5237 iargs [1]->dreg = ins->sreg2;
5239 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
/* materialize the comparison result as 0/1 in the original dreg */
5241 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, call->dreg, 1);
5242 MONO_EMIT_NEW_UNALU (cfg, OP_ICEQ, ins->dreg, -1);
5244 /* The call sequence might include fp ins */
/* ckfinite: call mono_isfinite, raise ArithmeticException otherwise */
5249 MonoInst *iargs [2];
5250 MonoInst *call, *cmp;
5252 /* Convert to icall+icompare+cond_exc+move */
5254 /* Create dummy MonoInst's for the arguments */
5255 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
5256 iargs [0]->dreg = ins->sreg1;
5258 call = mono_emit_jit_icall (cfg, mono_isfinite, iargs);
5260 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
5261 cmp->sreg1 = call->dreg;
5263 MONO_ADD_INS (cfg->cbb, cmp);
5265 MONO_EMIT_NEW_COND_EXC (cfg, INE_UN, "ArithmeticException");
5267 /* Do the assignment if the value is finite */
5268 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, ins->dreg, ins->sreg1);
/* sanity: by now no instruction may still use an fp vreg */
5274 if (spec [MONO_INST_SRC1] == 'f' || spec [MONO_INST_SRC2] == 'f' || spec [MONO_INST_DEST] == 'f') {
5275 mono_print_ins (ins);
5276 g_assert_not_reached ();
5281 g_assert (cfg->cbb == first_bb);
/* if anything was emitted, splice it into BB in place of INS */
5283 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
5284 /* Replace the original instruction with the new code sequence */
5286 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
5287 first_bb->code = first_bb->last_ins = NULL;
5288 first_bb->in_count = first_bb->out_count = 0;
5289 cfg->cbb = first_bb;
5296 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER HANDLE-SOFT-FLOAT ");
/* this pass produced long opcodes, so lower them now */
5299 mono_decompose_long_opts (cfg);
/*
 * emit_stloc_ir:
 *
 *   Store the top-of-stack value *SP into local N. When the store would be
 * a plain reg-reg move of a constant that is also the last emitted
 * instruction, just retarget the constant's dreg to the local's register
 * instead of emitting a separate move.
 */
5305 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5308 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5309 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5310 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5311 /* Optimize reg-reg moves away */
5313 * Can't optimize other opcodes, since sp[0] might point to
5314 * the last ins of a decomposed opcode.
5316 sp [0]->dreg = (cfg)->locals [n]->dreg;
5318 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5323 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir: recognizes the `ldloca <n>; initobj <type>`
 * pattern (within a single bblock) and replaces it with a direct
 * zero-initialization of the local, avoiding the address-taken local.
 * Returns the new ip on success (return path elided in this extract).
 */
5326 static inline unsigned char *
5327 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
5336 local = read16 (ip + 2);
/* the following insn must be initobj and must live in the same bblock */
5340 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5341 gboolean skip = FALSE;
5343 /* From the INITOBJ case */
5344 token = read32 (ip + 2);
5345 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5346 CHECK_TYPELOAD (klass);
/* reference types initobj to NULL; value types are zeroed wholesale */
5347 if (generic_class_is_reference_type (cfg, klass)) {
5348 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5349 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5350 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5351 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5352 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/*
 * is_exception_class:
 *
 *   Return whether CLASS is System.Exception or derives from it, by
 * walking the parent chain.
 */
5365 is_exception_class (MonoClass *class)
5368 if (class == mono_defaults.exception_class)
5370 class = class->parent;
5376 * mono_method_to_ir:
5378 * Translate the .net IL into linear IR.
5381 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5382 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5383 guint inline_offset, gboolean is_virtual_call)
5385 MonoInst *ins, **sp, **stack_start;
5386 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5387 MonoMethod *cmethod, *method_definition;
5388 MonoInst **arg_array;
5389 MonoMethodHeader *header;
5391 guint32 token, ins_flag;
5393 MonoClass *constrained_call = NULL;
5394 unsigned char *ip, *end, *target, *err_pos;
5395 static double r8_0 = 0.0;
5396 MonoMethodSignature *sig;
5397 MonoGenericContext *generic_context = NULL;
5398 MonoGenericContainer *generic_container = NULL;
5399 MonoType **param_types;
5400 int i, n, start_new_bblock, dreg;
5401 int num_calls = 0, inline_costs = 0;
5402 int breakpoint_id = 0;
5404 MonoBoolean security, pinvoke;
5405 MonoSecurityManager* secman = NULL;
5406 MonoDeclSecurityActions actions;
5407 GSList *class_inits = NULL;
5408 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5410 gboolean init_locals, seq_points;
5412 /* serialization and xdomain stuff may need access to private fields and methods */
5413 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5414 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5415 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5416 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5417 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5418 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5420 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5422 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5423 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5424 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5425 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5427 image = method->klass->image;
5428 header = mono_method_get_header (method);
5429 generic_container = mono_method_get_generic_container (method);
5430 sig = mono_method_signature (method);
5431 num_args = sig->hasthis + sig->param_count;
5432 ip = (unsigned char*)header->code;
5433 cfg->cil_start = ip;
5434 end = ip + header->code_size;
5435 mono_jit_stats.cil_code_size += header->code_size;
5436 init_locals = header->init_locals;
5438 seq_points = cfg->gen_seq_points && cfg->method == method;
5441 * Methods without init_locals set could cause asserts in various passes
5446 method_definition = method;
5447 while (method_definition->is_inflated) {
5448 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5449 method_definition = imethod->declaring;
5452 /* SkipVerification is not allowed if core-clr is enabled */
5453 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5455 dont_verify_stloc = TRUE;
5458 if (!dont_verify && mini_method_verify (cfg, method_definition))
5459 goto exception_exit;
5461 if (mono_debug_using_mono_debugger ())
5462 cfg->keep_cil_nops = TRUE;
5464 if (sig->is_inflated)
5465 generic_context = mono_method_get_context (method);
5466 else if (generic_container)
5467 generic_context = &generic_container->context;
5468 cfg->generic_context = generic_context;
5470 if (!cfg->generic_sharing_context)
5471 g_assert (!sig->has_type_parameters);
5473 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5474 g_assert (method->is_inflated);
5475 g_assert (mono_method_get_context (method)->method_inst);
5477 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5478 g_assert (sig->generic_param_count);
5480 if (cfg->method == method) {
5481 cfg->real_offset = 0;
5483 cfg->real_offset = inline_offset;
5486 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5487 cfg->cil_offset_to_bb_len = header->code_size;
5489 cfg->current_method = method;
5491 if (cfg->verbose_level > 2)
5492 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5494 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5496 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5497 for (n = 0; n < sig->param_count; ++n)
5498 param_types [n + sig->hasthis] = sig->params [n];
5499 cfg->arg_types = param_types;
5501 dont_inline = g_list_prepend (dont_inline, method);
5502 if (cfg->method == method) {
5504 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5505 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5508 NEW_BBLOCK (cfg, start_bblock);
5509 cfg->bb_entry = start_bblock;
5510 start_bblock->cil_code = NULL;
5511 start_bblock->cil_length = 0;
5514 NEW_BBLOCK (cfg, end_bblock);
5515 cfg->bb_exit = end_bblock;
5516 end_bblock->cil_code = NULL;
5517 end_bblock->cil_length = 0;
5518 g_assert (cfg->num_bblocks == 2);
5520 arg_array = cfg->args;
5522 if (header->num_clauses) {
5523 cfg->spvars = g_hash_table_new (NULL, NULL);
5524 cfg->exvars = g_hash_table_new (NULL, NULL);
5526 /* handle exception clauses */
5527 for (i = 0; i < header->num_clauses; ++i) {
5528 MonoBasicBlock *try_bb;
5529 MonoExceptionClause *clause = &header->clauses [i];
5530 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5531 try_bb->real_offset = clause->try_offset;
5532 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5533 tblock->real_offset = clause->handler_offset;
5534 tblock->flags |= BB_EXCEPTION_HANDLER;
5536 link_bblock (cfg, try_bb, tblock);
5538 if (*(ip + clause->handler_offset) == CEE_POP)
5539 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5541 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5542 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5543 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5544 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5545 MONO_ADD_INS (tblock, ins);
5547 /* todo: is a fault block unsafe to optimize? */
5548 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5549 tblock->flags |= BB_EXCEPTION_UNSAFE;
5553 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5555 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5557 /* catch and filter blocks get the exception object on the stack */
5558 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5559 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5560 MonoInst *dummy_use;
5562 /* mostly like handle_stack_args (), but just sets the input args */
5563 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5564 tblock->in_scount = 1;
5565 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5566 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5569 * Add a dummy use for the exvar so its liveness info will be
5573 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5575 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5576 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5577 tblock->flags |= BB_EXCEPTION_HANDLER;
5578 tblock->real_offset = clause->data.filter_offset;
5579 tblock->in_scount = 1;
5580 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5581 /* The filter block shares the exvar with the handler block */
5582 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5583 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5584 MONO_ADD_INS (tblock, ins);
5588 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5589 clause->data.catch_class &&
5590 cfg->generic_sharing_context &&
5591 mono_class_check_context_used (clause->data.catch_class)) {
5593 * In shared generic code with catch
5594 * clauses containing type variables
5595 * the exception handling code has to
5596 * be able to get to the rgctx.
5597 * Therefore we have to make sure that
5598 * the vtable/mrgctx argument (for
5599 * static or generic methods) or the
5600 * "this" argument (for non-static
5601 * methods) are live.
5603 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5604 mini_method_get_context (method)->method_inst ||
5605 method->klass->valuetype) {
5606 mono_get_vtable_var (cfg);
5608 MonoInst *dummy_use;
5610 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5615 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5616 cfg->cbb = start_bblock;
5617 cfg->args = arg_array;
5618 mono_save_args (cfg, sig, inline_args);
5621 /* FIRST CODE BLOCK */
5622 NEW_BBLOCK (cfg, bblock);
5623 bblock->cil_code = ip;
5627 ADD_BBLOCK (cfg, bblock);
5629 if (cfg->method == method) {
5630 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5631 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5632 MONO_INST_NEW (cfg, ins, OP_BREAK);
5633 MONO_ADD_INS (bblock, ins);
5637 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5638 secman = mono_security_manager_get_methods ();
5640 security = (secman && mono_method_has_declsec (method));
5641 /* at this point having security doesn't mean we have any code to generate */
5642 if (security && (cfg->method == method)) {
5643 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5644 * And we do not want to enter the next section (with allocation) if we
5645 * have nothing to generate */
5646 security = mono_declsec_get_demands (method, &actions);
5649 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5650 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5652 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5653 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5654 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5656 /* unless the method or its class has the [SuppressUnmanagedCodeSecurity] attribute */
5657 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5661 mono_custom_attrs_free (custom);
5664 custom = mono_custom_attrs_from_class (wrapped->klass);
5665 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5669 mono_custom_attrs_free (custom);
5672 /* not a P/Invoke after all */
5677 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5678 /* we use a separate basic block for the initialization code */
5679 NEW_BBLOCK (cfg, init_localsbb);
5680 cfg->bb_init = init_localsbb;
5681 init_localsbb->real_offset = cfg->real_offset;
5682 start_bblock->next_bb = init_localsbb;
5683 init_localsbb->next_bb = bblock;
5684 link_bblock (cfg, start_bblock, init_localsbb);
5685 link_bblock (cfg, init_localsbb, bblock);
5687 cfg->cbb = init_localsbb;
5689 start_bblock->next_bb = bblock;
5690 link_bblock (cfg, start_bblock, bblock);
5693 /* at this point we know, if security is TRUE, that some code needs to be generated */
5694 if (security && (cfg->method == method)) {
5697 mono_jit_stats.cas_demand_generation++;
5699 if (actions.demand.blob) {
5700 /* Add code for SecurityAction.Demand */
5701 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5702 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5703 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5704 mono_emit_method_call (cfg, secman->demand, args, NULL);
5706 if (actions.noncasdemand.blob) {
5707 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5708 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5709 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5710 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5711 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5712 mono_emit_method_call (cfg, secman->demand, args, NULL);
5714 if (actions.demandchoice.blob) {
5715 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5716 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5717 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5718 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5719 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5723 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5725 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5728 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5729 /* check if this is native code, e.g. an icall or a p/invoke */
5730 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5731 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5733 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
5734 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
5736 /* if this is a native call then it can only be JITted from platform code */
5737 if ((icall || pinvk) && method->klass && method->klass->image) {
5738 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
5739 MonoException *ex = icall ? mono_get_exception_security () :
5740 mono_get_exception_method_access ();
5741 emit_throw_exception (cfg, ex);
5748 if (header->code_size == 0)
5751 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5756 if (cfg->method == method)
5757 mono_debug_init_method (cfg, bblock, breakpoint_id);
5759 for (n = 0; n < header->num_locals; ++n) {
5760 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5765 /* We force the vtable variable here for all shared methods
5766 for the possibility that they might show up in a stack
5767 trace where their exact instantiation is needed. */
5768 if (cfg->generic_sharing_context && method == cfg->method) {
5769 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5770 mini_method_get_context (method)->method_inst ||
5771 method->klass->valuetype) {
5772 mono_get_vtable_var (cfg);
5774 /* FIXME: Is there a better way to do this?
5775 We need the variable live for the duration
5776 of the whole method. */
5777 cfg->args [0]->flags |= MONO_INST_INDIRECT;
5781 /* add a check for this != NULL to inlined methods */
5782 if (is_virtual_call) {
5785 NEW_ARGLOAD (cfg, arg_ins, 0);
5786 MONO_ADD_INS (cfg->cbb, arg_ins);
5787 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
5790 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5791 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5794 start_new_bblock = 0;
5798 if (cfg->method == method)
5799 cfg->real_offset = ip - header->code;
5801 cfg->real_offset = inline_offset;
5806 if (start_new_bblock) {
5807 bblock->cil_length = ip - bblock->cil_code;
5808 if (start_new_bblock == 2) {
5809 g_assert (ip == tblock->cil_code);
5811 GET_BBLOCK (cfg, tblock, ip);
5813 bblock->next_bb = tblock;
5816 start_new_bblock = 0;
5817 for (i = 0; i < bblock->in_scount; ++i) {
5818 if (cfg->verbose_level > 3)
5819 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5820 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5824 g_slist_free (class_inits);
5827 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5828 link_bblock (cfg, bblock, tblock);
5829 if (sp != stack_start) {
5830 handle_stack_args (cfg, stack_start, sp - stack_start);
5832 CHECK_UNVERIFIABLE (cfg);
5834 bblock->next_bb = tblock;
5837 for (i = 0; i < bblock->in_scount; ++i) {
5838 if (cfg->verbose_level > 3)
5839 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5840 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5843 g_slist_free (class_inits);
5849 * Sequence points are points where the debugger can place a breakpoint.
5850 * Currently, we generate these automatically at points where the IL
5853 if (seq_points && sp == stack_start) {
5854 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
5855 MONO_ADD_INS (cfg->cbb, ins);
5858 bblock->real_offset = cfg->real_offset;
5860 if ((cfg->method == method) && cfg->coverage_info) {
5861 guint32 cil_offset = ip - header->code;
5862 cfg->coverage_info->data [cil_offset].cil_code = ip;
5864 /* TODO: Use an increment here */
5865 #if defined(TARGET_X86)
5866 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5867 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5869 MONO_ADD_INS (cfg->cbb, ins);
5871 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5872 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5876 if (cfg->verbose_level > 3)
5877 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5881 if (cfg->keep_cil_nops)
5882 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
5884 MONO_INST_NEW (cfg, ins, OP_NOP);
5886 MONO_ADD_INS (bblock, ins);
5889 MONO_INST_NEW (cfg, ins, OP_BREAK);
5891 MONO_ADD_INS (bblock, ins);
5897 CHECK_STACK_OVF (1);
5898 n = (*ip)-CEE_LDARG_0;
5900 EMIT_NEW_ARGLOAD (cfg, ins, n);
5908 CHECK_STACK_OVF (1);
5909 n = (*ip)-CEE_LDLOC_0;
5911 EMIT_NEW_LOCLOAD (cfg, ins, n);
5920 n = (*ip)-CEE_STLOC_0;
5923 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5925 emit_stloc_ir (cfg, sp, header, n);
5932 CHECK_STACK_OVF (1);
5935 EMIT_NEW_ARGLOAD (cfg, ins, n);
5941 CHECK_STACK_OVF (1);
5944 NEW_ARGLOADA (cfg, ins, n);
5945 MONO_ADD_INS (cfg->cbb, ins);
5955 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5957 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5962 CHECK_STACK_OVF (1);
5965 EMIT_NEW_LOCLOAD (cfg, ins, n);
5969 case CEE_LDLOCA_S: {
5970 unsigned char *tmp_ip;
5972 CHECK_STACK_OVF (1);
5973 CHECK_LOCAL (ip [1]);
5975 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
5981 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5990 CHECK_LOCAL (ip [1]);
5991 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5993 emit_stloc_ir (cfg, sp, header, ip [1]);
5998 CHECK_STACK_OVF (1);
5999 EMIT_NEW_PCONST (cfg, ins, NULL);
6000 ins->type = STACK_OBJ;
6005 CHECK_STACK_OVF (1);
6006 EMIT_NEW_ICONST (cfg, ins, -1);
6019 CHECK_STACK_OVF (1);
6020 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
6026 CHECK_STACK_OVF (1);
6028 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
6034 CHECK_STACK_OVF (1);
6035 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
6041 CHECK_STACK_OVF (1);
6042 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6043 ins->type = STACK_I8;
6044 ins->dreg = alloc_dreg (cfg, STACK_I8);
6046 ins->inst_l = (gint64)read64 (ip);
6047 MONO_ADD_INS (bblock, ins);
6053 gboolean use_aotconst = FALSE;
6055 #ifdef TARGET_POWERPC
6056 /* FIXME: Clean this up */
6057 if (cfg->compile_aot)
6058 use_aotconst = TRUE;
6061 /* FIXME: we should really allocate this only late in the compilation process */
6062 f = mono_domain_alloc (cfg->domain, sizeof (float));
6064 CHECK_STACK_OVF (1);
6070 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
6072 dreg = alloc_freg (cfg);
6073 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
6074 ins->type = STACK_R8;
6076 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6077 ins->type = STACK_R8;
6078 ins->dreg = alloc_dreg (cfg, STACK_R8);
6080 MONO_ADD_INS (bblock, ins);
6090 gboolean use_aotconst = FALSE;
6092 #ifdef TARGET_POWERPC
6093 /* FIXME: Clean this up */
6094 if (cfg->compile_aot)
6095 use_aotconst = TRUE;
6098 /* FIXME: we should really allocate this only late in the compilation process */
6099 d = mono_domain_alloc (cfg->domain, sizeof (double));
6101 CHECK_STACK_OVF (1);
6107 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
6109 dreg = alloc_freg (cfg);
6110 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
6111 ins->type = STACK_R8;
6113 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6114 ins->type = STACK_R8;
6115 ins->dreg = alloc_dreg (cfg, STACK_R8);
6117 MONO_ADD_INS (bblock, ins);
6126 MonoInst *temp, *store;
6128 CHECK_STACK_OVF (1);
6132 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
6133 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
6135 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6138 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6151 if (sp [0]->type == STACK_R8)
6152 /* we need to pop the value from the x86 FP stack */
6153 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
6162 if (stack_start != sp)
6164 token = read32 (ip + 1);
6165 /* FIXME: check the signature matches */
6166 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6171 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
6172 GENERIC_SHARING_FAILURE (CEE_JMP);
6174 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
6175 CHECK_CFG_EXCEPTION;
6177 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6179 MonoMethodSignature *fsig = mono_method_signature (cmethod);
6182 /* Handle tail calls similarly to calls */
6183 n = fsig->param_count + fsig->hasthis;
6185 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
6186 call->method = cmethod;
6187 call->tail_call = TRUE;
6188 call->signature = mono_method_signature (cmethod);
6189 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6190 call->inst.inst_p0 = cmethod;
6191 for (i = 0; i < n; ++i)
6192 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
6194 mono_arch_emit_call (cfg, call);
6195 MONO_ADD_INS (bblock, (MonoInst*)call);
6198 for (i = 0; i < num_args; ++i)
6199 /* Prevent arguments from being optimized away */
6200 arg_array [i]->flags |= MONO_INST_VOLATILE;
6202 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6203 ins = (MonoInst*)call;
6204 ins->inst_p0 = cmethod;
6205 MONO_ADD_INS (bblock, ins);
6209 start_new_bblock = 1;
6214 case CEE_CALLVIRT: {
6215 MonoInst *addr = NULL;
6216 MonoMethodSignature *fsig = NULL;
6218 int virtual = *ip == CEE_CALLVIRT;
6219 int calli = *ip == CEE_CALLI;
6220 gboolean pass_imt_from_rgctx = FALSE;
6221 MonoInst *imt_arg = NULL;
6222 gboolean pass_vtable = FALSE;
6223 gboolean pass_mrgctx = FALSE;
6224 MonoInst *vtable_arg = NULL;
6225 gboolean check_this = FALSE;
6226 gboolean supported_tail_call = FALSE;
6229 token = read32 (ip + 1);
6236 if (method->wrapper_type != MONO_WRAPPER_NONE)
6237 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6239 fsig = mono_metadata_parse_signature (image, token);
6241 n = fsig->param_count + fsig->hasthis;
6243 if (method->dynamic && fsig->pinvoke) {
6247 * This is a call through a function pointer using a pinvoke
6248 * signature. Have to create a wrapper and call that instead.
6249 * FIXME: This is very slow, need to create a wrapper at JIT time
6250 * instead based on the signature.
6252 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
6253 EMIT_NEW_PCONST (cfg, args [1], fsig);
6255 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6258 MonoMethod *cil_method;
6260 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6261 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6262 cil_method = cmethod;
6263 } else if (constrained_call) {
6264 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6266 * This is needed since get_method_constrained can't find
6267 * the method in klass representing a type var.
6268 * The type var is guaranteed to be a reference type in this
6271 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6272 cil_method = cmethod;
6273 g_assert (!cmethod->klass->valuetype);
6275 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6278 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6279 cil_method = cmethod;
6284 if (!dont_verify && !cfg->skip_visibility) {
6285 MonoMethod *target_method = cil_method;
6286 if (method->is_inflated) {
6287 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6289 if (!mono_method_can_access_method (method_definition, target_method) &&
6290 !mono_method_can_access_method (method, cil_method))
6291 METHOD_ACCESS_FAILURE;
6294 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6295 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6297 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6298 /* MS.NET seems to silently convert this to a callvirt */
6301 if (!cmethod->klass->inited)
6302 if (!mono_class_init (cmethod->klass))
6305 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6306 mini_class_is_system_array (cmethod->klass)) {
6307 array_rank = cmethod->klass->rank;
6308 fsig = mono_method_signature (cmethod);
6310 if (mono_method_signature (cmethod)->pinvoke) {
6311 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6312 check_for_pending_exc, FALSE);
6313 fsig = mono_method_signature (wrapper);
6314 } else if (constrained_call) {
6315 fsig = mono_method_signature (cmethod);
6317 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6321 mono_save_token_info (cfg, image, token, cil_method);
6323 n = fsig->param_count + fsig->hasthis;
6325 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6326 if (check_linkdemand (cfg, method, cmethod))
6328 CHECK_CFG_EXCEPTION;
6331 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6332 g_assert_not_reached ();
6335 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6338 if (!cfg->generic_sharing_context && cmethod)
6339 g_assert (!mono_method_check_context_used (cmethod));
6343 //g_assert (!virtual || fsig->hasthis);
6347 if (constrained_call) {
6349 * We have the `constrained.' prefix opcode.
6351 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6353 * The type parameter is instantiated as a valuetype,
6354 * but that type doesn't override the method we're
6355 * calling, so we need to box `this'.
6357 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6358 ins->klass = constrained_call;
6359 sp [0] = handle_box (cfg, ins, constrained_call);
6360 CHECK_CFG_EXCEPTION;
6361 } else if (!constrained_call->valuetype) {
6362 int dreg = alloc_preg (cfg);
6365 * The type parameter is instantiated as a reference
6366 * type. We have a managed pointer on the stack, so
6367 * we need to dereference it here.
6369 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6370 ins->type = STACK_OBJ;
6372 } else if (cmethod->klass->valuetype)
6374 constrained_call = NULL;
6377 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6381 * If the callee is a shared method, then its static cctor
6382 * might not get called after the call was patched.
6384 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6385 emit_generic_class_init (cfg, cmethod->klass);
6386 CHECK_TYPELOAD (cmethod->klass);
6389 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6390 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6391 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6392 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6393 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6396 * Pass vtable iff target method might
6397 * be shared, which means that sharing
6398 * is enabled for its class and its
6399 * context is sharable (and it's not a
6402 if (sharing_enabled && context_sharable &&
6403 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6407 if (cmethod && mini_method_get_context (cmethod) &&
6408 mini_method_get_context (cmethod)->method_inst) {
6409 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6410 MonoGenericContext *context = mini_method_get_context (cmethod);
6411 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6413 g_assert (!pass_vtable);
6415 if (sharing_enabled && context_sharable)
6419 if (cfg->generic_sharing_context && cmethod) {
6420 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6422 context_used = mono_method_check_context_used (cmethod);
6424 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6425 /* Generic method interface
6426 calls are resolved via a
6427 helper function and don't
6429 if (!cmethod_context || !cmethod_context->method_inst)
6430 pass_imt_from_rgctx = TRUE;
6434 * If a shared method calls another
6435 * shared method then the caller must
6436 * have a generic sharing context
6437 * because the magic trampoline
6438 * requires it. FIXME: We shouldn't
6439 * have to force the vtable/mrgctx
6440 * variable here. Instead there
6441 * should be a flag in the cfg to
6442 * request a generic sharing context.
6445 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6446 mono_get_vtable_var (cfg);
6451 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6453 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6455 CHECK_TYPELOAD (cmethod->klass);
6456 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6461 g_assert (!vtable_arg);
6463 if (!cfg->compile_aot) {
6465 * emit_get_rgctx_method () calls mono_class_vtable () so check
6466 * for type load errors before.
6468 mono_class_vtable (cfg->domain, cmethod->klass);
6469 CHECK_TYPELOAD (cmethod->klass);
6472 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6474 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6475 MONO_METHOD_IS_FINAL (cmethod)) {
6482 if (pass_imt_from_rgctx) {
6483 g_assert (!pass_vtable);
6486 imt_arg = emit_get_rgctx_method (cfg, context_used,
6487 cmethod, MONO_RGCTX_INFO_METHOD);
6491 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
6493 /* Calling virtual generic methods */
6494 if (cmethod && virtual &&
6495 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6496 !(MONO_METHOD_IS_FINAL (cmethod) &&
6497 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6498 mono_method_signature (cmethod)->generic_param_count) {
6499 MonoInst *this_temp, *this_arg_temp, *store;
6500 MonoInst *iargs [4];
6502 g_assert (mono_method_signature (cmethod)->is_inflated);
6504 /* Prevent inlining of methods that contain indirect calls */
6507 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK
6508 /* The llvm vcall trampolines doesn't support generic virtual calls yet */
6509 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt && !mono_use_llvm) {
6510 g_assert (!imt_arg);
6512 g_assert (cmethod->is_inflated);
6513 imt_arg = emit_get_rgctx_method (cfg, context_used,
6514 cmethod, MONO_RGCTX_INFO_METHOD);
6515 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6519 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6520 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6521 MONO_ADD_INS (bblock, store);
6523 /* FIXME: This should be a managed pointer */
6524 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6526 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6527 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6528 cmethod, MONO_RGCTX_INFO_METHOD);
6529 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6530 addr = mono_emit_jit_icall (cfg,
6531 mono_helper_compile_generic_method, iargs);
6533 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6535 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6538 if (!MONO_TYPE_IS_VOID (fsig->ret))
6539 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6546 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6547 supported_tail_call = cmethod && MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
6549 supported_tail_call = cmethod && mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6553 /* FIXME: runtime generic context pointer for jumps? */
6554 /* FIXME: handle this for generic sharing eventually */
6555 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) && supported_tail_call) {
6558 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6561 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6562 /* Handle tail calls similarly to calls */
6563 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE);
6565 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6566 call->tail_call = TRUE;
6567 call->method = cmethod;
6568 call->signature = mono_method_signature (cmethod);
6571 * We implement tail calls by storing the actual arguments into the
6572 * argument variables, then emitting a CEE_JMP.
6574 for (i = 0; i < n; ++i) {
6575 /* Prevent argument from being register allocated */
6576 arg_array [i]->flags |= MONO_INST_VOLATILE;
6577 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6581 ins = (MonoInst*)call;
6582 ins->inst_p0 = cmethod;
6583 ins->inst_p1 = arg_array [0];
6584 MONO_ADD_INS (bblock, ins);
6585 link_bblock (cfg, bblock, end_bblock);
6586 start_new_bblock = 1;
6587 /* skip CEE_RET as well */
6593 /* Conversion to a JIT intrinsic */
6594 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6595 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6596 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6607 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6608 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6609 mono_method_check_inlining (cfg, cmethod) &&
6610 !g_list_find (dont_inline, cmethod)) {
6612 gboolean allways = FALSE;
6614 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6615 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6616 /* Prevent inlining of methods that call wrappers */
6618 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6622 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6624 cfg->real_offset += 5;
6627 if (!MONO_TYPE_IS_VOID (fsig->ret))
6628 /* *sp is already set by inline_method */
6631 inline_costs += costs;
6637 inline_costs += 10 * num_calls++;
6639 /* Tail recursion elimination */
6640 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6641 gboolean has_vtargs = FALSE;
6644 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6647 /* keep it simple */
6648 for (i = fsig->param_count - 1; i >= 0; i--) {
6649 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6654 for (i = 0; i < n; ++i)
6655 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6656 MONO_INST_NEW (cfg, ins, OP_BR);
6657 MONO_ADD_INS (bblock, ins);
6658 tblock = start_bblock->out_bb [0];
6659 link_bblock (cfg, bblock, tblock);
6660 ins->inst_target_bb = tblock;
6661 start_new_bblock = 1;
6663 /* skip the CEE_RET, too */
6664 if (ip_in_bb (cfg, bblock, ip + 5))
6674 /* Generic sharing */
6675 /* FIXME: only do this for generic methods if
6676 they are not shared! */
6677 if (context_used && !imt_arg && !array_rank &&
6678 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6679 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6680 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6681 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6684 g_assert (cfg->generic_sharing_context && cmethod);
6688 * We are compiling a call to a
6689 * generic method from shared code,
6690 * which means that we have to look up
6691 * the method in the rgctx and do an
6694 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6697 /* Indirect calls */
6699 g_assert (!imt_arg);
6701 if (*ip == CEE_CALL)
6702 g_assert (context_used);
6703 else if (*ip == CEE_CALLI)
6704 g_assert (!vtable_arg);
6706 /* FIXME: what the hell is this??? */
6707 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6708 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6710 /* Prevent inlining of methods with indirect calls */
6714 #ifdef MONO_ARCH_RGCTX_REG
6716 int rgctx_reg = mono_alloc_preg (cfg);
6718 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6719 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6720 call = (MonoCallInst*)ins;
6721 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6722 cfg->uses_rgctx_reg = TRUE;
6723 call->rgctx_reg = TRUE;
6728 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6730 * Instead of emitting an indirect call, emit a direct call
6731 * with the contents of the aotconst as the patch info.
6733 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6735 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6736 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
6739 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6742 if (!MONO_TYPE_IS_VOID (fsig->ret))
6743 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6754 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6755 if (sp [fsig->param_count]->type == STACK_OBJ) {
6756 MonoInst *iargs [2];
6759 iargs [1] = sp [fsig->param_count];
6761 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6764 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6765 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6766 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6767 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6769 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6772 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6773 if (!cmethod->klass->element_class->valuetype && !readonly)
6774 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6775 CHECK_TYPELOAD (cmethod->klass);
6778 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6781 g_assert_not_reached ();
6789 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6791 if (!MONO_TYPE_IS_VOID (fsig->ret))
6792 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6802 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6804 } else if (imt_arg) {
6805 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6807 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6810 if (!MONO_TYPE_IS_VOID (fsig->ret))
6811 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6818 if (cfg->method != method) {
6819 /* return from inlined method */
6821 * If in_count == 0, that means the ret is unreachable due to
6822 * being preceded by a throw. In that case, inline_method () will
6823 * handle setting the return value
6824 * (test case: test_0_inline_throw ()).
6826 if (return_var && cfg->cbb->in_count) {
6830 //g_assert (returnvar != -1);
6831 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6832 cfg->ret_var_set = TRUE;
6836 MonoType *ret_type = mono_method_signature (method)->ret;
6838 g_assert (!return_var);
6841 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6844 if (!cfg->vret_addr) {
6847 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6849 EMIT_NEW_RETLOADA (cfg, ret_addr);
6851 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6852 ins->klass = mono_class_from_mono_type (ret_type);
6855 #ifdef MONO_ARCH_SOFT_FLOAT
6856 if (!ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6857 MonoInst *iargs [1];
6861 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6862 mono_arch_emit_setret (cfg, method, conv);
6864 mono_arch_emit_setret (cfg, method, *sp);
6867 mono_arch_emit_setret (cfg, method, *sp);
6872 if (sp != stack_start)
6874 MONO_INST_NEW (cfg, ins, OP_BR);
6876 ins->inst_target_bb = end_bblock;
6877 MONO_ADD_INS (bblock, ins);
6878 link_bblock (cfg, bblock, end_bblock);
6879 start_new_bblock = 1;
6883 MONO_INST_NEW (cfg, ins, OP_BR);
6885 target = ip + 1 + (signed char)(*ip);
6887 GET_BBLOCK (cfg, tblock, target);
6888 link_bblock (cfg, bblock, tblock);
6889 ins->inst_target_bb = tblock;
6890 if (sp != stack_start) {
6891 handle_stack_args (cfg, stack_start, sp - stack_start);
6893 CHECK_UNVERIFIABLE (cfg);
6895 MONO_ADD_INS (bblock, ins);
6896 start_new_bblock = 1;
6897 inline_costs += BRANCH_COST;
6911 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6913 target = ip + 1 + *(signed char*)ip;
6919 inline_costs += BRANCH_COST;
6923 MONO_INST_NEW (cfg, ins, OP_BR);
6926 target = ip + 4 + (gint32)read32(ip);
6928 GET_BBLOCK (cfg, tblock, target);
6929 link_bblock (cfg, bblock, tblock);
6930 ins->inst_target_bb = tblock;
6931 if (sp != stack_start) {
6932 handle_stack_args (cfg, stack_start, sp - stack_start);
6934 CHECK_UNVERIFIABLE (cfg);
6937 MONO_ADD_INS (bblock, ins);
6939 start_new_bblock = 1;
6940 inline_costs += BRANCH_COST;
6947 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6948 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6949 guint32 opsize = is_short ? 1 : 4;
6951 CHECK_OPSIZE (opsize);
6953 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6956 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6961 GET_BBLOCK (cfg, tblock, target);
6962 link_bblock (cfg, bblock, tblock);
6963 GET_BBLOCK (cfg, tblock, ip);
6964 link_bblock (cfg, bblock, tblock);
6966 if (sp != stack_start) {
6967 handle_stack_args (cfg, stack_start, sp - stack_start);
6968 CHECK_UNVERIFIABLE (cfg);
6971 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6972 cmp->sreg1 = sp [0]->dreg;
6973 type_from_op (cmp, sp [0], NULL);
6976 #if SIZEOF_REGISTER == 4
6977 if (cmp->opcode == OP_LCOMPARE_IMM) {
6978 /* Convert it to OP_LCOMPARE */
6979 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6980 ins->type = STACK_I8;
6981 ins->dreg = alloc_dreg (cfg, STACK_I8);
6983 MONO_ADD_INS (bblock, ins);
6984 cmp->opcode = OP_LCOMPARE;
6985 cmp->sreg2 = ins->dreg;
6988 MONO_ADD_INS (bblock, cmp);
6990 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6991 type_from_op (ins, sp [0], NULL);
6992 MONO_ADD_INS (bblock, ins);
6993 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
6994 GET_BBLOCK (cfg, tblock, target);
6995 ins->inst_true_bb = tblock;
6996 GET_BBLOCK (cfg, tblock, ip);
6997 ins->inst_false_bb = tblock;
6998 start_new_bblock = 2;
7001 inline_costs += BRANCH_COST;
7016 MONO_INST_NEW (cfg, ins, *ip);
7018 target = ip + 4 + (gint32)read32(ip);
7024 inline_costs += BRANCH_COST;
7028 MonoBasicBlock **targets;
7029 MonoBasicBlock *default_bblock;
7030 MonoJumpInfoBBTable *table;
7031 int offset_reg = alloc_preg (cfg);
7032 int target_reg = alloc_preg (cfg);
7033 int table_reg = alloc_preg (cfg);
7034 int sum_reg = alloc_preg (cfg);
7035 gboolean use_op_switch;
7039 n = read32 (ip + 1);
7042 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
7046 CHECK_OPSIZE (n * sizeof (guint32));
7047 target = ip + n * sizeof (guint32);
7049 GET_BBLOCK (cfg, default_bblock, target);
7051 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
7052 for (i = 0; i < n; ++i) {
7053 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
7054 targets [i] = tblock;
7058 if (sp != stack_start) {
7060 * Link the current bb with the targets as well, so handle_stack_args
7061 * will set their in_stack correctly.
7063 link_bblock (cfg, bblock, default_bblock);
7064 for (i = 0; i < n; ++i)
7065 link_bblock (cfg, bblock, targets [i]);
7067 handle_stack_args (cfg, stack_start, sp - stack_start);
7069 CHECK_UNVERIFIABLE (cfg);
7072 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
7073 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
7076 for (i = 0; i < n; ++i)
7077 link_bblock (cfg, bblock, targets [i]);
7079 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
7080 table->table = targets;
7081 table->table_size = n;
7083 use_op_switch = FALSE;
7085 /* ARM implements SWITCH statements differently */
7086 /* FIXME: Make it use the generic implementation */
7087 if (!cfg->compile_aot)
7088 use_op_switch = TRUE;
7091 if (COMPILE_LLVM (cfg))
7092 use_op_switch = TRUE;
7094 cfg->cbb->has_jump_table = 1;
7096 if (use_op_switch) {
7097 MONO_INST_NEW (cfg, ins, OP_SWITCH);
7098 ins->sreg1 = src1->dreg;
7099 ins->inst_p0 = table;
7100 ins->inst_many_bb = targets;
7101 ins->klass = GUINT_TO_POINTER (n);
7102 MONO_ADD_INS (cfg->cbb, ins);
7104 if (sizeof (gpointer) == 8)
7105 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
7107 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
7109 #if SIZEOF_REGISTER == 8
7110 /* The upper word might not be zero, and we add it to a 64 bit address later */
7111 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
7114 if (cfg->compile_aot) {
7115 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
7117 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
7118 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
7119 ins->inst_p0 = table;
7120 ins->dreg = table_reg;
7121 MONO_ADD_INS (cfg->cbb, ins);
7124 /* FIXME: Use load_memindex */
7125 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
7126 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
7127 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
7129 start_new_bblock = 1;
7130 inline_costs += (BRANCH_COST * 2);
7150 dreg = alloc_freg (cfg);
7153 dreg = alloc_lreg (cfg);
7156 dreg = alloc_preg (cfg);
7159 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
7160 ins->type = ldind_type [*ip - CEE_LDIND_I1];
7161 ins->flags |= ins_flag;
7163 MONO_ADD_INS (bblock, ins);
7178 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
7179 ins->flags |= ins_flag;
7181 MONO_ADD_INS (bblock, ins);
7183 #if HAVE_WRITE_BARRIERS
7184 if (*ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0))) {
7185 /* insert call to write barrier */
7186 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7187 mono_emit_method_call (cfg, write_barrier, sp, NULL);
7198 MONO_INST_NEW (cfg, ins, (*ip));
7200 ins->sreg1 = sp [0]->dreg;
7201 ins->sreg2 = sp [1]->dreg;
7202 type_from_op (ins, sp [0], sp [1]);
7204 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7206 /* Use the immediate opcodes if possible */
7207 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
7208 int imm_opcode = mono_op_to_op_imm (ins->opcode);
7209 if (imm_opcode != -1) {
7210 ins->opcode = imm_opcode;
7211 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7214 sp [1]->opcode = OP_NOP;
7218 MONO_ADD_INS ((cfg)->cbb, (ins));
7220 *sp++ = mono_decompose_opcode (cfg, ins);
7237 MONO_INST_NEW (cfg, ins, (*ip));
7239 ins->sreg1 = sp [0]->dreg;
7240 ins->sreg2 = sp [1]->dreg;
7241 type_from_op (ins, sp [0], sp [1]);
7243 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7244 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7246 /* FIXME: Pass opcode to is_inst_imm */
7248 /* Use the immediate opcodes if possible */
7249 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7252 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7253 if (imm_opcode != -1) {
7254 ins->opcode = imm_opcode;
7255 if (sp [1]->opcode == OP_I8CONST) {
7256 #if SIZEOF_REGISTER == 8
7257 ins->inst_imm = sp [1]->inst_l;
7259 ins->inst_ls_word = sp [1]->inst_ls_word;
7260 ins->inst_ms_word = sp [1]->inst_ms_word;
7264 ins->inst_imm = (gssize)(sp [1]->inst_c0);
7267 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7268 if (sp [1]->next == NULL)
7269 sp [1]->opcode = OP_NOP;
7272 MONO_ADD_INS ((cfg)->cbb, (ins));
7274 *sp++ = mono_decompose_opcode (cfg, ins);
7287 case CEE_CONV_OVF_I8:
7288 case CEE_CONV_OVF_U8:
7292 /* Special case this earlier so we have long constants in the IR */
7293 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7294 int data = sp [-1]->inst_c0;
7295 sp [-1]->opcode = OP_I8CONST;
7296 sp [-1]->type = STACK_I8;
7297 #if SIZEOF_REGISTER == 8
7298 if ((*ip) == CEE_CONV_U8)
7299 sp [-1]->inst_c0 = (guint32)data;
7301 sp [-1]->inst_c0 = data;
7303 sp [-1]->inst_ls_word = data;
7304 if ((*ip) == CEE_CONV_U8)
7305 sp [-1]->inst_ms_word = 0;
7307 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7309 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7316 case CEE_CONV_OVF_I4:
7317 case CEE_CONV_OVF_I1:
7318 case CEE_CONV_OVF_I2:
7319 case CEE_CONV_OVF_I:
7320 case CEE_CONV_OVF_U:
7323 if (sp [-1]->type == STACK_R8) {
7324 ADD_UNOP (CEE_CONV_OVF_I8);
7331 case CEE_CONV_OVF_U1:
7332 case CEE_CONV_OVF_U2:
7333 case CEE_CONV_OVF_U4:
7336 if (sp [-1]->type == STACK_R8) {
7337 ADD_UNOP (CEE_CONV_OVF_U8);
7344 case CEE_CONV_OVF_I1_UN:
7345 case CEE_CONV_OVF_I2_UN:
7346 case CEE_CONV_OVF_I4_UN:
7347 case CEE_CONV_OVF_I8_UN:
7348 case CEE_CONV_OVF_U1_UN:
7349 case CEE_CONV_OVF_U2_UN:
7350 case CEE_CONV_OVF_U4_UN:
7351 case CEE_CONV_OVF_U8_UN:
7352 case CEE_CONV_OVF_I_UN:
7353 case CEE_CONV_OVF_U_UN:
7363 case CEE_ADD_OVF_UN:
7365 case CEE_MUL_OVF_UN:
7367 case CEE_SUB_OVF_UN:
7375 token = read32 (ip + 1);
7376 klass = mini_get_class (method, token, generic_context);
7377 CHECK_TYPELOAD (klass);
7379 if (generic_class_is_reference_type (cfg, klass)) {
7380 MonoInst *store, *load;
7381 int dreg = alloc_preg (cfg);
7383 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7384 load->flags |= ins_flag;
7385 MONO_ADD_INS (cfg->cbb, load);
7387 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7388 store->flags |= ins_flag;
7389 MONO_ADD_INS (cfg->cbb, store);
7391 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7403 token = read32 (ip + 1);
7404 klass = mini_get_class (method, token, generic_context);
7405 CHECK_TYPELOAD (klass);
7407 /* Optimize the common ldobj+stloc combination */
7417 loc_index = ip [5] - CEE_STLOC_0;
7424 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7425 CHECK_LOCAL (loc_index);
7427 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7428 ins->dreg = cfg->locals [loc_index]->dreg;
7434 /* Optimize the ldobj+stobj combination */
7435 /* The reference case ends up being a load+store anyway */
7436 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7441 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7448 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7457 CHECK_STACK_OVF (1);
7459 n = read32 (ip + 1);
7461 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7462 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7463 ins->type = STACK_OBJ;
7466 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7467 MonoInst *iargs [1];
7469 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7470 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7472 if (cfg->opt & MONO_OPT_SHARED) {
7473 MonoInst *iargs [3];
7475 if (cfg->compile_aot) {
7476 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7478 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7479 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7480 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7481 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7482 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7484 if (bblock->out_of_line) {
7485 MonoInst *iargs [2];
7487 if (image == mono_defaults.corlib) {
7489 * Avoid relocations in AOT and save some space by using a
7490 * version of helper_ldstr specialized to mscorlib.
7492 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7493 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7495 /* Avoid creating the string object */
7496 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7497 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7498 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7502 if (cfg->compile_aot) {
7503 NEW_LDSTRCONST (cfg, ins, image, n);
7505 MONO_ADD_INS (bblock, ins);
7508 NEW_PCONST (cfg, ins, NULL);
7509 ins->type = STACK_OBJ;
7510 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7512 MONO_ADD_INS (bblock, ins);
7521 MonoInst *iargs [2];
7522 MonoMethodSignature *fsig;
7525 MonoInst *vtable_arg = NULL;
7528 token = read32 (ip + 1);
7529 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7532 fsig = mono_method_get_signature (cmethod, image, token);
7536 mono_save_token_info (cfg, image, token, cmethod);
7538 if (!mono_class_init (cmethod->klass))
7541 if (cfg->generic_sharing_context)
7542 context_used = mono_method_check_context_used (cmethod);
7544 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7545 if (check_linkdemand (cfg, method, cmethod))
7547 CHECK_CFG_EXCEPTION;
7548 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7549 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7552 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7553 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7554 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7555 mono_class_vtable (cfg->domain, cmethod->klass);
7556 CHECK_TYPELOAD (cmethod->klass);
7558 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7559 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7562 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7563 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7565 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7567 CHECK_TYPELOAD (cmethod->klass);
7568 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7573 n = fsig->param_count;
7577 * Generate smaller code for the common newobj <exception> instruction in
7578 * argument checking code.
7580 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7581 is_exception_class (cmethod->klass) && n <= 2 &&
7582 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7583 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7584 MonoInst *iargs [3];
7586 g_assert (!vtable_arg);
7590 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7593 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7597 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7602 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7605 g_assert_not_reached ();
7613 /* move the args to allow room for 'this' in the first position */
7619 /* check_call_signature () requires sp[0] to be set */
7620 this_ins.type = STACK_OBJ;
7622 if (check_call_signature (cfg, fsig, sp))
7627 if (mini_class_is_system_array (cmethod->klass)) {
7628 g_assert (!vtable_arg);
7630 *sp = emit_get_rgctx_method (cfg, context_used,
7631 cmethod, MONO_RGCTX_INFO_METHOD);
7633 /* Avoid varargs in the common case */
7634 if (fsig->param_count == 1)
7635 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7636 else if (fsig->param_count == 2)
7637 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7639 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7640 } else if (cmethod->string_ctor) {
7641 g_assert (!context_used);
7642 g_assert (!vtable_arg);
7643 /* we simply pass a null pointer */
7644 EMIT_NEW_PCONST (cfg, *sp, NULL);
7645 /* now call the string ctor */
7646 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7648 MonoInst* callvirt_this_arg = NULL;
7650 if (cmethod->klass->valuetype) {
7651 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7652 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7653 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7658 * The code generated by mini_emit_virtual_call () expects
7659 * iargs [0] to be a boxed instance, but luckily the vcall
7660 * will be transformed into a normal call there.
7662 } else if (context_used) {
7666 if (cfg->opt & MONO_OPT_SHARED)
7667 rgctx_info = MONO_RGCTX_INFO_KLASS;
7669 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7670 data = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, rgctx_info);
7672 alloc = handle_alloc_from_inst (cfg, cmethod->klass, data, FALSE);
7675 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7677 CHECK_TYPELOAD (cmethod->klass);
7680 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7681 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7682 * As a workaround, we call class cctors before allocating objects.
7684 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7685 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7686 if (cfg->verbose_level > 2)
7687 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7688 class_inits = g_slist_prepend (class_inits, vtable);
7691 alloc = handle_alloc (cfg, cmethod->klass, FALSE);
7694 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
7697 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7699 /* Now call the actual ctor */
7700 /* Avoid virtual calls to ctors if possible */
7701 if (cmethod->klass->marshalbyref)
7702 callvirt_this_arg = sp [0];
7704 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7705 mono_method_check_inlining (cfg, cmethod) &&
7706 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7707 !g_list_find (dont_inline, cmethod)) {
7710 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7711 cfg->real_offset += 5;
7714 inline_costs += costs - 5;
7717 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7719 } else if (context_used &&
7720 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7721 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7722 MonoInst *cmethod_addr;
7724 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7725 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7727 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7730 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7731 callvirt_this_arg, NULL, vtable_arg);
7735 if (alloc == NULL) {
7737 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7738 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7752 token = read32 (ip + 1);
7753 klass = mini_get_class (method, token, generic_context);
7754 CHECK_TYPELOAD (klass);
7755 if (sp [0]->type != STACK_OBJ)
7758 if (cfg->generic_sharing_context)
7759 context_used = mono_class_check_context_used (klass);
7768 args [1] = emit_get_rgctx_klass (cfg, context_used,
7769 klass, MONO_RGCTX_INFO_KLASS);
7771 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7775 } else if (mono_class_has_variant_generic_params (klass)) {
7782 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7784 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7788 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7789 MonoMethod *mono_castclass;
7790 MonoInst *iargs [1];
7793 mono_castclass = mono_marshal_get_castclass (klass);
7796 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7797 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7798 g_assert (costs > 0);
7801 cfg->real_offset += 5;
7806 inline_costs += costs;
7809 ins = handle_castclass (cfg, klass, *sp);
7810 CHECK_CFG_EXCEPTION;
7820 token = read32 (ip + 1);
7821 klass = mini_get_class (method, token, generic_context);
7822 CHECK_TYPELOAD (klass);
7823 if (sp [0]->type != STACK_OBJ)
7826 if (cfg->generic_sharing_context)
7827 context_used = mono_class_check_context_used (klass);
7836 args [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7838 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7842 } else if (mono_class_has_variant_generic_params (klass)) {
7849 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7851 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7855 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7856 MonoMethod *mono_isinst;
7857 MonoInst *iargs [1];
7860 mono_isinst = mono_marshal_get_isinst (klass);
7863 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7864 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7865 g_assert (costs > 0);
7868 cfg->real_offset += 5;
7873 inline_costs += costs;
7876 ins = handle_isinst (cfg, klass, *sp);
7877 CHECK_CFG_EXCEPTION;
7884 case CEE_UNBOX_ANY: {
7888 token = read32 (ip + 1);
7889 klass = mini_get_class (method, token, generic_context);
7890 CHECK_TYPELOAD (klass);
7892 mono_save_token_info (cfg, image, token, klass);
7894 if (cfg->generic_sharing_context)
7895 context_used = mono_class_check_context_used (klass);
7897 if (generic_class_is_reference_type (cfg, klass)) {
7898 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
7900 MonoInst *iargs [2];
7905 iargs [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7906 ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
7910 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7911 MonoMethod *mono_castclass;
7912 MonoInst *iargs [1];
7915 mono_castclass = mono_marshal_get_castclass (klass);
7918 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7919 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7921 g_assert (costs > 0);
7924 cfg->real_offset += 5;
7928 inline_costs += costs;
7930 ins = handle_castclass (cfg, klass, *sp);
7931 CHECK_CFG_EXCEPTION;
7939 if (mono_class_is_nullable (klass)) {
7940 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
7947 ins = handle_unbox (cfg, klass, sp, context_used);
7953 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7966 token = read32 (ip + 1);
7967 klass = mini_get_class (method, token, generic_context);
7968 CHECK_TYPELOAD (klass);
7970 mono_save_token_info (cfg, image, token, klass);
7972 if (cfg->generic_sharing_context)
7973 context_used = mono_class_check_context_used (klass);
7975 if (generic_class_is_reference_type (cfg, klass)) {
7981 if (klass == mono_defaults.void_class)
7983 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7985 /* frequent check in generic code: box (struct), brtrue */
7986 if (!mono_class_is_nullable (klass) &&
7987 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
7988 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7990 MONO_INST_NEW (cfg, ins, OP_BR);
7991 if (*ip == CEE_BRTRUE_S) {
7994 target = ip + 1 + (signed char)(*ip);
7999 target = ip + 4 + (gint)(read32 (ip));
8002 GET_BBLOCK (cfg, tblock, target);
8003 link_bblock (cfg, bblock, tblock);
8004 ins->inst_target_bb = tblock;
8005 GET_BBLOCK (cfg, tblock, ip);
8007 * This leads to some inconsistency, since the two bblocks are
8008 * not really connected, but it is needed for handling stack
8009 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
8010 * FIXME: This should only be needed if sp != stack_start, but that
8011 * doesn't work for some reason (test failure in mcs/tests on x86).
8013 link_bblock (cfg, bblock, tblock);
8014 if (sp != stack_start) {
8015 handle_stack_args (cfg, stack_start, sp - stack_start);
8017 CHECK_UNVERIFIABLE (cfg);
8019 MONO_ADD_INS (bblock, ins);
8020 start_new_bblock = 1;
8028 if (cfg->opt & MONO_OPT_SHARED)
8029 rgctx_info = MONO_RGCTX_INFO_KLASS;
8031 rgctx_info = MONO_RGCTX_INFO_VTABLE;
8032 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
8033 *sp++ = handle_box_from_inst (cfg, val, klass, context_used, data);
8035 *sp++ = handle_box (cfg, val, klass);
8038 CHECK_CFG_EXCEPTION;
8047 token = read32 (ip + 1);
8048 klass = mini_get_class (method, token, generic_context);
8049 CHECK_TYPELOAD (klass);
8051 mono_save_token_info (cfg, image, token, klass);
8053 if (cfg->generic_sharing_context)
8054 context_used = mono_class_check_context_used (klass);
8056 if (mono_class_is_nullable (klass)) {
8059 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
8060 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
8064 ins = handle_unbox (cfg, klass, sp, context_used);
8074 MonoClassField *field;
8078 if (*ip == CEE_STFLD) {
8085 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
8087 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
8090 token = read32 (ip + 1);
8091 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8092 field = mono_method_get_wrapper_data (method, token);
8093 klass = field->parent;
8096 field = mono_field_from_token (image, token, &klass, generic_context);
8100 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8101 FIELD_ACCESS_FAILURE;
8102 mono_class_init (klass);
8104 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
8105 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
8106 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8107 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8110 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
8111 if (*ip == CEE_STFLD) {
8112 if (target_type_is_incompatible (cfg, field->type, sp [1]))
8114 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8115 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
8116 MonoInst *iargs [5];
8119 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8120 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8121 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
8125 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8126 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
8127 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8128 g_assert (costs > 0);
8130 cfg->real_offset += 5;
8133 inline_costs += costs;
8135 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
8140 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8142 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
8144 #if HAVE_WRITE_BARRIERS
8145 if (mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
8146 /* insert call to write barrier */
8147 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
8148 MonoInst *iargs [2];
8151 dreg = alloc_preg (cfg);
8152 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8154 mono_emit_method_call (cfg, write_barrier, iargs, NULL);
8158 store->flags |= ins_flag;
8165 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8166 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
8167 MonoInst *iargs [4];
8170 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8171 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8172 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
8173 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8174 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
8175 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8177 g_assert (costs > 0);
8179 cfg->real_offset += 5;
8183 inline_costs += costs;
8185 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
8189 if (sp [0]->type == STACK_VTYPE) {
8192 /* Have to compute the address of the variable */
8194 var = get_vreg_to_inst (cfg, sp [0]->dreg);
8196 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
8198 g_assert (var->klass == klass);
8200 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
8204 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8206 if (*ip == CEE_LDFLDA) {
8207 dreg = alloc_preg (cfg);
8209 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8210 ins->klass = mono_class_from_mono_type (field->type);
8211 ins->type = STACK_MP;
8216 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
8217 load->flags |= ins_flag;
8228 MonoClassField *field;
8229 gpointer addr = NULL;
8230 gboolean is_special_static;
8233 token = read32 (ip + 1);
8235 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8236 field = mono_method_get_wrapper_data (method, token);
8237 klass = field->parent;
8240 field = mono_field_from_token (image, token, &klass, generic_context);
8243 mono_class_init (klass);
8244 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8245 FIELD_ACCESS_FAILURE;
8247 /* if the class is Critical then transparent code cannot access it's fields */
8248 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8249 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8252 * We can only support shared generic static
8253 * field access on architectures where the
8254 * trampoline code has been extended to handle
8255 * the generic class init.
8257 #ifndef MONO_ARCH_VTABLE_REG
8258 GENERIC_SHARING_FAILURE (*ip);
8261 if (cfg->generic_sharing_context)
8262 context_used = mono_class_check_context_used (klass);
8264 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
8266 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8267 * to be called here.
8269 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8270 mono_class_vtable (cfg->domain, klass);
8271 CHECK_TYPELOAD (klass);
8273 mono_domain_lock (cfg->domain);
8274 if (cfg->domain->special_static_fields)
8275 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8276 mono_domain_unlock (cfg->domain);
8278 is_special_static = mono_class_field_is_special_static (field);
8280 /* Generate IR to compute the field address */
8282 if ((cfg->opt & MONO_OPT_SHARED) ||
8283 (cfg->compile_aot && is_special_static) ||
8284 (context_used && is_special_static)) {
8285 MonoInst *iargs [2];
8287 g_assert (field->parent);
8288 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8290 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8291 field, MONO_RGCTX_INFO_CLASS_FIELD);
8293 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8295 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8296 } else if (context_used) {
8297 MonoInst *static_data;
8300 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8301 method->klass->name_space, method->klass->name, method->name,
8302 depth, field->offset);
8305 if (mono_class_needs_cctor_run (klass, method)) {
8309 vtable = emit_get_rgctx_klass (cfg, context_used,
8310 klass, MONO_RGCTX_INFO_VTABLE);
8312 // FIXME: This doesn't work since it tries to pass the argument
8313 // in the normal way, instead of using MONO_ARCH_VTABLE_REG
8315 * The vtable pointer is always passed in a register regardless of
8316 * the calling convention, so assign it manually, and make a call
8317 * using a signature without parameters.
8319 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable);
8320 #ifdef MONO_ARCH_VTABLE_REG
8321 mono_call_inst_add_outarg_reg (cfg, call, vtable->dreg, MONO_ARCH_VTABLE_REG, FALSE);
8322 cfg->uses_vtable_reg = TRUE;
8329 * The pointer we're computing here is
8331 * super_info.static_data + field->offset
8333 static_data = emit_get_rgctx_klass (cfg, context_used,
8334 klass, MONO_RGCTX_INFO_STATIC_DATA);
8336 if (field->offset == 0) {
8339 int addr_reg = mono_alloc_preg (cfg);
8340 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8342 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8343 MonoInst *iargs [2];
8345 g_assert (field->parent);
8346 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8347 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8348 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8350 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8352 CHECK_TYPELOAD (klass);
8354 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8355 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8356 if (cfg->verbose_level > 2)
8357 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8358 class_inits = g_slist_prepend (class_inits, vtable);
8360 if (cfg->run_cctors) {
8362 /* This makes so that inline cannot trigger */
8363 /* .cctors: too many apps depend on them */
8364 /* running with a specific order... */
8365 if (! vtable->initialized)
8367 ex = mono_runtime_class_init_full (vtable, FALSE);
8369 set_exception_object (cfg, ex);
8370 goto exception_exit;
8374 addr = (char*)vtable->data + field->offset;
8376 if (cfg->compile_aot)
8377 EMIT_NEW_SFLDACONST (cfg, ins, field);
8379 EMIT_NEW_PCONST (cfg, ins, addr);
8382 * insert call to mono_threads_get_static_data (GPOINTER_TO_UINT (addr))
8383 * This could be later optimized to do just a couple of
8384 * memory dereferences with constant offsets.
8386 MonoInst *iargs [1];
8387 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8388 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8392 /* Generate IR to do the actual load/store operation */
8394 if (*ip == CEE_LDSFLDA) {
8395 ins->klass = mono_class_from_mono_type (field->type);
8396 ins->type = STACK_PTR;
8398 } else if (*ip == CEE_STSFLD) {
8403 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
8404 store->flags |= ins_flag;
8406 gboolean is_const = FALSE;
8407 MonoVTable *vtable = NULL;
8409 if (!context_used) {
8410 vtable = mono_class_vtable (cfg->domain, klass);
8411 CHECK_TYPELOAD (klass);
8413 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8414 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8415 gpointer addr = (char*)vtable->data + field->offset;
8416 int ro_type = field->type->type;
8417 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
8418 ro_type = mono_class_enum_basetype (field->type->data.klass)->type;
8420 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8423 case MONO_TYPE_BOOLEAN:
8425 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8429 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8432 case MONO_TYPE_CHAR:
8434 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8438 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8443 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8447 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8450 #ifndef HAVE_MOVING_COLLECTOR
8453 case MONO_TYPE_STRING:
8454 case MONO_TYPE_OBJECT:
8455 case MONO_TYPE_CLASS:
8456 case MONO_TYPE_SZARRAY:
8458 case MONO_TYPE_FNPTR:
8459 case MONO_TYPE_ARRAY:
8460 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8461 type_to_eval_stack_type ((cfg), field->type, *sp);
8467 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8472 case MONO_TYPE_VALUETYPE:
8482 CHECK_STACK_OVF (1);
8484 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8485 load->flags |= ins_flag;
8498 token = read32 (ip + 1);
8499 klass = mini_get_class (method, token, generic_context);
8500 CHECK_TYPELOAD (klass);
8501 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8502 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8513 const char *data_ptr;
8515 guint32 field_token;
8521 token = read32 (ip + 1);
8523 klass = mini_get_class (method, token, generic_context);
8524 CHECK_TYPELOAD (klass);
8526 if (cfg->generic_sharing_context)
8527 context_used = mono_class_check_context_used (klass);
8529 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
8530 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
8531 ins->sreg1 = sp [0]->dreg;
8532 ins->type = STACK_I4;
8533 ins->dreg = alloc_ireg (cfg);
8534 MONO_ADD_INS (cfg->cbb, ins);
8535 *sp = mono_decompose_opcode (cfg, ins);
8540 MonoClass *array_class = mono_array_class_get (klass, 1);
8541 /* FIXME: we cannot get a managed
8542 allocator because we can't get the
8543 open generic class's vtable. We
8544 have the same problem in
8545 handle_alloc_from_inst(). This
8546 needs to be solved so that we can
8547 have managed allocs of shared
8550 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
8551 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
8553 MonoMethod *managed_alloc = NULL;
8555 /* FIXME: Decompose later to help abcrem */
8558 args [0] = emit_get_rgctx_klass (cfg, context_used,
8559 array_class, MONO_RGCTX_INFO_VTABLE);
8564 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
8566 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8568 if (cfg->opt & MONO_OPT_SHARED) {
8569 /* Decompose now to avoid problems with references to the domainvar */
8570 MonoInst *iargs [3];
8572 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8573 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8576 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8578 /* Decompose later since it is needed by abcrem */
8579 MonoClass *array_type = mono_array_class_get (klass, 1);
8580 mono_class_vtable (cfg->domain, array_type);
8581 CHECK_TYPELOAD (array_type);
8583 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8584 ins->dreg = alloc_preg (cfg);
8585 ins->sreg1 = sp [0]->dreg;
8586 ins->inst_newa_class = klass;
8587 ins->type = STACK_OBJ;
8589 MONO_ADD_INS (cfg->cbb, ins);
8590 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8591 cfg->cbb->has_array_access = TRUE;
8593 /* Needed so mono_emit_load_get_addr () gets called */
8594 mono_get_got_var (cfg);
8604 * we inline/optimize the initialization sequence if possible.
8605 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8606 * for small sizes open code the memcpy
8607 * ensure the rva field is big enough
8609 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8610 MonoMethod *memcpy_method = get_memcpy_method ();
8611 MonoInst *iargs [3];
8612 int add_reg = alloc_preg (cfg);
8614 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8615 if (cfg->compile_aot) {
8616 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8618 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8620 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8621 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8630 if (sp [0]->type != STACK_OBJ)
8633 dreg = alloc_preg (cfg);
8634 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8635 ins->dreg = alloc_preg (cfg);
8636 ins->sreg1 = sp [0]->dreg;
8637 ins->type = STACK_I4;
8638 MONO_ADD_INS (cfg->cbb, ins);
8639 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8640 cfg->cbb->has_array_access = TRUE;
8648 if (sp [0]->type != STACK_OBJ)
8651 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8653 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8654 CHECK_TYPELOAD (klass);
8655 /* we need to make sure that this array is exactly the type it needs
8656 * to be for correctness. the wrappers are lax with their usage
8657 * so we need to ignore them here
8659 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
8660 MonoClass *array_class = mono_array_class_get (klass, 1);
8661 mini_emit_check_array_type (cfg, sp [0], array_class);
8662 CHECK_TYPELOAD (array_class);
8666 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8681 case CEE_LDELEM_REF: {
8687 if (*ip == CEE_LDELEM) {
8689 token = read32 (ip + 1);
8690 klass = mini_get_class (method, token, generic_context);
8691 CHECK_TYPELOAD (klass);
8692 mono_class_init (klass);
8695 klass = array_access_to_klass (*ip);
8697 if (sp [0]->type != STACK_OBJ)
8700 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8702 if (sp [1]->opcode == OP_ICONST) {
8703 int array_reg = sp [0]->dreg;
8704 int index_reg = sp [1]->dreg;
8705 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8707 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8708 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8710 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8711 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8714 if (*ip == CEE_LDELEM)
8727 case CEE_STELEM_REF:
8734 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8736 if (*ip == CEE_STELEM) {
8738 token = read32 (ip + 1);
8739 klass = mini_get_class (method, token, generic_context);
8740 CHECK_TYPELOAD (klass);
8741 mono_class_init (klass);
8744 klass = array_access_to_klass (*ip);
8746 if (sp [0]->type != STACK_OBJ)
8749 /* storing a NULL doesn't need any of the complex checks in stelemref */
8750 if (generic_class_is_reference_type (cfg, klass) &&
8751 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8752 MonoMethod* helper = mono_marshal_get_stelemref ();
8753 MonoInst *iargs [3];
8755 if (sp [0]->type != STACK_OBJ)
8757 if (sp [2]->type != STACK_OBJ)
8764 mono_emit_method_call (cfg, helper, iargs, NULL);
8766 if (sp [1]->opcode == OP_ICONST) {
8767 int array_reg = sp [0]->dreg;
8768 int index_reg = sp [1]->dreg;
8769 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8771 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8772 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8774 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8775 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8779 if (*ip == CEE_STELEM)
8786 case CEE_CKFINITE: {
8790 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8791 ins->sreg1 = sp [0]->dreg;
8792 ins->dreg = alloc_freg (cfg);
8793 ins->type = STACK_R8;
8794 MONO_ADD_INS (bblock, ins);
8796 *sp++ = mono_decompose_opcode (cfg, ins);
8801 case CEE_REFANYVAL: {
8802 MonoInst *src_var, *src;
8804 int klass_reg = alloc_preg (cfg);
8805 int dreg = alloc_preg (cfg);
8808 MONO_INST_NEW (cfg, ins, *ip);
8811 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8812 CHECK_TYPELOAD (klass);
8813 mono_class_init (klass);
8815 if (cfg->generic_sharing_context)
8816 context_used = mono_class_check_context_used (klass);
8819 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8821 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8822 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8823 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8826 MonoInst *klass_ins;
8828 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8829 klass, MONO_RGCTX_INFO_KLASS);
8832 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8833 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8835 mini_emit_class_check (cfg, klass_reg, klass);
8837 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8838 ins->type = STACK_MP;
8843 case CEE_MKREFANY: {
8844 MonoInst *loc, *addr;
8847 MONO_INST_NEW (cfg, ins, *ip);
8850 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8851 CHECK_TYPELOAD (klass);
8852 mono_class_init (klass);
8854 if (cfg->generic_sharing_context)
8855 context_used = mono_class_check_context_used (klass);
8857 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8858 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8861 MonoInst *const_ins;
8862 int type_reg = alloc_preg (cfg);
8864 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8865 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8866 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8867 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8868 } else if (cfg->compile_aot) {
8869 int const_reg = alloc_preg (cfg);
8870 int type_reg = alloc_preg (cfg);
8872 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8873 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8874 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8875 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8877 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8878 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8880 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8882 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8883 ins->type = STACK_VTYPE;
8884 ins->klass = mono_defaults.typed_reference_class;
8891 MonoClass *handle_class;
8893 CHECK_STACK_OVF (1);
8896 n = read32 (ip + 1);
8898 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
8899 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8900 handle = mono_method_get_wrapper_data (method, n);
8901 handle_class = mono_method_get_wrapper_data (method, n + 1);
8902 if (handle_class == mono_defaults.typehandle_class)
8903 handle = &((MonoClass*)handle)->byval_arg;
8906 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8910 mono_class_init (handle_class);
8911 if (cfg->generic_sharing_context) {
8912 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
8913 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
8914 /* This case handles ldtoken
8915 of an open type, like for
8918 } else if (handle_class == mono_defaults.typehandle_class) {
8919 /* If we get a MONO_TYPE_CLASS
8920 then we need to provide the
8922 instantiation of it. */
8923 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8926 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8927 } else if (handle_class == mono_defaults.fieldhandle_class)
8928 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8929 else if (handle_class == mono_defaults.methodhandle_class)
8930 context_used = mono_method_check_context_used (handle);
8932 g_assert_not_reached ();
8935 if ((cfg->opt & MONO_OPT_SHARED) &&
8936 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
8937 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
8938 MonoInst *addr, *vtvar, *iargs [3];
8939 int method_context_used;
8941 if (cfg->generic_sharing_context)
8942 method_context_used = mono_method_check_context_used (method);
8944 method_context_used = 0;
8946 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8948 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8949 EMIT_NEW_ICONST (cfg, iargs [1], n);
8950 if (method_context_used) {
8951 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
8952 method, MONO_RGCTX_INFO_METHOD);
8953 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8955 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
8956 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
8958 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8960 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8962 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8964 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
8965 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
8966 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
8967 (cmethod->klass == mono_defaults.monotype_class->parent) &&
8968 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
8969 MonoClass *tclass = mono_class_from_mono_type (handle);
8971 mono_class_init (tclass);
8973 ins = emit_get_rgctx_klass (cfg, context_used,
8974 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
8975 } else if (cfg->compile_aot) {
8976 if (method->wrapper_type) {
8977 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
8978 /* Special case for static synchronized wrappers */
8979 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
8981 /* FIXME: n is not a normal token */
8982 cfg->disable_aot = TRUE;
8983 EMIT_NEW_PCONST (cfg, ins, NULL);
8986 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
8989 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
8991 ins->type = STACK_OBJ;
8992 ins->klass = cmethod->klass;
8995 MonoInst *addr, *vtvar;
8997 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9000 if (handle_class == mono_defaults.typehandle_class) {
9001 ins = emit_get_rgctx_klass (cfg, context_used,
9002 mono_class_from_mono_type (handle),
9003 MONO_RGCTX_INFO_TYPE);
9004 } else if (handle_class == mono_defaults.methodhandle_class) {
9005 ins = emit_get_rgctx_method (cfg, context_used,
9006 handle, MONO_RGCTX_INFO_METHOD);
9007 } else if (handle_class == mono_defaults.fieldhandle_class) {
9008 ins = emit_get_rgctx_field (cfg, context_used,
9009 handle, MONO_RGCTX_INFO_CLASS_FIELD);
9011 g_assert_not_reached ();
9013 } else if (cfg->compile_aot) {
9014 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
9016 EMIT_NEW_PCONST (cfg, ins, handle);
9018 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9019 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9020 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9030 MONO_INST_NEW (cfg, ins, OP_THROW);
9032 ins->sreg1 = sp [0]->dreg;
9034 bblock->out_of_line = TRUE;
9035 MONO_ADD_INS (bblock, ins);
9036 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
9037 MONO_ADD_INS (bblock, ins);
9040 link_bblock (cfg, bblock, end_bblock);
9041 start_new_bblock = 1;
9043 case CEE_ENDFINALLY:
9044 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
9045 MONO_ADD_INS (bblock, ins);
9047 start_new_bblock = 1;
9050 * Control will leave the method so empty the stack, otherwise
9051 * the next basic block will start with a nonempty stack.
9053 while (sp != stack_start) {
9061 if (*ip == CEE_LEAVE) {
9063 target = ip + 5 + (gint32)read32(ip + 1);
9066 target = ip + 2 + (signed char)(ip [1]);
9069 /* empty the stack */
9070 while (sp != stack_start) {
9075 * If this leave statement is in a catch block, check for a
9076 * pending exception, and rethrow it if necessary.
9077 * We avoid doing this in runtime invoke wrappers, since those are called
9078 * by native code which excepts the wrapper to catch all exceptions.
9080 for (i = 0; i < header->num_clauses; ++i) {
9081 MonoExceptionClause *clause = &header->clauses [i];
9084 * Use <= in the final comparison to handle clauses with multiple
9085 * leave statements, like in bug #78024.
9086 * The ordering of the exception clauses guarantees that we find the
9089 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
9091 MonoBasicBlock *dont_throw;
9096 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
9099 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
9101 NEW_BBLOCK (cfg, dont_throw);
9104 * Currently, we allways rethrow the abort exception, despite the
9105 * fact that this is not correct. See thread6.cs for an example.
9106 * But propagating the abort exception is more important than
9107 * getting the sematics right.
9109 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
9110 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
9111 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
9113 MONO_START_BB (cfg, dont_throw);
9118 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
9120 for (tmp = handlers; tmp; tmp = tmp->next) {
9122 link_bblock (cfg, bblock, tblock);
9123 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
9124 ins->inst_target_bb = tblock;
9125 MONO_ADD_INS (bblock, ins);
9126 bblock->has_call_handler = 1;
9127 if (COMPILE_LLVM (cfg)) {
9128 MonoBasicBlock *target_bb;
9131 * Link the finally bblock with the target, since it will
9132 * conceptually branch there.
9133 * FIXME: Have to link the bblock containing the endfinally.
9135 GET_BBLOCK (cfg, target_bb, target);
9136 link_bblock (cfg, tblock, target_bb);
9139 g_list_free (handlers);
9142 MONO_INST_NEW (cfg, ins, OP_BR);
9143 MONO_ADD_INS (bblock, ins);
9144 GET_BBLOCK (cfg, tblock, target);
9145 link_bblock (cfg, bblock, tblock);
9146 ins->inst_target_bb = tblock;
9147 start_new_bblock = 1;
9149 if (*ip == CEE_LEAVE)
9158 * Mono specific opcodes
9160 case MONO_CUSTOM_PREFIX: {
9162 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
9166 case CEE_MONO_ICALL: {
9168 MonoJitICallInfo *info;
9170 token = read32 (ip + 2);
9171 func = mono_method_get_wrapper_data (method, token);
9172 info = mono_find_jit_icall_by_addr (func);
9175 CHECK_STACK (info->sig->param_count);
9176 sp -= info->sig->param_count;
9178 ins = mono_emit_jit_icall (cfg, info->func, sp);
9179 if (!MONO_TYPE_IS_VOID (info->sig->ret))
9183 inline_costs += 10 * num_calls++;
9187 case CEE_MONO_LDPTR: {
9190 CHECK_STACK_OVF (1);
9192 token = read32 (ip + 2);
9194 ptr = mono_method_get_wrapper_data (method, token);
9195 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
9196 MonoJitICallInfo *callinfo;
9197 const char *icall_name;
9199 icall_name = method->name + strlen ("__icall_wrapper_");
9200 g_assert (icall_name);
9201 callinfo = mono_find_jit_icall_by_name (icall_name);
9202 g_assert (callinfo);
9204 if (ptr == callinfo->func) {
9205 /* Will be transformed into an AOTCONST later */
9206 EMIT_NEW_PCONST (cfg, ins, ptr);
9212 /* FIXME: Generalize this */
9213 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
9214 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
9219 EMIT_NEW_PCONST (cfg, ins, ptr);
9222 inline_costs += 10 * num_calls++;
9223 /* Can't embed random pointers into AOT code */
9224 cfg->disable_aot = 1;
9227 case CEE_MONO_ICALL_ADDR: {
9228 MonoMethod *cmethod;
9231 CHECK_STACK_OVF (1);
9233 token = read32 (ip + 2);
9235 cmethod = mono_method_get_wrapper_data (method, token);
9237 if (cfg->compile_aot) {
9238 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
9240 ptr = mono_lookup_internal_call (cmethod);
9242 EMIT_NEW_PCONST (cfg, ins, ptr);
9248 case CEE_MONO_VTADDR: {
9249 MonoInst *src_var, *src;
9255 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9256 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
9261 case CEE_MONO_NEWOBJ: {
9262 MonoInst *iargs [2];
9264 CHECK_STACK_OVF (1);
9266 token = read32 (ip + 2);
9267 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9268 mono_class_init (klass);
9269 NEW_DOMAINCONST (cfg, iargs [0]);
9270 MONO_ADD_INS (cfg->cbb, iargs [0]);
9271 NEW_CLASSCONST (cfg, iargs [1], klass);
9272 MONO_ADD_INS (cfg->cbb, iargs [1]);
9273 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9275 inline_costs += 10 * num_calls++;
9278 case CEE_MONO_OBJADDR:
9281 MONO_INST_NEW (cfg, ins, OP_MOVE);
9282 ins->dreg = alloc_preg (cfg);
9283 ins->sreg1 = sp [0]->dreg;
9284 ins->type = STACK_MP;
9285 MONO_ADD_INS (cfg->cbb, ins);
9289 case CEE_MONO_LDNATIVEOBJ:
9291 * Similar to LDOBJ, but instead load the unmanaged
9292 * representation of the vtype to the stack.
9297 token = read32 (ip + 2);
9298 klass = mono_method_get_wrapper_data (method, token);
9299 g_assert (klass->valuetype);
9300 mono_class_init (klass);
9303 MonoInst *src, *dest, *temp;
9306 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9307 temp->backend.is_pinvoke = 1;
9308 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9309 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9311 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9312 dest->type = STACK_VTYPE;
9313 dest->klass = klass;
9319 case CEE_MONO_RETOBJ: {
9321 * Same as RET, but return the native representation of a vtype
9324 g_assert (cfg->ret);
9325 g_assert (mono_method_signature (method)->pinvoke);
9330 token = read32 (ip + 2);
9331 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9333 if (!cfg->vret_addr) {
9334 g_assert (cfg->ret_var_is_local);
9336 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9338 EMIT_NEW_RETLOADA (cfg, ins);
9340 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9342 if (sp != stack_start)
9345 MONO_INST_NEW (cfg, ins, OP_BR);
9346 ins->inst_target_bb = end_bblock;
9347 MONO_ADD_INS (bblock, ins);
9348 link_bblock (cfg, bblock, end_bblock);
9349 start_new_bblock = 1;
9353 case CEE_MONO_CISINST:
9354 case CEE_MONO_CCASTCLASS: {
9359 token = read32 (ip + 2);
9360 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9361 if (ip [1] == CEE_MONO_CISINST)
9362 ins = handle_cisinst (cfg, klass, sp [0]);
9364 ins = handle_ccastclass (cfg, klass, sp [0]);
9370 case CEE_MONO_SAVE_LMF:
9371 case CEE_MONO_RESTORE_LMF:
9372 #ifdef MONO_ARCH_HAVE_LMF_OPS
9373 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9374 MONO_ADD_INS (bblock, ins);
9375 cfg->need_lmf_area = TRUE;
9379 case CEE_MONO_CLASSCONST:
9380 CHECK_STACK_OVF (1);
9382 token = read32 (ip + 2);
9383 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9386 inline_costs += 10 * num_calls++;
9388 case CEE_MONO_NOT_TAKEN:
9389 bblock->out_of_line = TRUE;
9393 CHECK_STACK_OVF (1);
9395 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9396 ins->dreg = alloc_preg (cfg);
9397 ins->inst_offset = (gint32)read32 (ip + 2);
9398 ins->type = STACK_PTR;
9399 MONO_ADD_INS (bblock, ins);
9403 case CEE_MONO_DYN_CALL: {
9406 /* It would be easier to call a trampoline, but that would put an
9407 * extra frame on the stack, confusing exception handling. So
9408 * implement it inline using an opcode for now.
9411 if (!cfg->dyn_call_var) {
9412 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9413 /* prevent it from being register allocated */
9414 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
9417 /* Has to use a call inst since it local regalloc expects it */
9418 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
9419 ins = (MonoInst*)call;
9421 ins->sreg1 = sp [0]->dreg;
9422 ins->sreg2 = sp [1]->dreg;
9423 MONO_ADD_INS (bblock, ins);
9425 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
9426 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
9430 inline_costs += 10 * num_calls++;
9435 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9445 /* somewhat similar to LDTOKEN */
9446 MonoInst *addr, *vtvar;
9447 CHECK_STACK_OVF (1);
9448 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9450 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9451 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9453 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9454 ins->type = STACK_VTYPE;
9455 ins->klass = mono_defaults.argumenthandle_class;
9468 * The following transforms:
9469 * CEE_CEQ into OP_CEQ
9470 * CEE_CGT into OP_CGT
9471 * CEE_CGT_UN into OP_CGT_UN
9472 * CEE_CLT into OP_CLT
9473 * CEE_CLT_UN into OP_CLT_UN
9475 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9477 MONO_INST_NEW (cfg, ins, cmp->opcode);
9479 cmp->sreg1 = sp [0]->dreg;
9480 cmp->sreg2 = sp [1]->dreg;
9481 type_from_op (cmp, sp [0], sp [1]);
9483 if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9484 cmp->opcode = OP_LCOMPARE;
9485 else if (sp [0]->type == STACK_R8)
9486 cmp->opcode = OP_FCOMPARE;
9488 cmp->opcode = OP_ICOMPARE;
9489 MONO_ADD_INS (bblock, cmp);
9490 ins->type = STACK_I4;
9491 ins->dreg = alloc_dreg (cfg, ins->type);
9492 type_from_op (ins, sp [0], sp [1]);
9494 if (cmp->opcode == OP_FCOMPARE) {
9496 * The backends expect the fceq opcodes to do the
9499 cmp->opcode = OP_NOP;
9500 ins->sreg1 = cmp->sreg1;
9501 ins->sreg2 = cmp->sreg2;
9503 MONO_ADD_INS (bblock, ins);
9510 MonoMethod *cil_method;
9511 gboolean needs_static_rgctx_invoke;
9512 int invoke_context_used = 0;
9514 CHECK_STACK_OVF (1);
9516 n = read32 (ip + 2);
9517 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9520 mono_class_init (cmethod->klass);
9522 mono_save_token_info (cfg, image, n, cmethod);
9524 if (cfg->generic_sharing_context)
9525 context_used = mono_method_check_context_used (cmethod);
9527 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9529 cil_method = cmethod;
9530 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9531 METHOD_ACCESS_FAILURE;
9533 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9534 if (check_linkdemand (cfg, method, cmethod))
9536 CHECK_CFG_EXCEPTION;
9537 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9538 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9542 * Optimize the common case of ldftn+delegate creation
9544 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
9545 /* FIXME: SGEN support */
9546 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9547 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9548 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9549 MonoInst *target_ins;
9552 invoke = mono_get_delegate_invoke (ctor_method->klass);
9553 if (!invoke || !mono_method_signature (invoke))
9556 if (cfg->generic_sharing_context)
9557 invoke_context_used = mono_method_check_context_used (invoke);
9559 if (invoke_context_used == 0) {
9561 if (cfg->verbose_level > 3)
9562 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9563 target_ins = sp [-1];
9565 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
9566 CHECK_CFG_EXCEPTION;
9575 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9576 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9580 inline_costs += 10 * num_calls++;
9583 case CEE_LDVIRTFTN: {
9588 n = read32 (ip + 2);
9589 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9592 mono_class_init (cmethod->klass);
9594 if (cfg->generic_sharing_context)
9595 context_used = mono_method_check_context_used (cmethod);
9597 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9598 if (check_linkdemand (cfg, method, cmethod))
9600 CHECK_CFG_EXCEPTION;
9601 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9602 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9608 args [1] = emit_get_rgctx_method (cfg, context_used,
9609 cmethod, MONO_RGCTX_INFO_METHOD);
9612 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9614 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9617 inline_costs += 10 * num_calls++;
9621 CHECK_STACK_OVF (1);
9623 n = read16 (ip + 2);
9625 EMIT_NEW_ARGLOAD (cfg, ins, n);
9630 CHECK_STACK_OVF (1);
9632 n = read16 (ip + 2);
9634 NEW_ARGLOADA (cfg, ins, n);
9635 MONO_ADD_INS (cfg->cbb, ins);
9643 n = read16 (ip + 2);
9645 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9647 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9651 CHECK_STACK_OVF (1);
9653 n = read16 (ip + 2);
9655 EMIT_NEW_LOCLOAD (cfg, ins, n);
9660 unsigned char *tmp_ip;
9661 CHECK_STACK_OVF (1);
9663 n = read16 (ip + 2);
9666 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9672 EMIT_NEW_LOCLOADA (cfg, ins, n);
9681 n = read16 (ip + 2);
9683 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9685 emit_stloc_ir (cfg, sp, header, n);
9692 if (sp != stack_start)
9694 if (cfg->method != method)
9696 * Inlining this into a loop in a parent could lead to
9697 * stack overflows which is different behavior than the
9698 * non-inlined case, thus disable inlining in this case.
9700 goto inline_failure;
9702 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9703 ins->dreg = alloc_preg (cfg);
9704 ins->sreg1 = sp [0]->dreg;
9705 ins->type = STACK_PTR;
9706 MONO_ADD_INS (cfg->cbb, ins);
9708 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9710 ins->flags |= MONO_INST_INIT;
9715 case CEE_ENDFILTER: {
9716 MonoExceptionClause *clause, *nearest;
9717 int cc, nearest_num;
9721 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9723 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9724 ins->sreg1 = (*sp)->dreg;
9725 MONO_ADD_INS (bblock, ins);
9726 start_new_bblock = 1;
9731 for (cc = 0; cc < header->num_clauses; ++cc) {
9732 clause = &header->clauses [cc];
9733 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9734 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9735 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9741 if ((ip - header->code) != nearest->handler_offset)
9746 case CEE_UNALIGNED_:
9747 ins_flag |= MONO_INST_UNALIGNED;
9748 /* FIXME: record alignment? we can assume 1 for now */
9753 ins_flag |= MONO_INST_VOLATILE;
9757 ins_flag |= MONO_INST_TAILCALL;
9758 cfg->flags |= MONO_CFG_HAS_TAIL;
9759 /* Can't inline tail calls at this time */
9760 inline_costs += 100000;
9767 token = read32 (ip + 2);
9768 klass = mini_get_class (method, token, generic_context);
9769 CHECK_TYPELOAD (klass);
9770 if (generic_class_is_reference_type (cfg, klass))
9771 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9773 mini_emit_initobj (cfg, *sp, NULL, klass);
9777 case CEE_CONSTRAINED_:
9779 token = read32 (ip + 2);
9780 if (method->wrapper_type != MONO_WRAPPER_NONE)
9781 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
9783 constrained_call = mono_class_get_full (image, token, generic_context);
9784 CHECK_TYPELOAD (constrained_call);
9789 MonoInst *iargs [3];
9793 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9794 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9795 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9796 /* emit_memset only works when val == 0 */
9797 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9802 if (ip [1] == CEE_CPBLK) {
9803 MonoMethod *memcpy_method = get_memcpy_method ();
9804 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9806 MonoMethod *memset_method = get_memset_method ();
9807 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9817 ins_flag |= MONO_INST_NOTYPECHECK;
9819 ins_flag |= MONO_INST_NORANGECHECK;
9820 /* we ignore the no-nullcheck for now since we
9821 * really do it explicitly only when doing callvirt->call
9827 int handler_offset = -1;
9829 for (i = 0; i < header->num_clauses; ++i) {
9830 MonoExceptionClause *clause = &header->clauses [i];
9831 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9832 handler_offset = clause->handler_offset;
9837 bblock->flags |= BB_EXCEPTION_UNSAFE;
9839 g_assert (handler_offset != -1);
9841 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9842 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9843 ins->sreg1 = load->dreg;
9844 MONO_ADD_INS (bblock, ins);
9846 link_bblock (cfg, bblock, end_bblock);
9847 start_new_bblock = 1;
9855 CHECK_STACK_OVF (1);
9857 token = read32 (ip + 2);
9858 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC) {
9859 MonoType *type = mono_type_create_from_typespec (image, token);
9860 token = mono_type_size (type, &ialign);
9862 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9863 CHECK_TYPELOAD (klass);
9864 mono_class_init (klass);
9865 token = mono_class_value_size (klass, &align);
9867 EMIT_NEW_ICONST (cfg, ins, token);
9872 case CEE_REFANYTYPE: {
9873 MonoInst *src_var, *src;
9879 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9881 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9882 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9883 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9901 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
9911 g_warning ("opcode 0x%02x not handled", *ip);
9915 if (start_new_bblock != 1)
9918 bblock->cil_length = ip - bblock->cil_code;
9919 bblock->next_bb = end_bblock;
9921 if (cfg->method == method && cfg->domainvar) {
9923 MonoInst *get_domain;
9925 cfg->cbb = init_localsbb;
9927 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9928 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9931 get_domain->dreg = alloc_preg (cfg);
9932 MONO_ADD_INS (cfg->cbb, get_domain);
9934 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9935 MONO_ADD_INS (cfg->cbb, store);
9938 #ifdef TARGET_POWERPC
9939 if (cfg->compile_aot)
9940 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
9941 mono_get_got_var (cfg);
9944 if (cfg->method == method && cfg->got_var)
9945 mono_emit_load_got_addr (cfg);
9950 cfg->cbb = init_localsbb;
9952 for (i = 0; i < header->num_locals; ++i) {
9953 MonoType *ptype = header->locals [i];
9954 int t = ptype->type;
9955 dreg = cfg->locals [i]->dreg;
9957 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
9958 t = mono_class_enum_basetype (ptype->data.klass)->type;
9960 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9961 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
9962 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
9963 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
9964 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
9965 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
9966 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9967 ins->type = STACK_R8;
9968 ins->inst_p0 = (void*)&r8_0;
9969 ins->dreg = alloc_dreg (cfg, STACK_R8);
9970 MONO_ADD_INS (init_localsbb, ins);
9971 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
9972 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
9973 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
9974 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
9976 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9981 /* Add a sequence point for method entry/exit events */
9983 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
9984 MONO_ADD_INS (init_localsbb, ins);
9985 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
9986 MONO_ADD_INS (cfg->bb_exit, ins);
9991 if (cfg->method == method) {
9993 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9994 bb->region = mono_find_block_region (cfg, bb->real_offset);
9996 mono_create_spvar_for_region (cfg, bb->region);
9997 if (cfg->verbose_level > 2)
9998 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
10002 g_slist_free (class_inits);
10003 dont_inline = g_list_remove (dont_inline, method);
10005 if (inline_costs < 0) {
10008 /* Method is too large */
10009 mname = mono_method_full_name (method, TRUE);
10010 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
10011 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
10016 if ((cfg->verbose_level > 2) && (cfg->method == method))
10017 mono_print_code (cfg, "AFTER METHOD-TO-IR");
10019 return inline_costs;
10022 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
10023 g_slist_free (class_inits);
10024 dont_inline = g_list_remove (dont_inline, method);
10028 g_slist_free (class_inits);
10029 dont_inline = g_list_remove (dont_inline, method);
10033 g_slist_free (class_inits);
10034 dont_inline = g_list_remove (dont_inline, method);
10035 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
10039 g_slist_free (class_inits);
10040 dont_inline = g_list_remove (dont_inline, method);
10041 set_exception_type_from_invalid_il (cfg, method, ip);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a register-source store opcode (STORE*_MEMBASE_REG) to its
 * immediate-source counterpart (STORE*_MEMBASE_IMM), preserving the
 * access width (1/2/4/8 bytes or pointer-sized).  Any opcode without
 * an _IMM variant aborts via g_assert_not_reached ().
 * NOTE(review): this dump is fragmentary - the return type, braces,
 * 'switch' header and 'default:' label are not visible here.
 */
10046 store_membase_reg_to_store_membase_imm (int opcode)
10049 case OP_STORE_MEMBASE_REG:
10050 return OP_STORE_MEMBASE_IMM;
10051 case OP_STOREI1_MEMBASE_REG:
10052 return OP_STOREI1_MEMBASE_IMM;
10053 case OP_STOREI2_MEMBASE_REG:
10054 return OP_STOREI2_MEMBASE_IMM;
10055 case OP_STOREI4_MEMBASE_REG:
10056 return OP_STOREI4_MEMBASE_IMM;
10057 case OP_STOREI8_MEMBASE_REG:
10058 return OP_STOREI8_MEMBASE_IMM;
/* no immediate form exists for this opcode */
10060 g_assert_not_reached ();
10066 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 *
 *   Map a two-register IR opcode to its immediate-operand (_IMM)
 * variant, so constant operands can be encoded directly into the
 * instruction.  Covers 32-bit (I*) and 64-bit (L*) ALU ops, compares,
 * stores, some x86/amd64-specific opcodes and OP_LOCALLOC.
 * NOTE(review): the matching 'case' labels for most returns below are
 * not visible in this fragmentary dump; each return presumably pairs
 * with the reg-reg opcode of the same name (OP_IADD -> OP_IADD_IMM,
 * etc.) - confirm against the full source.
 */
10069 mono_op_to_op_imm (int opcode)
/* 32-bit integer ALU / shift ops */
10073 return OP_IADD_IMM;
10075 return OP_ISUB_IMM;
10077 return OP_IDIV_IMM;
10079 return OP_IDIV_UN_IMM;
10081 return OP_IREM_IMM;
10083 return OP_IREM_UN_IMM;
10085 return OP_IMUL_IMM;
10087 return OP_IAND_IMM;
10091 return OP_IXOR_IMM;
10093 return OP_ISHL_IMM;
10095 return OP_ISHR_IMM;
10097 return OP_ISHR_UN_IMM;
/* 64-bit integer ALU / shift ops */
10100 return OP_LADD_IMM;
10102 return OP_LSUB_IMM;
10104 return OP_LAND_IMM;
10108 return OP_LXOR_IMM;
10110 return OP_LSHL_IMM;
10112 return OP_LSHR_IMM;
10114 return OP_LSHR_UN_IMM;
/* compares */
10117 return OP_COMPARE_IMM;
10119 return OP_ICOMPARE_IMM;
10121 return OP_LCOMPARE_IMM;
/* stores: switch the source register to an immediate */
10123 case OP_STORE_MEMBASE_REG:
10124 return OP_STORE_MEMBASE_IMM;
10125 case OP_STOREI1_MEMBASE_REG:
10126 return OP_STOREI1_MEMBASE_IMM;
10127 case OP_STOREI2_MEMBASE_REG:
10128 return OP_STOREI2_MEMBASE_IMM;
10129 case OP_STOREI4_MEMBASE_REG:
10130 return OP_STOREI4_MEMBASE_IMM;
/* arch-specific opcodes */
10132 #if defined(TARGET_X86) || defined (TARGET_AMD64)
10134 return OP_X86_PUSH_IMM;
10135 case OP_X86_COMPARE_MEMBASE_REG:
10136 return OP_X86_COMPARE_MEMBASE_IMM;
10138 #if defined(TARGET_AMD64)
10139 case OP_AMD64_ICOMPARE_MEMBASE_REG:
10140 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* an indirect call through a constant becomes a direct call */
10142 case OP_VOIDCALL_REG:
10143 return OP_VOIDCALL;
10151 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL indirect-load opcode (CEE_LDIND_*) to the corresponding
 * IR load-from-base+offset opcode (OP_LOAD*_MEMBASE), preserving the
 * operand width and signedness.  CEE_LDIND_I and CEE_LDIND_REF both
 * map to the pointer-sized OP_LOAD_MEMBASE.  Unknown opcodes abort.
 * NOTE(review): most 'case CEE_LDIND_*' labels are missing from this
 * fragmentary dump.
 */
10158 ldind_to_load_membase (int opcode)
10162 return OP_LOADI1_MEMBASE;
10164 return OP_LOADU1_MEMBASE;
10166 return OP_LOADI2_MEMBASE;
10168 return OP_LOADU2_MEMBASE;
10170 return OP_LOADI4_MEMBASE;
10172 return OP_LOADU4_MEMBASE;
10174 return OP_LOAD_MEMBASE;
/* object references load as a native-sized word */
10175 case CEE_LDIND_REF:
10176 return OP_LOAD_MEMBASE;
10178 return OP_LOADI8_MEMBASE;
10180 return OP_LOADR4_MEMBASE;
10182 return OP_LOADR8_MEMBASE;
10184 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL indirect-store opcode (CEE_STIND_*) to the corresponding
 * IR store-to-base+offset opcode (OP_STORE*_MEMBASE_REG), preserving
 * the operand width.  CEE_STIND_REF stores a native-sized word.
 * Unknown opcodes abort.
 * NOTE(review): most 'case CEE_STIND_*' labels are missing from this
 * fragmentary dump.
 */
10191 stind_to_store_membase (int opcode)
10195 return OP_STOREI1_MEMBASE_REG;
10197 return OP_STOREI2_MEMBASE_REG;
10199 return OP_STOREI4_MEMBASE_REG;
/* object references store as a native-sized word */
10201 case CEE_STIND_REF:
10202 return OP_STORE_MEMBASE_REG;
10204 return OP_STOREI8_MEMBASE_REG;
10206 return OP_STORER4_MEMBASE_REG;
10208 return OP_STORER8_MEMBASE_REG;
10210 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a base+offset load (OP_LOAD*_MEMBASE) to the absolute-address
 * load form (OP_LOAD*_MEM).  Only x86/amd64 provide the _MEM forms
 * (guarded by the #if below); the 64-bit load is further gated on
 * SIZEOF_REGISTER == 8.  For other targets / opcodes the function
 * presumably returns a sentinel - the fallthrough path is not visible
 * in this fragmentary dump.
 */
10217 mono_load_membase_to_load_mem (int opcode)
10219 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
10220 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10222 case OP_LOAD_MEMBASE:
10223 return OP_LOAD_MEM;
10224 case OP_LOADU1_MEMBASE:
10225 return OP_LOADU1_MEM;
10226 case OP_LOADU2_MEMBASE:
10227 return OP_LOADU2_MEM;
10228 case OP_LOADI4_MEMBASE:
10229 return OP_LOADI4_MEM;
10230 case OP_LOADU4_MEMBASE:
10231 return OP_LOADU4_MEM;
10232 #if SIZEOF_REGISTER == 8
10233 case OP_LOADI8_MEMBASE:
10234 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU opcode whose destination is about to be stored back
 * with STORE_OPCODE, return the x86/amd64 read-modify-write opcode
 * that operates directly on the destination memory location, fusing
 * the op and the store into one instruction.  Returns a sentinel
 * (not visible in this fragmentary dump, presumably -1) when no
 * fusion is possible.  Only compiled in for TARGET_X86/TARGET_AMD64.
 * NOTE(review): the 'case OP_*' labels pairing with each return are
 * missing from this dump.
 */
10243 op_to_op_dest_membase (int store_opcode, int opcode)
10245 #if defined(TARGET_X86)
/* x86: only 32-bit / pointer-sized stores can be fused */
10246 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
10251 return OP_X86_ADD_MEMBASE_REG;
10253 return OP_X86_SUB_MEMBASE_REG;
10255 return OP_X86_AND_MEMBASE_REG;
10257 return OP_X86_OR_MEMBASE_REG;
10259 return OP_X86_XOR_MEMBASE_REG;
10262 return OP_X86_ADD_MEMBASE_IMM;
10265 return OP_X86_SUB_MEMBASE_IMM;
10268 return OP_X86_AND_MEMBASE_IMM;
10271 return OP_X86_OR_MEMBASE_IMM;
10274 return OP_X86_XOR_MEMBASE_IMM;
10280 #if defined(TARGET_AMD64)
/* amd64: 32-bit, 64-bit and pointer-sized stores can be fused */
10281 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
10286 return OP_X86_ADD_MEMBASE_REG;
10288 return OP_X86_SUB_MEMBASE_REG;
10290 return OP_X86_AND_MEMBASE_REG;
10292 return OP_X86_OR_MEMBASE_REG;
10294 return OP_X86_XOR_MEMBASE_REG;
10296 return OP_X86_ADD_MEMBASE_IMM;
10298 return OP_X86_SUB_MEMBASE_IMM;
10300 return OP_X86_AND_MEMBASE_IMM;
10302 return OP_X86_OR_MEMBASE_IMM;
10304 return OP_X86_XOR_MEMBASE_IMM;
/* 64-bit variants */
10306 return OP_AMD64_ADD_MEMBASE_REG;
10308 return OP_AMD64_SUB_MEMBASE_REG;
10310 return OP_AMD64_AND_MEMBASE_REG;
10312 return OP_AMD64_OR_MEMBASE_REG;
10314 return OP_AMD64_XOR_MEMBASE_REG;
10317 return OP_AMD64_ADD_MEMBASE_IMM;
10320 return OP_AMD64_SUB_MEMBASE_IMM;
10323 return OP_AMD64_AND_MEMBASE_IMM;
10326 return OP_AMD64_OR_MEMBASE_IMM;
10329 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   When an opcode's boolean result is stored through a 1-byte store
 * (OP_STOREI1_MEMBASE_REG), fuse the two into an x86 SETcc-to-memory
 * opcode.  x86/amd64 only; other cases return a sentinel not visible
 * in this fragmentary dump (the 'case' labels for the two returns -
 * presumably the ceq/cne compare opcodes - are also missing).
 */
10339 op_to_op_store_membase (int store_opcode, int opcode)
10341 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10344 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10345 return OP_X86_SETEQ_MEMBASE;
10347 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10348 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fold a load feeding OPCODE's first source operand into the
 * instruction itself, producing an x86/amd64 op-with-memory-operand
 * opcode (compare/push with a membase source).  LOAD_OPCODE is the
 * load that produced sreg1.  Returns a sentinel (not visible here)
 * when no fused form exists.
 * NOTE(review): fragmentary dump - the surrounding #if/#endif for the
 * x86 half and several 'case' labels are missing; the amd64 COMPARE
 * cases at 10393-10396 sit inside a "FIXME: only works for 32 bit
 * immediates" comment whose extent is not visible - confirm whether
 * they are commented out upstream.
 */
10356 op_to_op_src1_membase (int load_opcode, int opcode)
10359 /* FIXME: This has sign extension issues */
/* byte compare against an immediate can use the 8-bit memory form */
10361 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10362 return OP_X86_COMPARE_MEMBASE8_IMM;
/* x86: only 32-bit / pointer-sized loads can be folded */
10365 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10370 return OP_X86_PUSH_MEMBASE;
10371 case OP_COMPARE_IMM:
10372 case OP_ICOMPARE_IMM:
10373 return OP_X86_COMPARE_MEMBASE_IMM;
10376 return OP_X86_COMPARE_MEMBASE_REG;
10380 #ifdef TARGET_AMD64
10381 /* FIXME: This has sign extension issues */
10383 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10384 return OP_X86_COMPARE_MEMBASE8_IMM;
/* push of a 64-bit / pointer-sized load */
10389 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10390 return OP_X86_PUSH_MEMBASE;
10392 /* FIXME: This only works for 32 bit immediates
10393 case OP_COMPARE_IMM:
10394 case OP_LCOMPARE_IMM:
10395 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10396 return OP_AMD64_COMPARE_MEMBASE_IMM;
10398 case OP_ICOMPARE_IMM:
10399 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10400 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10404 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10405 return OP_AMD64_COMPARE_MEMBASE_REG;
10408 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10409 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fold a load feeding OPCODE's second source operand into the
 * instruction itself, producing an x86/amd64 reg-op-memory opcode
 * (compare/add/sub/and/or/xor with a membase second operand).
 * LOAD_OPCODE is the load that produced sreg2.  Width of the fused
 * form is chosen from the load width: i4/u4 loads use the X86_*
 * 32-bit forms, i8/pointer loads use the AMD64_* 64-bit forms.
 * NOTE(review): fragmentary dump - the x86 #if guard and the 'case'
 * labels pairing with each return are missing.
 */
10418 op_to_op_src2_membase (int load_opcode, int opcode)
/* x86: only 32-bit / pointer-sized loads can be folded */
10421 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10427 return OP_X86_COMPARE_REG_MEMBASE;
10429 return OP_X86_ADD_REG_MEMBASE;
10431 return OP_X86_SUB_REG_MEMBASE;
10433 return OP_X86_AND_REG_MEMBASE;
10435 return OP_X86_OR_REG_MEMBASE;
10437 return OP_X86_XOR_REG_MEMBASE;
10441 #ifdef TARGET_AMD64
10444 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10445 return OP_AMD64_ICOMPARE_REG_MEMBASE;
10449 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10450 return OP_AMD64_COMPARE_REG_MEMBASE;
10453 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10454 return OP_X86_ADD_REG_MEMBASE;
10456 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10457 return OP_X86_SUB_REG_MEMBASE;
10459 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10460 return OP_X86_AND_REG_MEMBASE;
10462 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10463 return OP_X86_OR_REG_MEMBASE;
10465 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10466 return OP_X86_XOR_REG_MEMBASE;
10468 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10469 return OP_AMD64_ADD_REG_MEMBASE;
10471 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10472 return OP_AMD64_SUB_REG_MEMBASE;
10474 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10475 return OP_AMD64_AND_REG_MEMBASE;
10477 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10478 return OP_AMD64_OR_REG_MEMBASE;
10480 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10481 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuses the conversion for opcodes
 * that are emulated in software on this configuration: long shifts on
 * 32-bit targets without native long-shift support, and mul/div when
 * MONO_ARCH_EMULATE_MUL_DIV / MONO_ARCH_EMULATE_DIV are defined.
 * NOTE(review): the 'case' labels and sentinel returns inside the two
 * #if groups are not visible in this fragmentary dump.
 */
10489 mono_op_to_op_imm_noemul (int opcode)
10492 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
10497 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
/* everything else can use the ordinary immediate mapping */
10505 return mono_op_to_op_imm (opcode);
10509 #ifndef DISABLE_JIT
10512 * mono_handle_global_vregs:
10514 * Make vregs used in more than one bblock 'global', i.e. allocate a variable for them.
/*
 * mono_handle_global_vregs:
 *
 *   Pass over all bblocks and:
 *   1) promote any vreg referenced from more than one bblock to a
 *      'global' variable (a MonoInst created via
 *      mono_compile_create_var_for_vreg), typed from the INS_INFO
 *      spec character ('i'/'l'/'f'/vtype);
 *   2) demote variables used in only one bblock back to plain local
 *      vregs (flagged MONO_INST_IS_DEAD);
 *   3) compact cfg->varinfo/cfg->vars so dead entries are removed.
 */
10518 mono_handle_global_vregs (MonoCompile *cfg)
10520 gint32 *vreg_to_bb;
10521 MonoBasicBlock *bb;
/* vreg_to_bb [vreg] == 0: unseen, block_num+1: seen in one bb, -1: seen in several */
/* NOTE(review): element size is sizeof (gint32*) (pointer size) for a gint32
 * array, and '* cfg->next_vreg + 1' adds 1 byte, not 1 element - presumably
 * over-allocation that happens to be harmless; confirm upstream. */
10524 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
10526 #ifdef MONO_ARCH_SIMD_INTRINSICS
10527 if (cfg->uses_simd_intrinsics)
10528 mono_simd_simplify_indirection (cfg);
10531 /* Find local vregs used in more than one bb */
10532 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10533 MonoInst *ins = bb->code;
10534 int block_num = bb->block_num;
10536 if (cfg->verbose_level > 2)
10537 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
10540 for (; ins; ins = ins->next) {
10541 const char *spec = INS_INFO (ins->opcode);
10542 int regtype = 0, regindex;
10545 if (G_UNLIKELY (cfg->verbose_level > 2))
10546 mono_print_ins (ins);
/* CIL opcodes must have been lowered to machine IR by now */
10548 g_assert (ins->opcode >= MONO_CEE_LAST);
/* regindex 0 = dest, 1..3 = the three source registers */
10550 for (regindex = 0; regindex < 4; regindex ++) {
10553 if (regindex == 0) {
10554 regtype = spec [MONO_INST_DEST];
10555 if (regtype == ' ')
10558 } else if (regindex == 1) {
10559 regtype = spec [MONO_INST_SRC1];
10560 if (regtype == ' ')
10563 } else if (regindex == 2) {
10564 regtype = spec [MONO_INST_SRC2];
10565 if (regtype == ' ')
10568 } else if (regindex == 3) {
10569 regtype = spec [MONO_INST_SRC3];
10570 if (regtype == ' ')
10575 #if SIZEOF_REGISTER == 4
10576 /* In the LLVM case, the long opcodes are not decomposed */
10577 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
10579 * Since some instructions reference the original long vreg,
10580 * and some reference the two component vregs, it is quite hard
10581 * to determine when it needs to be global. So be conservative.
10583 if (!get_vreg_to_inst (cfg, vreg)) {
10584 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10586 if (cfg->verbose_level > 2)
10587 printf ("LONG VREG R%d made global.\n", vreg);
10591 * Make the component vregs volatile since the optimizations can
10592 * get confused otherwise.
/* vreg+1 / vreg+2 are the low/high 32-bit halves of the long */
10594 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
10595 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
10599 g_assert (vreg != -1);
10601 prev_bb = vreg_to_bb [vreg];
10602 if (prev_bb == 0) {
10603 /* 0 is a valid block num */
10604 vreg_to_bb [vreg] = block_num + 1;
10605 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* hardware registers are never made global */
10606 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
10609 if (!get_vreg_to_inst (cfg, vreg)) {
10610 if (G_UNLIKELY (cfg->verbose_level > 2))
10611 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* pick the variable type from the spec char: 'i', 'l', 'f' or vtype */
10615 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
10618 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10621 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
10624 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
10627 g_assert_not_reached ();
10631 /* Flag as having been used in more than one bb */
10632 vreg_to_bb [vreg] = -1;
10638 /* If a variable is used in only one bblock, convert it into a local vreg */
10639 for (i = 0; i < cfg->num_varinfo; i++) {
10640 MonoInst *var = cfg->varinfo [i];
10641 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
10643 switch (var->type) {
10649 #if SIZEOF_REGISTER == 8
10652 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
10653 /* Enabling this screws up the fp stack on x86 */
10656 /* Arguments are implicitly global */
10657 /* Putting R4 vars into registers doesn't work currently */
10658 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
10660 * Make that the variable's liveness interval doesn't contain a call, since
10661 * that would cause the lvreg to be spilled, making the whole optimization
10664 /* This is too slow for JIT compilation */
/* NOTE(review): the liveness scan below appears to be inside a
 * preprocessor-disabled region in the full source (the guard lines are
 * missing from this fragmentary dump) - confirm before relying on it. */
10666 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
10668 int def_index, call_index, ins_index;
10669 gboolean spilled = FALSE;
10674 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
10675 const char *spec = INS_INFO (ins->opcode);
10677 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
10678 def_index = ins_index;
/* NOTE(review): both arms of this || test SRC1/sreg1 - the second arm
 * was presumably meant to check SRC2/sreg2; confirm upstream. */
10680 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
10681 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
10682 if (call_index > def_index) {
10688 if (MONO_IS_CALL (ins))
10689 call_index = ins_index;
10699 if (G_UNLIKELY (cfg->verbose_level > 2))
10700 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* demote: mark dead and drop the vreg->var mapping */
10701 var->flags |= MONO_INST_IS_DEAD;
10702 cfg->vreg_to_inst [var->dreg] = NULL;
10709 * Compress the varinfo and vars tables so the liveness computation is faster and
10710 * takes up less space.
10713 for (i = 0; i < cfg->num_varinfo; ++i) {
10714 MonoInst *var = cfg->varinfo [i];
10715 if (pos < i && cfg->locals_start == i)
10716 cfg->locals_start = pos;
10717 if (!(var->flags & MONO_INST_IS_DEAD)) {
10719 cfg->varinfo [pos] = cfg->varinfo [i];
10720 cfg->varinfo [pos]->inst_c0 = pos;
10721 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
10722 cfg->vars [pos].idx = pos;
10723 #if SIZEOF_REGISTER == 4
10724 if (cfg->varinfo [pos]->type == STACK_I8) {
10725 /* Modify the two component vars too */
10728 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
10729 var1->inst_c0 = pos;
10730 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
10731 var1->inst_c0 = pos;
10738 cfg->num_varinfo = pos;
10739 if (cfg->locals_start > cfg->num_varinfo)
10740 cfg->locals_start = cfg->num_varinfo;
10744 * mono_spill_global_vars:
10746 * Generate spill code for variables which are not allocated to registers,
10747 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10748 * code is generated which could be optimized by the local optimization passes.
10751 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
10753 MonoBasicBlock *bb;
10755 int orig_next_vreg;
10756 guint32 *vreg_to_lvreg;
10758 guint32 i, lvregs_len;
10759 gboolean dest_has_lvreg = FALSE;
10760 guint32 stacktypes [128];
10761 MonoInst **live_range_start, **live_range_end;
10762 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
10764 *need_local_opts = FALSE;
10766 memset (spec2, 0, sizeof (spec2));
10768 /* FIXME: Move this function to mini.c */
10769 stacktypes ['i'] = STACK_PTR;
10770 stacktypes ['l'] = STACK_I8;
10771 stacktypes ['f'] = STACK_R8;
10772 #ifdef MONO_ARCH_SIMD_INTRINSICS
10773 stacktypes ['x'] = STACK_VTYPE;
10776 #if SIZEOF_REGISTER == 4
10777 /* Create MonoInsts for longs */
10778 for (i = 0; i < cfg->num_varinfo; i++) {
10779 MonoInst *ins = cfg->varinfo [i];
10781 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
10782 switch (ins->type) {
10783 #ifdef MONO_ARCH_SOFT_FLOAT
10789 g_assert (ins->opcode == OP_REGOFFSET);
10791 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10793 tree->opcode = OP_REGOFFSET;
10794 tree->inst_basereg = ins->inst_basereg;
10795 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10797 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10799 tree->opcode = OP_REGOFFSET;
10800 tree->inst_basereg = ins->inst_basereg;
10801 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10811 /* FIXME: widening and truncation */
10814 * As an optimization, when a variable allocated to the stack is first loaded into
10815 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10816 * the variable again.
10818 orig_next_vreg = cfg->next_vreg;
10819 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
10820 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10824 * These arrays contain the first and last instructions accessing a given
10826 * Since we emit bblocks in the same order we process them here, and we
10827 * don't split live ranges, these will precisely describe the live range of
10828 * the variable, i.e. the instruction range where a valid value can be found
10829 * in the variables location.
10831 /* FIXME: Only do this if debugging info is requested */
10832 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
10833 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
10834 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10835 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10837 /* Add spill loads/stores */
10838 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10841 if (cfg->verbose_level > 2)
10842 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
10844 /* Clear vreg_to_lvreg array */
10845 for (i = 0; i < lvregs_len; i++)
10846 vreg_to_lvreg [lvregs [i]] = 0;
10850 MONO_BB_FOR_EACH_INS (bb, ins) {
10851 const char *spec = INS_INFO (ins->opcode);
10852 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
10853 gboolean store, no_lvreg;
10854 int sregs [MONO_MAX_SRC_REGS];
10856 if (G_UNLIKELY (cfg->verbose_level > 2))
10857 mono_print_ins (ins);
10859 if (ins->opcode == OP_NOP)
10863 * We handle LDADDR here as well, since it can only be decomposed
10864 * when variable addresses are known.
10866 if (ins->opcode == OP_LDADDR) {
10867 MonoInst *var = ins->inst_p0;
10869 if (var->opcode == OP_VTARG_ADDR) {
10870 /* Happens on SPARC/S390 where vtypes are passed by reference */
10871 MonoInst *vtaddr = var->inst_left;
10872 if (vtaddr->opcode == OP_REGVAR) {
10873 ins->opcode = OP_MOVE;
10874 ins->sreg1 = vtaddr->dreg;
10876 else if (var->inst_left->opcode == OP_REGOFFSET) {
10877 ins->opcode = OP_LOAD_MEMBASE;
10878 ins->inst_basereg = vtaddr->inst_basereg;
10879 ins->inst_offset = vtaddr->inst_offset;
10883 g_assert (var->opcode == OP_REGOFFSET);
10885 ins->opcode = OP_ADD_IMM;
10886 ins->sreg1 = var->inst_basereg;
10887 ins->inst_imm = var->inst_offset;
10890 *need_local_opts = TRUE;
10891 spec = INS_INFO (ins->opcode);
10894 if (ins->opcode < MONO_CEE_LAST) {
10895 mono_print_ins (ins);
10896 g_assert_not_reached ();
10900 * Store opcodes have destbasereg in the dreg, but in reality, it is an
10904 if (MONO_IS_STORE_MEMBASE (ins)) {
10905 tmp_reg = ins->dreg;
10906 ins->dreg = ins->sreg2;
10907 ins->sreg2 = tmp_reg;
10910 spec2 [MONO_INST_DEST] = ' ';
10911 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10912 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10913 spec2 [MONO_INST_SRC3] = ' ';
10915 } else if (MONO_IS_STORE_MEMINDEX (ins))
10916 g_assert_not_reached ();
10921 if (G_UNLIKELY (cfg->verbose_level > 2)) {
10922 printf ("\t %.3s %d", spec, ins->dreg);
10923 num_sregs = mono_inst_get_src_registers (ins, sregs);
10924 for (srcindex = 0; srcindex < 3; ++srcindex)
10925 printf (" %d", sregs [srcindex]);
10932 regtype = spec [MONO_INST_DEST];
10933 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
10936 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
10937 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
10938 MonoInst *store_ins;
10940 MonoInst *def_ins = ins;
10941 int dreg = ins->dreg; /* The original vreg */
10943 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
10945 if (var->opcode == OP_REGVAR) {
10946 ins->dreg = var->dreg;
10947 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
10949 * Instead of emitting a load+store, use a _membase opcode.
10951 g_assert (var->opcode == OP_REGOFFSET);
10952 if (ins->opcode == OP_MOVE) {
10956 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
10957 ins->inst_basereg = var->inst_basereg;
10958 ins->inst_offset = var->inst_offset;
10961 spec = INS_INFO (ins->opcode);
10965 g_assert (var->opcode == OP_REGOFFSET);
10967 prev_dreg = ins->dreg;
10969 /* Invalidate any previous lvreg for this vreg */
10970 vreg_to_lvreg [ins->dreg] = 0;
10974 #ifdef MONO_ARCH_SOFT_FLOAT
10975 if (store_opcode == OP_STORER8_MEMBASE_REG) {
10977 store_opcode = OP_STOREI8_MEMBASE_REG;
10981 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
10983 if (regtype == 'l') {
10984 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
10985 mono_bblock_insert_after_ins (bb, ins, store_ins);
10986 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
10987 mono_bblock_insert_after_ins (bb, ins, store_ins);
10988 def_ins = store_ins;
10991 g_assert (store_opcode != OP_STOREV_MEMBASE);
10993 /* Try to fuse the store into the instruction itself */
10994 /* FIXME: Add more instructions */
10995 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
10996 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
10997 ins->inst_imm = ins->inst_c0;
10998 ins->inst_destbasereg = var->inst_basereg;
10999 ins->inst_offset = var->inst_offset;
11000 spec = INS_INFO (ins->opcode);
11001 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
11002 ins->opcode = store_opcode;
11003 ins->inst_destbasereg = var->inst_basereg;
11004 ins->inst_offset = var->inst_offset;
11008 tmp_reg = ins->dreg;
11009 ins->dreg = ins->sreg2;
11010 ins->sreg2 = tmp_reg;
11013 spec2 [MONO_INST_DEST] = ' ';
11014 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
11015 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
11016 spec2 [MONO_INST_SRC3] = ' ';
11018 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
11019 // FIXME: The backends expect the base reg to be in inst_basereg
11020 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
11022 ins->inst_basereg = var->inst_basereg;
11023 ins->inst_offset = var->inst_offset;
11024 spec = INS_INFO (ins->opcode);
11026 /* printf ("INS: "); mono_print_ins (ins); */
11027 /* Create a store instruction */
11028 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
11030 /* Insert it after the instruction */
11031 mono_bblock_insert_after_ins (bb, ins, store_ins);
11033 def_ins = store_ins;
11036 * We can't assign ins->dreg to var->dreg here, since the
11037 * sregs could use it. So set a flag, and do it after
11040 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
11041 dest_has_lvreg = TRUE;
11046 if (def_ins && !live_range_start [dreg]) {
11047 live_range_start [dreg] = def_ins;
11048 live_range_start_bb [dreg] = bb;
11055 num_sregs = mono_inst_get_src_registers (ins, sregs);
11056 for (srcindex = 0; srcindex < 3; ++srcindex) {
11057 regtype = spec [MONO_INST_SRC1 + srcindex];
11058 sreg = sregs [srcindex];
11060 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
11061 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
11062 MonoInst *var = get_vreg_to_inst (cfg, sreg);
11063 MonoInst *use_ins = ins;
11064 MonoInst *load_ins;
11065 guint32 load_opcode;
11067 if (var->opcode == OP_REGVAR) {
11068 sregs [srcindex] = var->dreg;
11069 //mono_inst_set_src_registers (ins, sregs);
11070 live_range_end [sreg] = use_ins;
11071 live_range_end_bb [sreg] = bb;
11075 g_assert (var->opcode == OP_REGOFFSET);
11077 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
11079 g_assert (load_opcode != OP_LOADV_MEMBASE);
11081 if (vreg_to_lvreg [sreg]) {
11082 g_assert (vreg_to_lvreg [sreg] != -1);
11084 /* The variable is already loaded to an lvreg */
11085 if (G_UNLIKELY (cfg->verbose_level > 2))
11086 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
11087 sregs [srcindex] = vreg_to_lvreg [sreg];
11088 //mono_inst_set_src_registers (ins, sregs);
11092 /* Try to fuse the load into the instruction */
11093 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
11094 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
11095 sregs [0] = var->inst_basereg;
11096 //mono_inst_set_src_registers (ins, sregs);
11097 ins->inst_offset = var->inst_offset;
11098 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
11099 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
11100 sregs [1] = var->inst_basereg;
11101 //mono_inst_set_src_registers (ins, sregs);
11102 ins->inst_offset = var->inst_offset;
11104 if (MONO_IS_REAL_MOVE (ins)) {
11105 ins->opcode = OP_NOP;
11108 //printf ("%d ", srcindex); mono_print_ins (ins);
11110 sreg = alloc_dreg (cfg, stacktypes [regtype]);
11112 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
11113 if (var->dreg == prev_dreg) {
11115 * sreg refers to the value loaded by the load
11116 * emitted below, but we need to use ins->dreg
11117 * since it refers to the store emitted earlier.
11121 g_assert (sreg != -1);
11122 vreg_to_lvreg [var->dreg] = sreg;
11123 g_assert (lvregs_len < 1024);
11124 lvregs [lvregs_len ++] = var->dreg;
11128 sregs [srcindex] = sreg;
11129 //mono_inst_set_src_registers (ins, sregs);
11131 if (regtype == 'l') {
11132 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
11133 mono_bblock_insert_before_ins (bb, ins, load_ins);
11134 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
11135 mono_bblock_insert_before_ins (bb, ins, load_ins);
11136 use_ins = load_ins;
11139 #if SIZEOF_REGISTER == 4
11140 g_assert (load_opcode != OP_LOADI8_MEMBASE);
11142 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
11143 mono_bblock_insert_before_ins (bb, ins, load_ins);
11144 use_ins = load_ins;
11148 if (var->dreg < orig_next_vreg) {
11149 live_range_end [var->dreg] = use_ins;
11150 live_range_end_bb [var->dreg] = bb;
11154 mono_inst_set_src_registers (ins, sregs);
11156 if (dest_has_lvreg) {
11157 g_assert (ins->dreg != -1);
11158 vreg_to_lvreg [prev_dreg] = ins->dreg;
11159 g_assert (lvregs_len < 1024);
11160 lvregs [lvregs_len ++] = prev_dreg;
11161 dest_has_lvreg = FALSE;
11165 tmp_reg = ins->dreg;
11166 ins->dreg = ins->sreg2;
11167 ins->sreg2 = tmp_reg;
11170 if (MONO_IS_CALL (ins)) {
11171 /* Clear vreg_to_lvreg array */
11172 for (i = 0; i < lvregs_len; i++)
11173 vreg_to_lvreg [lvregs [i]] = 0;
11175 } else if (ins->opcode == OP_NOP) {
11177 MONO_INST_NULLIFY_SREGS (ins);
11180 if (cfg->verbose_level > 2)
11181 mono_print_ins_index (1, ins);
11185 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
11187 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
11188 * by storing the current native offset into MonoMethodVar->live_range_start/end.
11190 for (i = 0; i < cfg->num_varinfo; ++i) {
11191 int vreg = MONO_VARINFO (cfg, i)->vreg;
11194 if (live_range_start [vreg]) {
11195 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
11197 ins->inst_c1 = vreg;
11198 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
11200 if (live_range_end [vreg]) {
11201 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
11203 ins->inst_c1 = vreg;
11204 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
11209 g_free (live_range_start);
11210 g_free (live_range_end);
11211 g_free (live_range_start_bb);
11212 g_free (live_range_end_bb);
11217 * - use 'iadd' instead of 'int_add'
11218 * - handling ovf opcodes: decompose in method_to_ir.
11219 * - unify iregs/fregs
11220 * -> partly done, the missing parts are:
11221 * - a more complete unification would involve unifying the hregs as well, so
11222 * code wouldn't need if (fp) all over the place. but that would mean the hregs
11223 * would no longer map to the machine hregs, so the code generators would need to
11224 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
11225 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
11226 * fp/non-fp branches speeds it up by about 15%.
11227 * - use sext/zext opcodes instead of shifts
11229 * - get rid of TEMPLOADs if possible and use vregs instead
11230 * - clean up usage of OP_P/OP_ opcodes
11231 * - cleanup usage of DUMMY_USE
11232 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
11234 * - set the stack type and allocate a dreg in the EMIT_NEW macros
11235 * - get rid of all the <foo>2 stuff when the new JIT is ready.
11236 * - make sure handle_stack_args () is called before the branch is emitted
11237 * - when the new IR is done, get rid of all unused stuff
11238 * - COMPARE/BEQ as separate instructions or unify them ?
11239 * - keeping them separate allows specialized compare instructions like
11240 * compare_imm, compare_membase
11241 * - most back ends unify fp compare+branch, fp compare+ceq
11242 * - integrate mono_save_args into inline_method
11243  * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
11244 * - handle long shift opts on 32 bit platforms somehow: they require
11245 * 3 sregs (2 for arg1 and 1 for arg2)
11246 * - make byref a 'normal' type.
11247 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
11248 * variable if needed.
11249 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11250 * like inline_method.
11251 * - remove inlining restrictions
11252 * - fix LNEG and enable cfold of INEG
11253 * - generalize x86 optimizations like ldelema as a peephole optimization
11254 * - add store_mem_imm for amd64
11255 * - optimize the loading of the interruption flag in the managed->native wrappers
11256 * - avoid special handling of OP_NOP in passes
11257 * - move code inserting instructions into one function/macro.
11258 * - try a coalescing phase after liveness analysis
11259 * - add float -> vreg conversion + local optimizations on !x86
11260 * - figure out how to handle decomposed branches during optimizations, ie.
11261 * compare+branch, op_jump_table+op_br etc.
11262 * - promote RuntimeXHandles to vregs
11263 * - vtype cleanups:
11264 * - add a NEW_VARLOADA_VREG macro
11265 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11266 * accessing vtype fields.
11267 * - get rid of I8CONST on 64 bit platforms
11268 * - dealing with the increase in code size due to branches created during opcode
11270 * - use extended basic blocks
11271 * - all parts of the JIT
11272 * - handle_global_vregs () && local regalloc
11273 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11274 * - sources of increase in code size:
11277 * - isinst and castclass
11278 * - lvregs not allocated to global registers even if used multiple times
11279 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11281 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11282 * - add all micro optimizations from the old JIT
11283 * - put tree optimizations into the deadce pass
11284 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11285 * specific function.
11286 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11287 * fcompare + branchCC.
11288 * - create a helper function for allocating a stack slot, taking into account
11289 * MONO_CFG_HAS_SPILLUP.
11291 * - merge the ia64 switch changes.
11292 * - optimize mono_regstate2_alloc_int/float.
11293 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11294 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11295 * parts of the tree could be separated by other instructions, killing the tree
11296 * arguments, or stores killing loads etc. Also, should we fold loads into other
11297 * instructions if the result of the load is used multiple times ?
11298 * - make the REM_IMM optimization in mini-x86.c arch-independent.
11299 * - LAST MERGE: 108395.
11300 * - when returning vtypes in registers, generate IR and append it to the end of the
11301 * last bb instead of doing it in the epilog.
11302 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
11310 - When to decompose opcodes:
11311 - earlier: this makes some optimizations hard to implement, since the low level IR
11312      no longer contains the necessary information. But it is easier to do.
11313 - later: harder to implement, enables more optimizations.
11314 - Branches inside bblocks:
11315 - created when decomposing complex opcodes.
11316 - branches to another bblock: harmless, but not tracked by the branch
11317 optimizations, so need to branch to a label at the start of the bblock.
11318 - branches to inside the same bblock: very problematic, trips up the local
11319      reg allocator. Can be fixed by splitting the current bblock, but that is a
11320 complex operation, since some local vregs can become global vregs etc.
11321 - Local/global vregs:
11322 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11323 local register allocator.
11324 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11325 structure, created by mono_create_var (). Assigned to hregs or the stack by
11326 the global register allocator.
11327 - When to do optimizations like alu->alu_imm:
11328 - earlier -> saves work later on since the IR will be smaller/simpler
11329 - later -> can work on more instructions
11330 - Handling of valuetypes:
11331 - When a vtype is pushed on the stack, a new temporary is created, an
11332 instruction computing its address (LDADDR) is emitted and pushed on
11333 the stack. Need to optimize cases when the vtype is used immediately as in
11334 argument passing, stloc etc.
11335 - Instead of the to_end stuff in the old JIT, simply call the function handling
11336 the values on the stack before emitting the last instruction of the bb.
11339 #endif /* DISABLE_JIT */