2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
30 #include <mono/utils/memcheck.h>
32 #include <mono/metadata/assembly.h>
33 #include <mono/metadata/loader.h>
34 #include <mono/metadata/tabledefs.h>
35 #include <mono/metadata/class.h>
36 #include <mono/metadata/object.h>
37 #include <mono/metadata/exception.h>
38 #include <mono/metadata/opcodes.h>
39 #include <mono/metadata/mono-endian.h>
40 #include <mono/metadata/tokentype.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/marshal.h>
43 #include <mono/metadata/debug-helpers.h>
44 #include <mono/metadata/mono-debug.h>
45 #include <mono/metadata/gc-internal.h>
46 #include <mono/metadata/security-manager.h>
47 #include <mono/metadata/threads-types.h>
48 #include <mono/metadata/security-core-clr.h>
49 #include <mono/metadata/monitor.h>
50 #include <mono/metadata/profiler-private.h>
51 #include <mono/metadata/profiler.h>
52 #include <mono/utils/mono-compiler.h>
53 #include <mono/metadata/mono-basic-block.h>
60 #include "jit-icalls.h"
62 #include "debugger-agent.h"
64 #define BRANCH_COST 100
65 #define INLINE_LENGTH_LIMIT 20
66 #define INLINE_FAILURE do {\
67 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
70 #define CHECK_CFG_EXCEPTION do {\
71 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
74 #define METHOD_ACCESS_FAILURE do { \
75 char *method_fname = mono_method_full_name (method, TRUE); \
76 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
77 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
78 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
79 g_free (method_fname); \
80 g_free (cil_method_fname); \
81 goto exception_exit; \
83 #define FIELD_ACCESS_FAILURE do { \
84 char *method_fname = mono_method_full_name (method, TRUE); \
85 char *field_fname = mono_field_full_name (field); \
86 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
87 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
88 g_free (method_fname); \
89 g_free (field_fname); \
90 goto exception_exit; \
92 #define GENERIC_SHARING_FAILURE(opcode) do { \
93 if (cfg->generic_sharing_context) { \
94 if (cfg->verbose_level > 2) \
95 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
96 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
97 goto exception_exit; \
101 /* Determine whether 'ins' represents a load of the 'this' argument */
102 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
104 static int ldind_to_load_membase (int opcode);
105 static int stind_to_store_membase (int opcode);
107 int mono_op_to_op_imm (int opcode);
108 int mono_op_to_op_imm_noemul (int opcode);
110 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
111 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
112 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
114 /* helper methods signature */
115 extern MonoMethodSignature *helper_sig_class_init_trampoline;
116 extern MonoMethodSignature *helper_sig_domain_get;
117 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
118 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
119 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
120 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
123 * Instruction metadata
131 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
132 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
138 #if SIZEOF_REGISTER == 8
143 /* keep in sync with the enum in mini.h */
146 #include "mini-ops.h"
151 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
152 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
154 * This should contain the index of the last sreg + 1. This is not the same
155 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
157 const gint8 ins_sreg_counts[] = {
158 #include "mini-ops.h"
163 #define MONO_INIT_VARINFO(vi,id) do { \
164 (vi)->range.first_use.pos.bid = 0xffff; \
170 mono_inst_set_src_registers (MonoInst *ins, int *regs)
172 ins->sreg1 = regs [0];
173 ins->sreg2 = regs [1];
174 ins->sreg3 = regs [2];
178 mono_alloc_ireg (MonoCompile *cfg)
180 return alloc_ireg (cfg);
184 mono_alloc_freg (MonoCompile *cfg)
186 return alloc_freg (cfg);
190 mono_alloc_preg (MonoCompile *cfg)
192 return alloc_preg (cfg);
196 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
198 return alloc_dreg (cfg, stack_type);
202 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
208 switch (type->type) {
211 case MONO_TYPE_BOOLEAN:
223 case MONO_TYPE_FNPTR:
225 case MONO_TYPE_CLASS:
226 case MONO_TYPE_STRING:
227 case MONO_TYPE_OBJECT:
228 case MONO_TYPE_SZARRAY:
229 case MONO_TYPE_ARRAY:
233 #if SIZEOF_REGISTER == 8
242 case MONO_TYPE_VALUETYPE:
243 if (type->data.klass->enumtype) {
244 type = mono_class_enum_basetype (type->data.klass);
247 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
250 case MONO_TYPE_TYPEDBYREF:
252 case MONO_TYPE_GENERICINST:
253 type = &type->data.generic_class->container_class->byval_arg;
257 g_assert (cfg->generic_sharing_context);
260 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
266 mono_print_bb (MonoBasicBlock *bb, const char *msg)
271 printf ("\n%s %d: [IN: ", msg, bb->block_num);
272 for (i = 0; i < bb->in_count; ++i)
273 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
275 for (i = 0; i < bb->out_count; ++i)
276 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
278 for (tree = bb->code; tree; tree = tree->next)
279 mono_print_ins_index (-1, tree);
283 * Can't put this at the beginning, since other files reference stuff from this
288 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
290 #define GET_BBLOCK(cfg,tblock,ip) do { \
291 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
293 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
294 NEW_BBLOCK (cfg, (tblock)); \
295 (tblock)->cil_code = (ip); \
296 ADD_BBLOCK (cfg, (tblock)); \
300 #if defined(TARGET_X86) || defined(TARGET_AMD64)
301 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
302 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
303 (dest)->dreg = alloc_preg ((cfg)); \
304 (dest)->sreg1 = (sr1); \
305 (dest)->sreg2 = (sr2); \
306 (dest)->inst_imm = (imm); \
307 (dest)->backend.shift_amount = (shift); \
308 MONO_ADD_INS ((cfg)->cbb, (dest)); \
312 #if SIZEOF_REGISTER == 8
313 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
314 /* FIXME: Need to add many more cases */ \
315 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
317 int dr = alloc_preg (cfg); \
318 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
319 (ins)->sreg2 = widen->dreg; \
323 #define ADD_WIDEN_OP(ins, arg1, arg2)
326 #define ADD_BINOP(op) do { \
327 MONO_INST_NEW (cfg, ins, (op)); \
329 ins->sreg1 = sp [0]->dreg; \
330 ins->sreg2 = sp [1]->dreg; \
331 type_from_op (ins, sp [0], sp [1]); \
333 /* Have to insert a widening op */ \
334 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
335 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
336 MONO_ADD_INS ((cfg)->cbb, (ins)); \
337 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
340 #define ADD_UNOP(op) do { \
341 MONO_INST_NEW (cfg, ins, (op)); \
343 ins->sreg1 = sp [0]->dreg; \
344 type_from_op (ins, sp [0], NULL); \
346 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
347 MONO_ADD_INS ((cfg)->cbb, (ins)); \
348 *sp++ = mono_decompose_opcode (cfg, ins); \
351 #define ADD_BINCOND(next_block) do { \
354 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
355 cmp->sreg1 = sp [0]->dreg; \
356 cmp->sreg2 = sp [1]->dreg; \
357 type_from_op (cmp, sp [0], sp [1]); \
359 type_from_op (ins, sp [0], sp [1]); \
360 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
361 GET_BBLOCK (cfg, tblock, target); \
362 link_bblock (cfg, bblock, tblock); \
363 ins->inst_true_bb = tblock; \
364 if ((next_block)) { \
365 link_bblock (cfg, bblock, (next_block)); \
366 ins->inst_false_bb = (next_block); \
367 start_new_bblock = 1; \
369 GET_BBLOCK (cfg, tblock, ip); \
370 link_bblock (cfg, bblock, tblock); \
371 ins->inst_false_bb = tblock; \
372 start_new_bblock = 2; \
374 if (sp != stack_start) { \
375 handle_stack_args (cfg, stack_start, sp - stack_start); \
376 CHECK_UNVERIFIABLE (cfg); \
378 MONO_ADD_INS (bblock, cmp); \
379 MONO_ADD_INS (bblock, ins); \
383 * link_bblock: Links two basic blocks
385 * links two basic blocks in the control flow graph, the 'from'
386 * argument is the starting block and the 'to' argument is the block
387 * that control flow transfers to after 'from'.
390 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
392 MonoBasicBlock **newa;
396 if (from->cil_code) {
398 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
400 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
403 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
405 printf ("edge from entry to exit\n");
410 for (i = 0; i < from->out_count; ++i) {
411 if (to == from->out_bb [i]) {
417 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
418 for (i = 0; i < from->out_count; ++i) {
419 newa [i] = from->out_bb [i];
427 for (i = 0; i < to->in_count; ++i) {
428 if (from == to->in_bb [i]) {
434 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
435 for (i = 0; i < to->in_count; ++i) {
436 newa [i] = to->in_bb [i];
445 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
447 link_bblock (cfg, from, to);
451 * mono_find_block_region:
453 * We mark each basic block with a region ID. We use that to avoid BB
454 * optimizations when blocks are in different regions.
457 * A region token that encodes where this region is, and information
458 * about the clause owner for this block.
460 * The region encodes the try/catch/filter clause that owns this block
461 * as well as the type. -1 is a special value that represents a block
462 * that is in none of try/catch/filter.
465 mono_find_block_region (MonoCompile *cfg, int offset)
467 MonoMethodHeader *header = cfg->header;
468 MonoExceptionClause *clause;
471 for (i = 0; i < header->num_clauses; ++i) {
472 clause = &header->clauses [i];
473 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
474 (offset < (clause->handler_offset)))
475 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
477 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
478 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
479 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
480 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
481 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
483 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
486 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
487 return ((i + 1) << 8) | clause->flags;
494 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
496 MonoMethodHeader *header = cfg->header;
497 MonoExceptionClause *clause;
501 for (i = 0; i < header->num_clauses; ++i) {
502 clause = &header->clauses [i];
503 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
504 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
505 if (clause->flags == type)
506 res = g_list_append (res, clause);
513 mono_create_spvar_for_region (MonoCompile *cfg, int region)
517 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
521 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
522 /* prevent it from being register allocated */
523 var->flags |= MONO_INST_INDIRECT;
525 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
529 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
531 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
535 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
539 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
543 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
544 /* prevent it from being register allocated */
545 var->flags |= MONO_INST_INDIRECT;
547 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
553 * Returns the type used in the eval stack when @type is loaded.
554 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
557 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
561 inst->klass = klass = mono_class_from_mono_type (type);
563 inst->type = STACK_MP;
568 switch (type->type) {
570 inst->type = STACK_INV;
574 case MONO_TYPE_BOOLEAN:
580 inst->type = STACK_I4;
585 case MONO_TYPE_FNPTR:
586 inst->type = STACK_PTR;
588 case MONO_TYPE_CLASS:
589 case MONO_TYPE_STRING:
590 case MONO_TYPE_OBJECT:
591 case MONO_TYPE_SZARRAY:
592 case MONO_TYPE_ARRAY:
593 inst->type = STACK_OBJ;
597 inst->type = STACK_I8;
601 inst->type = STACK_R8;
603 case MONO_TYPE_VALUETYPE:
604 if (type->data.klass->enumtype) {
605 type = mono_class_enum_basetype (type->data.klass);
609 inst->type = STACK_VTYPE;
612 case MONO_TYPE_TYPEDBYREF:
613 inst->klass = mono_defaults.typed_reference_class;
614 inst->type = STACK_VTYPE;
616 case MONO_TYPE_GENERICINST:
617 type = &type->data.generic_class->container_class->byval_arg;
620 case MONO_TYPE_MVAR :
621 /* FIXME: all the arguments must be references for now,
622 * later look inside cfg and see if the arg num is
625 g_assert (cfg->generic_sharing_context);
626 inst->type = STACK_OBJ;
629 g_error ("unknown type 0x%02x in eval stack type", type->type);
634 * The following tables are used to quickly validate the IL code in type_from_op ().
637 bin_num_table [STACK_MAX] [STACK_MAX] = {
638 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
639 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
640 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
641 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
642 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
643 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
644 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
645 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
650 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
653 /* reduce the size of this table */
655 bin_int_table [STACK_MAX] [STACK_MAX] = {
656 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
657 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
658 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
659 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
660 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
661 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
662 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
663 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
667 bin_comp_table [STACK_MAX] [STACK_MAX] = {
668 /* Inv i L p F & O vt */
670 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
671 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
672 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
673 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
674 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
675 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
676 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
679 /* reduce the size of this table */
681 shift_table [STACK_MAX] [STACK_MAX] = {
682 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
683 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
684 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
685 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
686 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
687 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
688 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
689 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
693 * Tables to map from the non-specific opcode to the matching
694 * type-specific opcode.
696 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
698 binops_op_map [STACK_MAX] = {
699 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
702 /* handles from CEE_NEG to CEE_CONV_U8 */
704 unops_op_map [STACK_MAX] = {
705 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
708 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
710 ovfops_op_map [STACK_MAX] = {
711 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
714 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
716 ovf2ops_op_map [STACK_MAX] = {
717 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
720 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
722 ovf3ops_op_map [STACK_MAX] = {
723 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
726 /* handles from CEE_BEQ to CEE_BLT_UN */
728 beqops_op_map [STACK_MAX] = {
729 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
732 /* handles from CEE_CEQ to CEE_CLT_UN */
734 ceqops_op_map [STACK_MAX] = {
735 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
739 * Sets ins->type (the type on the eval stack) according to the
740 * type of the opcode and the arguments to it.
741 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
743 * FIXME: this function sets ins->type unconditionally in some cases, but
744 * it should set it to invalid for some types (a conv.x on an object)
747 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
749 switch (ins->opcode) {
756 /* FIXME: check unverifiable args for STACK_MP */
757 ins->type = bin_num_table [src1->type] [src2->type];
758 ins->opcode += binops_op_map [ins->type];
765 ins->type = bin_int_table [src1->type] [src2->type];
766 ins->opcode += binops_op_map [ins->type];
771 ins->type = shift_table [src1->type] [src2->type];
772 ins->opcode += binops_op_map [ins->type];
777 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
778 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
779 ins->opcode = OP_LCOMPARE;
780 else if (src1->type == STACK_R8)
781 ins->opcode = OP_FCOMPARE;
783 ins->opcode = OP_ICOMPARE;
785 case OP_ICOMPARE_IMM:
786 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
787 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
788 ins->opcode = OP_LCOMPARE_IMM;
800 ins->opcode += beqops_op_map [src1->type];
803 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
804 ins->opcode += ceqops_op_map [src1->type];
810 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
811 ins->opcode += ceqops_op_map [src1->type];
815 ins->type = neg_table [src1->type];
816 ins->opcode += unops_op_map [ins->type];
819 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
820 ins->type = src1->type;
822 ins->type = STACK_INV;
823 ins->opcode += unops_op_map [ins->type];
829 ins->type = STACK_I4;
830 ins->opcode += unops_op_map [src1->type];
833 ins->type = STACK_R8;
834 switch (src1->type) {
837 ins->opcode = OP_ICONV_TO_R_UN;
840 ins->opcode = OP_LCONV_TO_R_UN;
844 case CEE_CONV_OVF_I1:
845 case CEE_CONV_OVF_U1:
846 case CEE_CONV_OVF_I2:
847 case CEE_CONV_OVF_U2:
848 case CEE_CONV_OVF_I4:
849 case CEE_CONV_OVF_U4:
850 ins->type = STACK_I4;
851 ins->opcode += ovf3ops_op_map [src1->type];
853 case CEE_CONV_OVF_I_UN:
854 case CEE_CONV_OVF_U_UN:
855 ins->type = STACK_PTR;
856 ins->opcode += ovf2ops_op_map [src1->type];
858 case CEE_CONV_OVF_I1_UN:
859 case CEE_CONV_OVF_I2_UN:
860 case CEE_CONV_OVF_I4_UN:
861 case CEE_CONV_OVF_U1_UN:
862 case CEE_CONV_OVF_U2_UN:
863 case CEE_CONV_OVF_U4_UN:
864 ins->type = STACK_I4;
865 ins->opcode += ovf2ops_op_map [src1->type];
868 ins->type = STACK_PTR;
869 switch (src1->type) {
871 ins->opcode = OP_ICONV_TO_U;
875 #if SIZEOF_REGISTER == 8
876 ins->opcode = OP_LCONV_TO_U;
878 ins->opcode = OP_MOVE;
882 ins->opcode = OP_LCONV_TO_U;
885 ins->opcode = OP_FCONV_TO_U;
891 ins->type = STACK_I8;
892 ins->opcode += unops_op_map [src1->type];
894 case CEE_CONV_OVF_I8:
895 case CEE_CONV_OVF_U8:
896 ins->type = STACK_I8;
897 ins->opcode += ovf3ops_op_map [src1->type];
899 case CEE_CONV_OVF_U8_UN:
900 case CEE_CONV_OVF_I8_UN:
901 ins->type = STACK_I8;
902 ins->opcode += ovf2ops_op_map [src1->type];
906 ins->type = STACK_R8;
907 ins->opcode += unops_op_map [src1->type];
910 ins->type = STACK_R8;
914 ins->type = STACK_I4;
915 ins->opcode += ovfops_op_map [src1->type];
920 ins->type = STACK_PTR;
921 ins->opcode += ovfops_op_map [src1->type];
929 ins->type = bin_num_table [src1->type] [src2->type];
930 ins->opcode += ovfops_op_map [src1->type];
931 if (ins->type == STACK_R8)
932 ins->type = STACK_INV;
934 case OP_LOAD_MEMBASE:
935 ins->type = STACK_PTR;
937 case OP_LOADI1_MEMBASE:
938 case OP_LOADU1_MEMBASE:
939 case OP_LOADI2_MEMBASE:
940 case OP_LOADU2_MEMBASE:
941 case OP_LOADI4_MEMBASE:
942 case OP_LOADU4_MEMBASE:
943 ins->type = STACK_PTR;
945 case OP_LOADI8_MEMBASE:
946 ins->type = STACK_I8;
948 case OP_LOADR4_MEMBASE:
949 case OP_LOADR8_MEMBASE:
950 ins->type = STACK_R8;
953 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
957 if (ins->type == STACK_MP)
958 ins->klass = mono_defaults.object_class;
963 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
969 param_table [STACK_MAX] [STACK_MAX] = {
974 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
978 switch (args->type) {
988 for (i = 0; i < sig->param_count; ++i) {
989 switch (args [i].type) {
993 if (!sig->params [i]->byref)
997 if (sig->params [i]->byref)
999 switch (sig->params [i]->type) {
1000 case MONO_TYPE_CLASS:
1001 case MONO_TYPE_STRING:
1002 case MONO_TYPE_OBJECT:
1003 case MONO_TYPE_SZARRAY:
1004 case MONO_TYPE_ARRAY:
1011 if (sig->params [i]->byref)
1013 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1022 /*if (!param_table [args [i].type] [sig->params [i]->type])
1030 * When we need a pointer to the current domain many times in a method, we
1031 * call mono_domain_get() once and we store the result in a local variable.
1032 * This function returns the variable that represents the MonoDomain*.
1034 inline static MonoInst *
1035 mono_get_domainvar (MonoCompile *cfg)
1037 if (!cfg->domainvar)
1038 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1039 return cfg->domainvar;
1043 * The got_var contains the address of the Global Offset Table when AOT
1047 mono_get_got_var (MonoCompile *cfg)
1049 #ifdef MONO_ARCH_NEED_GOT_VAR
1050 if (!cfg->compile_aot)
1052 if (!cfg->got_var) {
1053 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1055 return cfg->got_var;
1062 mono_get_vtable_var (MonoCompile *cfg)
1064 g_assert (cfg->generic_sharing_context);
1066 if (!cfg->rgctx_var) {
1067 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1068 /* force the var to be stack allocated */
1069 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1072 return cfg->rgctx_var;
1076 type_from_stack_type (MonoInst *ins) {
1077 switch (ins->type) {
1078 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1079 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1080 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1081 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1083 return &ins->klass->this_arg;
1084 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1085 case STACK_VTYPE: return &ins->klass->byval_arg;
1087 g_error ("stack type %d to monotype not handled\n", ins->type);
1092 static G_GNUC_UNUSED int
1093 type_to_stack_type (MonoType *t)
1095 t = mono_type_get_underlying_type (t);
1099 case MONO_TYPE_BOOLEAN:
1102 case MONO_TYPE_CHAR:
1109 case MONO_TYPE_FNPTR:
1111 case MONO_TYPE_CLASS:
1112 case MONO_TYPE_STRING:
1113 case MONO_TYPE_OBJECT:
1114 case MONO_TYPE_SZARRAY:
1115 case MONO_TYPE_ARRAY:
1123 case MONO_TYPE_VALUETYPE:
1124 case MONO_TYPE_TYPEDBYREF:
1126 case MONO_TYPE_GENERICINST:
1127 if (mono_type_generic_inst_is_valuetype (t))
1133 g_assert_not_reached ();
1140 array_access_to_klass (int opcode)
1144 return mono_defaults.byte_class;
1146 return mono_defaults.uint16_class;
1149 return mono_defaults.int_class;
1152 return mono_defaults.sbyte_class;
1155 return mono_defaults.int16_class;
1158 return mono_defaults.int32_class;
1160 return mono_defaults.uint32_class;
1163 return mono_defaults.int64_class;
1166 return mono_defaults.single_class;
1169 return mono_defaults.double_class;
1170 case CEE_LDELEM_REF:
1171 case CEE_STELEM_REF:
1172 return mono_defaults.object_class;
1174 g_assert_not_reached ();
1180 * We try to share variables when possible
1183 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1188 /* inlining can result in deeper stacks */
1189 if (slot >= cfg->header->max_stack)
1190 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1192 pos = ins->type - 1 + slot * STACK_MAX;
1194 switch (ins->type) {
1201 if ((vnum = cfg->intvars [pos]))
1202 return cfg->varinfo [vnum];
1203 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1204 cfg->intvars [pos] = res->inst_c0;
1207 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1213 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1216 * Don't use this if a generic_context is set, since that means AOT can't
1217 * look up the method using just the image+token.
1218 * table == 0 means this is a reference made from a wrapper.
1220 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1221 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1222 jump_info_token->image = image;
1223 jump_info_token->token = token;
1224 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1229 * This function is called to handle items that are left on the evaluation stack
1230 * at basic block boundaries. What happens is that we save the values to local variables
1231 * and we reload them later when first entering the target basic block (with the
1232 * handle_loaded_temps () function).
1233 * A single join point will use the same variables (stored in the array bb->out_stack or
1234 * bb->in_stack, if the basic block is before or after the join point).
1236 * This function needs to be called _before_ emitting the last instruction of
1237 * the bb (i.e. before emitting a branch).
1238 * If the stack merge fails at a join point, cfg->unverifiable is set.
1241 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1244 MonoBasicBlock *bb = cfg->cbb;
1245 MonoBasicBlock *outb;
1246 MonoInst *inst, **locals;
1251 if (cfg->verbose_level > 3)
1252 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
1253 if (!bb->out_scount) {
1254 bb->out_scount = count;
1255 //printf ("bblock %d has out:", bb->block_num);
1257 for (i = 0; i < bb->out_count; ++i) {
1258 outb = bb->out_bb [i];
1259 /* exception handlers are linked, but they should not be considered for stack args */
1260 if (outb->flags & BB_EXCEPTION_HANDLER)
1262 //printf (" %d", outb->block_num);
1263 if (outb->in_stack) {
1265 bb->out_stack = outb->in_stack;
1271 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1272 for (i = 0; i < count; ++i) {
1274 * try to reuse temps already allocated for this purpose, if they occupy the same
1275 * stack slot and if they are of the same type.
1276 * This won't cause conflicts since if 'local' is used to
1277 * store one of the values in the in_stack of a bblock, then
1278 * the same variable will be used for the same outgoing stack
1280 * This doesn't work when inlining methods, since the bblocks
1281 * in the inlined methods do not inherit their in_stack from
1282 * the bblock they are inlined to. See bug #58863 for an
1285 if (cfg->inlined_method)
1286 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1288 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
1293 for (i = 0; i < bb->out_count; ++i) {
1294 outb = bb->out_bb [i];
1295 /* exception handlers are linked, but they should not be considered for stack args */
1296 if (outb->flags & BB_EXCEPTION_HANDLER)
1298 if (outb->in_scount) {
1299 if (outb->in_scount != bb->out_scount) {
1300 cfg->unverifiable = TRUE;
1303 continue; /* check they are the same locals */
1305 outb->in_scount = count;
1306 outb->in_stack = bb->out_stack;
1309 locals = bb->out_stack;
1311 for (i = 0; i < count; ++i) {
1312 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1313 inst->cil_code = sp [i]->cil_code;
1314 sp [i] = locals [i];
1315 if (cfg->verbose_level > 3)
1316 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1320 * It is possible that the out bblocks already have in_stack assigned, and
1321 * the in_stacks differ. In this case, we will store to all the different
1328 /* Find a bblock which has a different in_stack */
1330 while (bindex < bb->out_count) {
1331 outb = bb->out_bb [bindex];
1332 /* exception handlers are linked, but they should not be considered for stack args */
1333 if (outb->flags & BB_EXCEPTION_HANDLER) {
1337 if (outb->in_stack != locals) {
1338 for (i = 0; i < count; ++i) {
1339 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1340 inst->cil_code = sp [i]->cil_code;
1341 sp [i] = locals [i];
1342 if (cfg->verbose_level > 3)
1343 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1345 locals = outb->in_stack;
1354 /* Emit code which loads interface_offsets [klass->interface_id] into intf_reg.
1355 * The interface-offsets array is stored in memory immediately *before* the
 * vtable, which is why the non-AOT path below reads at a negative offset
 * from vtable_reg.
1358 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1360 if (cfg->compile_aot) {
/* AOT: the adjusted (already negated/scaled) interface id is not known at
 * compile time, so load it through a patch constant and add it to the
 * vtable address before dereferencing. */
1361 int ioffset_reg = alloc_preg (cfg);
1362 int iid_reg = alloc_preg (cfg);
1364 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1365 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1366 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT: the interface id is a compile-time constant, so the table slot can be
 * addressed directly with a negative membase offset. */
1369 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_interface_bitmap_check:
 *
 *   Emit IR which loads into intf_bit_reg a nonzero value iff the bit for
 * klass->interface_id is set in the interface bitmap found at
 * base_reg + offset.  base_reg may point at a MonoClass or a MonoVTable;
 * the callers pass the matching interface_bitmap field offset.
 */
1374 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1376 int ibitmap_reg = alloc_preg (cfg);
1377 #ifdef COMPRESSED_INTERFACE_BITMAP
/* Compressed bitmap: testing a bit requires decompression, so call the
 * mono_class_interface_match icall with the bitmap pointer and the iid. */
1379 MonoInst *res, *ins;
1380 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1381 MONO_ADD_INS (cfg->cbb, ins);
1383 if (cfg->compile_aot)
1384 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1386 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1387 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1388 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
/* Uncompressed bitmap: test bit (iid & 7) of byte (iid >> 3) inline. */
1390 int ibitmap_byte_reg = alloc_preg (cfg);
1392 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1394 if (cfg->compile_aot) {
/* AOT: the iid is a patch constant, so both the byte index and the bit
 * mask have to be computed in registers at run time. */
1395 int iid_reg = alloc_preg (cfg);
1396 int shifted_iid_reg = alloc_preg (cfg);
1397 int ibitmap_byte_address_reg = alloc_preg (cfg);
1398 int masked_iid_reg = alloc_preg (cfg);
1399 int iid_one_bit_reg = alloc_preg (cfg);
1400 int iid_bit_reg = alloc_preg (cfg);
1401 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1402 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1403 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1404 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1405 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1406 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1407 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1408 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: byte index and bit mask fold to immediates. */
1410 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1411 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1417 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1418 * stored in "klass_reg" implements the interface "klass".
 * Thin wrapper: points the bitmap check at MonoClass.interface_bitmap.
1421 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1423 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1427 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1428 * stored in "vtable_reg" implements the interface "klass".
 * Thin wrapper: points the bitmap check at MonoVTable.interface_bitmap.
1431 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1433 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1437 * Emit code which checks whether the interface id of @klass is not greater
1438 * than the value given by max_iid_reg.  On failure, branch to false_target
 * if one is given; otherwise throw InvalidCastException.
1441 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1442 MonoBasicBlock *false_target)
1444 if (cfg->compile_aot) {
/* AOT: the iid is only known at load time, so compare through a patch const. */
1445 int iid_reg = alloc_preg (cfg);
1446 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1447 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1450 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
/* Unsigned compare: a max_iid below the iid means the interface can't be implemented. */
1452 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1454 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1457 /* Same as above, but obtains max_iid from a vtable */
1459 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1460 MonoBasicBlock *false_target)
1462 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is a 16-bit field, hence the unsigned 2-byte load. */
1464 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1465 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1468 /* Same as above, but obtains max_iid from a klass */
1470 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1471 MonoBasicBlock *false_target)
1473 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is a 16-bit field, hence the unsigned 2-byte load. */
1475 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1476 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 *
 *   Emit an "is instance" subtype test: look up entry (klass->idepth - 1) in
 * the supertypes array of the runtime class in klass_reg and compare it
 * against klass (or klass_ins/an AOT constant when the class is not a
 * compile-time constant).  Branches to true_target on a match; false_target
 * is used for the idepth guard.
 */
1480 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1482 int idepth_reg = alloc_preg (cfg);
1483 int stypes_reg = alloc_preg (cfg);
1484 int stype = alloc_preg (cfg);
/* Only classes deeper than the default supertable need an explicit idepth
 * guard; shallower entries are always present in the supertypes array. */
1486 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1487 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1488 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1489 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1491 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1492 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
/* klass_ins, when given, already holds the class pointer to compare with
 * (used by generic sharing, where klass is not a JIT-time constant). */
1494 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1495 } else if (cfg->compile_aot) {
1496 int const_reg = alloc_preg (cfg);
1497 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1498 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1500 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1502 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper for the common case where klass is a JIT-time constant
 * (no MonoInst holding the class pointer). */
1506 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1508 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 *
 *   Emit a check that the vtable in vtable_reg implements interface klass:
 * guard on max_interface_id, then test the interface bitmap bit.  With a
 * true_target this acts as an isinst test; without one a failed test throws
 * InvalidCastException (castclass semantics).
 */
1512 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1514 int intf_reg = alloc_preg (cfg);
1516 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1517 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1518 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1520 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1522 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1526 * Variant of the above that takes a register to the class, not the vtable.
1529 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1531 int intf_bit_reg = alloc_preg (cfg);
1533 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1534 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1535 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
/* Nonzero bitmap bit => interface implemented; otherwise fall through to throw. */
1537 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1539 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 *
 *   Emit an exact class-equality check between the runtime class in klass_reg
 * and klass; throws InvalidCastException on mismatch.  klass_inst, when
 * non-NULL, supplies the expected class pointer in a vreg (generic sharing).
 */
1543 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1546 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1547 } else if (cfg->compile_aot) {
/* AOT: the class pointer is resolved at load time via a patch constant. */
1548 int const_reg = alloc_preg (cfg);
1549 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1550 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1552 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1554 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Exact class-equality check against a JIT-time constant class (no vreg form). */
1558 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1560 return mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 *
 *   Compare the runtime class in klass_reg against klass and branch to
 * target using branch_op (e.g. OP_PBEQ / OP_PBNE_UN) instead of throwing.
 */
1564 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1566 if (cfg->compile_aot) {
1567 int const_reg = alloc_preg (cfg);
1568 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1569 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1571 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1573 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1577 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 *
 *   Emit a castclass check of the runtime class in klass_reg against klass;
 * a failed check throws InvalidCastException.  Arrays are handled by
 * checking rank and then recursively checking the element (cast) class;
 * non-arrays use the supertypes-array subtype check.  object_is_null is the
 * bblock to branch to when the check can be satisfied trivially.
 * klass_inst, when non-NULL, holds the expected class pointer in a vreg
 * (generic sharing); it is not supported for the array path.
 */
1580 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1583 int rank_reg = alloc_preg (cfg);
1584 int eclass_reg = alloc_preg (cfg);
1586 g_assert (!klass_inst);
/* Array path: rank must match exactly... */
1587 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1588 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1589 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1590 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* ...then the element (cast) class must be compatible. */
1591 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
1592 if (klass->cast_class == mono_defaults.object_class) {
/* Casting to object[]: any reference element type is fine; only value-type
 * elements (parent == Enum's parent, i.e. ValueType) must be rejected,
 * except enums themselves which get an exact check below. */
1593 int parent_reg = alloc_preg (cfg);
1594 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1595 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1596 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1597 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
/* Casting to ValueType[]: accept ValueType itself or Enum. */
1598 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1599 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1600 } else if (klass->cast_class == mono_defaults.enum_class) {
1601 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1602 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1603 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1605 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1606 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1609 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1610 /* Check that the object is a vector too */
1611 int bounds_reg = alloc_preg (cfg);
1612 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1613 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1614 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: subtype check through the supertypes array, same layout
 * as mini_emit_isninst_cast_inst () but throwing instead of branching. */
1617 int idepth_reg = alloc_preg (cfg);
1618 int stypes_reg = alloc_preg (cfg);
1619 int stype = alloc_preg (cfg);
1621 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1622 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1623 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1624 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1626 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1627 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1628 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* castclass check against a JIT-time constant class (no vreg form of klass). */
1633 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1635 return mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 *
 *   Emit inline IR that sets SIZE bytes at destreg + offset to VAL
 * (currently only val == 0 is supported, see the assert).  Small aligned
 * sizes become a single store-immediate; larger sizes are emitted as a run
 * of register stores, widest first when unaligned access is allowed.
 */
1639 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1643 g_assert (val == 0);
/* Fast path: the whole memset fits in one aligned store-immediate. */
1648 if ((size <= 4) && (size <= align)) {
1651 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1654 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1657 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1659 #if SIZEOF_REGISTER == 8
1661 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General path: materialize val in a register once, then store repeatedly. */
1667 val_reg = alloc_preg (cfg);
1669 if (SIZEOF_REGISTER == 8)
1670 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1672 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned destination: fall back to byte stores.
 * This could be optimized further if necessary. */
1675 /* This could be optimized further if neccesary */
1677 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1684 #if !NO_UNALIGNED_ACCESS
/* Widest-stores-first: 8-byte stores (64-bit only), then 4, 2, 1 for the tail. */
1685 if (SIZEOF_REGISTER == 8) {
1687 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1692 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1700 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1705 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1710 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1716 #endif /* DISABLE_JIT */
/*
 * mini_emit_memcpy:
 *
 *   Emit inline IR that copies SIZE bytes from srcreg + soffset to
 * destreg + doffset.  Uses byte copies when the buffers are not suitably
 * aligned; otherwise copies widest-first (8/4/2/1 bytes) when unaligned
 * access is allowed on the target.
 */
1719 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1726 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1727 g_assert (size < 10000);
/* Unaligned: byte-by-byte load/store pairs.
 * This could be optimized further if necessary. */
1730 /* This could be optimized further if neccesary */
1732 cur_reg = alloc_preg (cfg);
1733 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1734 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1741 #if !NO_UNALIGNED_ACCESS
/* 8-byte copies are only profitable/possible on 64-bit registers. */
1742 if (SIZEOF_REGISTER == 8) {
1744 cur_reg = alloc_preg (cfg);
1745 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1746 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1755 cur_reg = alloc_preg (cfg);
1756 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1757 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1763 cur_reg = alloc_preg (cfg);
1764 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1765 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1771 cur_reg = alloc_preg (cfg);
1772 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1773 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * ret_type_to_call_opcode:
 *
 *   Map a method's return type to the IR call opcode family:
 * OP_*CALL for a direct call, OP_*CALL_REG when calli != 0, OP_*CALLVIRT
 * when virt != 0.  The prefix encodes the return kind: none (int/ref),
 * VOID, L (64-bit), F (float), V (valuetype).  gsctx is used to resolve
 * generic-sharing type variables first.
 */
1783 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
/* byref returns are pointer-sized, handled before the switch. */
1786 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1789 type = mini_get_basic_type_from_generic (gsctx, type);
1790 switch (type->type) {
1791 case MONO_TYPE_VOID:
1792 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1795 case MONO_TYPE_BOOLEAN:
1798 case MONO_TYPE_CHAR:
1801 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1805 case MONO_TYPE_FNPTR:
1806 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1807 case MONO_TYPE_CLASS:
1808 case MONO_TYPE_STRING:
1809 case MONO_TYPE_OBJECT:
1810 case MONO_TYPE_SZARRAY:
1811 case MONO_TYPE_ARRAY:
1812 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1815 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1818 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1819 case MONO_TYPE_VALUETYPE:
/* Enums decay to their underlying type and re-enter the switch
 * (presumably via a handle_enum label — elided here; confirm in full source). */
1820 if (type->data.klass->enumtype) {
1821 type = mono_class_enum_basetype (type->data.klass);
1824 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1825 case MONO_TYPE_TYPEDBYREF:
1826 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1827 case MONO_TYPE_GENERICINST:
/* Re-dispatch on the generic type definition. */
1828 type = &type->data.generic_class->container_class->byval_arg;
1831 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1837 * target_type_is_incompatible:
1838 * @cfg: MonoCompile context
1840 * Check that the item @arg on the evaluation stack can be stored
1841 * in the target type (can be a local, or field, etc).
1842 * The cfg arg can be used to check if we need verification or just
 * a quick check.
1845 * Returns: non-0 value if arg can't be stored on a target.
1848 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1850 MonoType *simple_type;
1853 if (target->byref) {
1854 /* FIXME: check that the pointed to types match */
1855 if (arg->type == STACK_MP)
1856 return arg->klass != mono_class_from_mono_type (target);
1857 if (arg->type == STACK_PTR)
/* Resolve enums/custom modifiers before dispatching on the element type. */
1862 simple_type = mono_type_get_underlying_type (target);
1863 switch (simple_type->type) {
1864 case MONO_TYPE_VOID:
1868 case MONO_TYPE_BOOLEAN:
1871 case MONO_TYPE_CHAR:
1874 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1878 /* STACK_MP is needed when setting pinned locals */
1879 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1884 case MONO_TYPE_FNPTR:
1885 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1888 case MONO_TYPE_CLASS:
1889 case MONO_TYPE_STRING:
1890 case MONO_TYPE_OBJECT:
1891 case MONO_TYPE_SZARRAY:
1892 case MONO_TYPE_ARRAY:
1893 if (arg->type != STACK_OBJ)
1895 /* FIXME: check type compatibility */
1899 if (arg->type != STACK_I8)
1904 if (arg->type != STACK_R8)
1907 case MONO_TYPE_VALUETYPE:
/* Value types must match exactly — both stack kind and the precise class. */
1908 if (arg->type != STACK_VTYPE)
1910 klass = mono_class_from_mono_type (simple_type);
1911 if (klass != arg->klass)
1914 case MONO_TYPE_TYPEDBYREF:
1915 if (arg->type != STACK_VTYPE)
1917 klass = mono_class_from_mono_type (simple_type);
1918 if (klass != arg->klass)
1921 case MONO_TYPE_GENERICINST:
1922 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1923 if (arg->type != STACK_VTYPE)
1925 klass = mono_class_from_mono_type (simple_type);
1926 if (klass != arg->klass)
1930 if (arg->type != STACK_OBJ)
1932 /* FIXME: check type compatibility */
1936 case MONO_TYPE_MVAR:
1937 /* FIXME: all the arguments must be references for now,
1938 * later look inside cfg and see if the arg num is
1939 * really a reference
1941 g_assert (cfg->generic_sharing_context);
1942 if (arg->type != STACK_OBJ)
1946 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1952 * Prepare arguments for passing to a function call.
1953 * Return a non-zero value if the arguments can't be passed to the given
 * signature.
1955 * The type checks are not yet complete and some conversions may need
1956 * casts on 32 or 64 bit architectures.
1958 * FIXME: implement this using target_type_is_incompatible ()
1961 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1963 MonoType *simple_type;
/* Check the implicit 'this' argument first, when present. */
1967 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1971 for (i = 0; i < sig->param_count; ++i) {
1972 if (sig->params [i]->byref) {
1973 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1977 simple_type = sig->params [i];
/* Resolve generic-sharing type variables before dispatching. */
1978 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1980 switch (simple_type->type) {
1981 case MONO_TYPE_VOID:
1986 case MONO_TYPE_BOOLEAN:
1989 case MONO_TYPE_CHAR:
1992 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
1998 case MONO_TYPE_FNPTR:
1999 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2002 case MONO_TYPE_CLASS:
2003 case MONO_TYPE_STRING:
2004 case MONO_TYPE_OBJECT:
2005 case MONO_TYPE_SZARRAY:
2006 case MONO_TYPE_ARRAY:
2007 if (args [i]->type != STACK_OBJ)
2012 if (args [i]->type != STACK_I8)
2017 if (args [i]->type != STACK_R8)
2020 case MONO_TYPE_VALUETYPE:
/* Enums decay to their underlying type and re-enter the switch
 * (presumably via a handle_enum label — elided here; confirm in full source). */
2021 if (simple_type->data.klass->enumtype) {
2022 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2025 if (args [i]->type != STACK_VTYPE)
2028 case MONO_TYPE_TYPEDBYREF:
2029 if (args [i]->type != STACK_VTYPE)
2032 case MONO_TYPE_GENERICINST:
2033 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2037 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 *
 *   Map an OP_*CALLVIRT opcode to the corresponding direct OP_*CALL opcode
 * (used when a virtual call site can be statically devirtualized).
 */
2045 callvirt_to_call (int opcode)
2050 case OP_VOIDCALLVIRT:
2059 g_assert_not_reached ();
/*
 * callvirt_to_call_membase:
 *
 *   Map an OP_*CALLVIRT opcode to the corresponding OP_*CALL_MEMBASE opcode,
 * i.e. a call whose target address is loaded from [basereg + offset]
 * (vtable/IMT slot dispatch).
 */
2066 callvirt_to_call_membase (int opcode)
2070 return OP_CALL_MEMBASE;
2071 case OP_VOIDCALLVIRT:
2072 return OP_VOIDCALL_MEMBASE;
2074 return OP_FCALL_MEMBASE;
2076 return OP_LCALL_MEMBASE;
2078 return OP_VCALL_MEMBASE;
2080 g_assert_not_reached ();
2086 #ifdef MONO_ARCH_HAVE_IMT
/*
 * emit_imt_argument:
 *
 *   Emit IR that passes the IMT (interface method table) discriminator for
 * CALL: either the supplied imt_arg, or the target method itself as a
 * constant.  On architectures with a dedicated IMT register the value is
 * moved there; otherwise the arch-specific helper decides how to pass it.
 */
2088 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2090 #ifdef MONO_ARCH_IMT_REG
2091 int method_reg = alloc_preg (cfg);
2094 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2095 } else if (cfg->compile_aot) {
2096 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2099 MONO_INST_NEW (cfg, ins, OP_PCONST);
2100 ins->inst_p0 = call->method;
2101 ins->dreg = method_reg;
2102 MONO_ADD_INS (cfg->cbb, ins);
2105 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2107 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/*
 * mono_patch_info_new:
 *
 *   Allocate a MonoJumpInfo patch descriptor from MP, recording the patch
 * TYPE, the IP it applies to and its TARGET datum.
 */
2112 static MonoJumpInfo *
2113 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2115 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2119 ji->data.target = target;
/*
 * mono_emit_call_args:
 *
 *   Create a MonoCallInst for SIG/ARGS and let the backend lower the
 * argument passing.  calli selects an indirect call, virtual a callvirt,
 * tail a tail call.  Valuetype returns get a vret variable or an
 * OP_OUTARG_VTRETADDR temp; the caller is responsible for adding the call
 * instruction to a bblock.
 */
2124 inline static MonoCallInst *
2125 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2126 MonoInst **args, int calli, int virtual, int tail)
2129 #ifdef MONO_ARCH_SOFT_FLOAT
2134 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2136 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2139 call->signature = sig;
2141 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
/* Valuetype return, first flavor: reuse the method's vret_addr variable
 * (tail-call path, presumably — the guarding condition is elided here). */
2144 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2145 call->vret_var = cfg->vret_addr;
2146 //g_assert_not_reached ();
2148 } else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
/* Valuetype return, general flavor: allocate a temp to receive the value. */
2149 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2152 temp->backend.is_pinvoke = sig->pinvoke;
2155 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2156 * address of return value to increase optimization opportunities.
2157 * Before vtype decomposition, the dreg of the call ins itself represents the
2158 * fact the call modifies the return value. After decomposition, the call will
2159 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2160 * will be transformed into an LDADDR.
2162 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2163 loada->dreg = alloc_preg (cfg);
2164 loada->inst_p0 = temp;
2165 /* We reference the call too since call->dreg could change during optimization */
2166 loada->inst_p1 = call;
2167 MONO_ADD_INS (cfg->cbb, loada);
2169 call->inst.dreg = temp->dreg;
2171 call->vret_var = loada;
2172 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2173 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2175 #ifdef MONO_ARCH_SOFT_FLOAT
2176 if (COMPILE_SOFT_FLOAT (cfg)) {
2178 * If the call has a float argument, we would need to do an r8->r4 conversion using
2179 * an icall, but that cannot be done during the call sequence since it would clobber
2180 * the call registers + the stack. So we do it before emitting the call.
2182 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2184 MonoInst *in = call->args [i];
2186 if (i >= sig->hasthis)
2187 t = sig->params [i - sig->hasthis];
2189 t = &mono_defaults.int_class->byval_arg;
2190 t = mono_type_get_underlying_type (t);
2192 if (!t->byref && t->type == MONO_TYPE_R4) {
2193 MonoInst *iargs [1];
2197 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2199 /* The result will be in an int vreg */
2200 call->args [i] = conv;
/* Let the backend (or LLVM) assign argument registers/stack slots. */
2207 if (COMPILE_LLVM (cfg))
2208 mono_llvm_emit_call (cfg, call);
2210 mono_arch_emit_call (cfg, call);
2212 mono_arch_emit_call (cfg, call);
/* Track the largest outgoing-args area needed by any call in the method. */
2215 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2216 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * mono_emit_calli:
 *
 *   Emit an indirect call through the address in ADDR with signature SIG
 * and arguments ARGS, add it to the current bblock and return it.
 */
2221 inline static MonoInst*
2222 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2224 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE);
2226 call->inst.sreg1 = addr->dreg;
2228 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2230 return (MonoInst*)call;
/*
 * mono_emit_rgctx_calli:
 *
 *   Like mono_emit_calli (), but also passes RGCTX_ARG in the dedicated
 * runtime-generic-context register.  Only available on architectures that
 * define MONO_ARCH_RGCTX_REG.
 */
2233 inline static MonoInst*
2234 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2236 #ifdef MONO_ARCH_RGCTX_REG
2241 rgctx_reg = mono_alloc_preg (cfg);
2242 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2244 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2246 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2247 cfg->uses_rgctx_reg = TRUE;
2248 call->rgctx_reg = TRUE;
2250 return (MonoInst*)call;
2252 g_assert_not_reached ();
2258 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
2260 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, int rgctx_type);
/*
 * mono_emit_method_call_full:
 *
 *   Emit a call to METHOD with signature SIG and arguments ARGS.  A non-NULL
 * THIS makes it a virtual call; imt_arg carries an explicit IMT
 * discriminator for interface/generic-virtual dispatch.  The call is
 * devirtualized when possible (non-virtual methods, sealed methods),
 * wrapped for remoting when the receiver might be a transparent proxy,
 * and otherwise dispatched through the vtable or IMT slot.
 */
2263 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2264 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2266 gboolean might_be_remote;
2267 gboolean virtual = this != NULL;
2268 gboolean enable_for_aot = TRUE;
2272 if (method->string_ctor) {
2273 /* Create the real signature */
2274 /* FIXME: Cache these */
/* String ctors are declared void but actually return the string. */
2275 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2276 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
/* The receiver may be a transparent proxy when the class is MarshalByRef
 * (or object itself) and the call is non-virtual on an unverified this. */
2281 might_be_remote = this && sig->hasthis &&
2282 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2283 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this);
2285 context_used = mono_method_check_context_used (method);
2286 if (might_be_remote && context_used) {
/* Gshared + remoting: the wrapper address comes from the rgctx, and the
 * call becomes an indirect call. */
2289 g_assert (cfg->generic_sharing_context);
2291 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2293 return mono_emit_calli (cfg, sig, args, addr);
2296 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE);
2298 if (might_be_remote)
2299 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2301 call->method = method;
2302 call->inst.flags |= MONO_INST_HAS_METHOD;
2303 call->inst.inst_left = this;
2306 int vtable_reg, slot_reg, this_reg;
2308 this_reg = this->dreg;
2310 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2311 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2312 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2314 /* Make a call to delegate->invoke_impl */
2315 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2316 call->inst.inst_basereg = this_reg;
2317 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2318 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2320 return (MonoInst*)call;
/* Devirtualization: non-virtual methods, and sealed virtual methods,
 * can be called directly once 'this' is known non-null. */
2324 if ((!cfg->compile_aot || enable_for_aot) &&
2325 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2326 (MONO_METHOD_IS_FINAL (method) &&
2327 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2328 !(method->klass->marshalbyref && context_used)) {
2330 * the method is not virtual, we just need to ensure this is not null
2331 * and then we can call the method directly.
2333 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2335 * The check above ensures method is not gshared, this is needed since
2336 * gshared methods can't have wrappers.
2338 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2341 if (!method->string_ctor)
2342 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2344 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2346 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2348 return (MonoInst*)call;
2351 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2353 * the method is virtual, but we can statically dispatch since either
2354 * it's class or the method itself are sealed.
2355 * But first we need to ensure it's not a null reference.
2357 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2359 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2360 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2362 return (MonoInst*)call;
/* True virtual dispatch: load the vtable (faulting on null this), then
 * pick the slot from the IMT (interfaces) or the vtable proper. */
2365 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2367 vtable_reg = alloc_preg (cfg);
2368 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2369 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2371 #ifdef MONO_ARCH_HAVE_IMT
2373 guint32 imt_slot = mono_method_get_imt_slot (method);
2374 emit_imt_argument (cfg, call, imt_arg);
2375 slot_reg = vtable_reg;
/* IMT slots live at negative offsets before the vtable. */
2376 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2379 if (slot_reg == -1) {
/* No IMT: go through the per-interface offset table instead. */
2380 slot_reg = alloc_preg (cfg);
2381 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2382 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2385 slot_reg = vtable_reg;
2386 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2387 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2388 #ifdef MONO_ARCH_HAVE_IMT
/* Generic virtual methods also dispatch through the IMT machinery. */
2390 g_assert (mono_method_signature (method)->generic_param_count);
2391 emit_imt_argument (cfg, call, imt_arg);
2396 call->inst.sreg1 = slot_reg;
2397 call->virtual = TRUE;
2400 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2402 return (MonoInst*)call;
/*
 * mono_emit_rgctx_method_call_full:
 *
 *   Like mono_emit_method_call_full (), but also passes VTABLE_ARG in the
 * dedicated runtime-generic-context register (architectures defining
 * MONO_ARCH_RGCTX_REG only).
 */
2406 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2407 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
2409 #ifdef MONO_ARCH_RGCTX_REG
2416 #ifdef MONO_ARCH_RGCTX_REG
/* Copy the rgctx value into a fresh vreg before emitting the call so it
 * survives argument lowering. */
2417 rgctx_reg = mono_alloc_preg (cfg);
2418 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2423 ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2425 call = (MonoCallInst*)ins;
2427 #ifdef MONO_ARCH_RGCTX_REG
2428 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2429 cfg->uses_rgctx_reg = TRUE;
2430 call->rgctx_reg = TRUE;
/* Convenience wrapper: call METHOD with its own signature and no IMT arg. */
2440 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2442 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
/*
 * mono_emit_native_call:
 *
 *   Emit a direct call to the native function FUNC with signature SIG and
 * arguments ARGS, add it to the current bblock and return it.
 */
2446 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2453 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE);
2456 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2458 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *
 *   Emit a call to the registered JIT icall identified by its address FUNC,
 * going through the icall's wrapper and registered signature.
 */
2462 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2464 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2468 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2472 * mono_emit_abs_call:
2474 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2476 inline static MonoInst*
2477 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2478 MonoMethodSignature *sig, MonoInst **args)
2480 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2484 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
 * recognize it and resolve the real target from the abs_patches table.
/* Lazily create the table mapping patch descriptors to themselves; it is
 * keyed by pointer identity (NULL hash/equal funcs). */
2487 if (cfg->abs_patches == NULL)
2488 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2489 g_hash_table_insert (cfg->abs_patches, ji, ji);
2490 ins = mono_emit_native_call (cfg, ji, sig, args);
2491 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *
 *   Widen the result of the call INS when its declared return type is a
 * small integer: native (pinvoke) code — and LLVM-compiled code — may leave
 * the upper bits of the result register uninitialized, so emit an explicit
 * sign/zero extension matching the return type.
 */
2496 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2498 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2499 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2503 * Native code might return non register sized integers
2504 * without initializing the upper bits.
/* Pick the conversion matching the load width/signedness of the return type. */
2506 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2507 case OP_LOADI1_MEMBASE:
2508 widen_op = OP_ICONV_TO_I1;
2510 case OP_LOADU1_MEMBASE:
2511 widen_op = OP_ICONV_TO_U1;
2513 case OP_LOADI2_MEMBASE:
2514 widen_op = OP_ICONV_TO_I2;
2516 case OP_LOADU2_MEMBASE:
2517 widen_op = OP_ICONV_TO_U2;
2523 if (widen_op != -1) {
2524 int dreg = alloc_preg (cfg);
2527 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2528 widen->type = ins->type;
/*
 * get_memcpy_method:
 *
 *   Return (lazily caching it) the corlib-internal String.memcpy (dest, src, n)
 * helper used for managed struct copies. Aborts if the helper is missing,
 * which indicates a corlib too old for this runtime.
 */
2538 get_memcpy_method (void)
2540 	static MonoMethod *memcpy_method = NULL;
2541 	if (!memcpy_method) {
2542 		memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2544 			g_error ("Old corlib found. Install a new one");
2546 	return memcpy_method;
2550  * Emit code to copy a valuetype of type @klass whose address is stored in
2551  * @src->dreg to memory whose address is stored at @dest->dreg.
2554 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2556 	MonoInst *iargs [3];
2559 	MonoMethod *memcpy_method;
2563 	 * This check breaks with spilled vars... need to handle it during verification anyway.
2564 	 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* NATIVE selects the marshalled (unmanaged) layout size vs. the managed one. */
2568 		n = mono_class_native_size (klass, &align);
2570 		n = mono_class_value_size (klass, &align);
2572 #if HAVE_WRITE_BARRIERS
2573 	/* if native is true there should be no references in the struct */
2574 	if (klass->has_references && !native) {
2575 		/* Avoid barriers when storing to the stack */
2576 		if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2577 				(dest->opcode == OP_LDADDR))) {
2578 			int context_used = 0;
2583 			if (cfg->generic_sharing_context)
2584 				context_used = mono_class_check_context_used (klass);
/* Third icall argument is the klass, loaded from the rgctx under generic
 * sharing, or as an aot/direct constant otherwise. */
2586 				iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2588 				if (cfg->compile_aot) {
2589 					EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2591 					EMIT_NEW_PCONST (cfg, iargs [2], klass);
/* Ensure the GC descriptor exists before mono_value_copy runs at runtime. */
2592 					mono_class_compute_gc_descriptor (klass);
2596 			/* FIXME: this does the memcpy as well (or
2597 			   should), so we don't need the memcpy
/* mono_value_copy performs a barrier-aware copy of the whole struct. */
2599 			mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* Small structs: expand to inline loads/stores instead of a call. */
2604 	if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2605 		/* FIXME: Optimize the case when src/dest is OP_LDADDR */
2606 		mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
/* Fallback: call the managed memcpy helper with (dest, src, n). */
2610 		EMIT_NEW_ICONST (cfg, iargs [2], n);
2612 		memcpy_method = get_memcpy_method ();
2613 		mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return (lazily caching it) the corlib-internal String.memset (dest, val, n)
 * helper, mirroring get_memcpy_method (). Aborts on an old corlib.
 */
2618 get_memset_method (void)
2620 	static MonoMethod *memset_method = NULL;
2621 	if (!memset_method) {
2622 		memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2624 			g_error ("Old corlib found. Install a new one");
2626 	return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code to zero-initialize a valuetype of type KLASS whose address is in
 * DEST->dreg (implements the CIL 'initobj' semantics). Small types are
 * expanded inline; larger ones call the managed memset helper.
 */
2630 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2632 	MonoInst *iargs [3];
2635 	MonoMethod *memset_method;
2637 	/* FIXME: Optimize this for the case when dest is an LDADDR */
2639 	mono_class_init (klass);
2640 	n = mono_class_value_size (klass, &align);
/* Up to 5 pointer-sized words: cheap to expand as inline stores. */
2642 	if (n <= sizeof (gpointer) * 5) {
2643 		mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2646 		memset_method = get_memset_method ();
/* Call memset (dest, 0, n). */
2648 		EMIT_NEW_ICONST (cfg, iargs [1], 0);
2649 		EMIT_NEW_ICONST (cfg, iargs [2], n);
2650 		mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR that loads the runtime generic context for METHOD. The source of
 * the context depends on the kind of method:
 *   - generic methods (MONO_GENERIC_CONTEXT_USED_METHOD) load the mrgctx from
 *     the compiler-managed vtable variable;
 *   - static / valuetype methods load the vtable from that variable (possibly
 *     indirecting through the mrgctx's class_vtable field);
 *   - otherwise the vtable is loaded from 'this'.
 * Only valid when compiling with generic sharing enabled.
 */
2655 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2657 	MonoInst *this = NULL;
2659 	g_assert (cfg->generic_sharing_context);
/* Instance method on a reference type: 'this' (argument 0) carries the context. */
2661 	if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2662 			!(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2663 			!method->klass->valuetype)
2664 		EMIT_NEW_ARGLOAD (cfg, this, 0);
2666 	if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2667 		MonoInst *mrgctx_loc, *mrgctx_var;
2670 		g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2672 		mrgctx_loc = mono_get_vtable_var (cfg);
2673 		EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2676 	} else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2677 		MonoInst *vtable_loc, *vtable_var;
2681 		vtable_loc = mono_get_vtable_var (cfg);
2682 		EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
/* The variable actually holds an mrgctx here; fetch the vtable out of it. */
2684 		if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2685 			MonoInst *mrgctx_var = vtable_var;
2688 			vtable_reg = alloc_preg (cfg);
2689 			EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2690 			vtable_var->type = STACK_PTR;
/* Default case: load the vtable pointer out of the 'this' object header. */
2696 		int vtable_reg, res_reg;
2698 		vtable_reg = alloc_preg (cfg);
2699 		res_reg = alloc_preg (cfg);
2700 		EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (from the mempool MP) and fill an rgctx-entry patch descriptor:
 * the method it belongs to, whether the context lives in an mrgctx, the inner
 * patch (PATCH_TYPE/PATCH_DATA identifying the item) and the rgctx info slot
 * kind INFO_TYPE. Used as the data of MONO_PATCH_INFO_RGCTX_FETCH patches.
 */
2705 static MonoJumpInfoRgctxEntry *
2706 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2708 	MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2709 	res->method = method;
2710 	res->in_mrgctx = in_mrgctx;
2711 	res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2712 	res->data->type = patch_type;
2713 	res->data->data.target = patch_data;
2714 	res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *
 *   Emit a call to the lazy rgctx fetch trampoline, passing RGCTX as the sole
 * argument; ENTRY describes which slot to fetch and becomes the patch data.
 */
2719 static inline MonoInst*
2720 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2722 	return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR that loads the rgctx item of kind RGCTX_TYPE (vtable, klass, ...)
 * for KLASS: build the rgctx-entry patch, load the context, and fetch the slot.
 */
2726 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2727 					  MonoClass *klass, int rgctx_type)
2729 	MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2730 	MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2732 	return emit_rgctx_fetch (cfg, rgctx, entry);
2736  * emit_get_rgctx_method:
2738  *   Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2739  * normal constants, else emit a load from the rgctx.
2742 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2743 					   MonoMethod *cmethod, int rgctx_type)
/* No generic sharing in play: the method is known, emit it as a constant. */
2745 	if (!context_used) {
2748 		switch (rgctx_type) {
2749 		case MONO_RGCTX_INFO_METHOD:
2750 			EMIT_NEW_METHODCONST (cfg, ins, cmethod);
2752 		case MONO_RGCTX_INFO_METHOD_RGCTX:
2753 			EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
/* Other rgctx_type values are not valid without a shared context. */
2756 			g_assert_not_reached ();
/* Shared code: fetch the slot from the runtime generic context instead. */
2759 		MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2760 		MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2762 		return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR that loads the rgctx item of kind RGCTX_TYPE for FIELD; same
 * structure as emit_get_rgctx_klass () but with a field patch.
 */
2767 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2768 					  MonoClassField *field, int rgctx_type)
2770 	MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2771 	MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2773 	return emit_rgctx_fetch (cfg, rgctx, entry);
2777  * On return the caller must check @klass for load errors.
/*
 * emit_generic_class_init:
 *
 *   Emit a call to the generic class-init trampoline for KLASS. The vtable
 * argument comes from the rgctx under generic sharing, or is a constant
 * otherwise; on architectures with a dedicated vtable register it is passed
 * in MONO_ARCH_VTABLE_REG rather than on the stack.
 */
2780 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
2782 	MonoInst *vtable_arg;
2784 	int context_used = 0;
2786 	if (cfg->generic_sharing_context)
2787 		context_used = mono_class_check_context_used (klass);
2790 		vtable_arg = emit_get_rgctx_klass (cfg, context_used,
2791 										   klass, MONO_RGCTX_INFO_VTABLE);
2793 		MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2797 		EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
2800 	call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
2801 #ifdef MONO_ARCH_VTABLE_REG
2802 	mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
2803 	cfg->uses_vtable_reg = TRUE;
2810  * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit a runtime check that OBJ is exactly of type ARRAY_CLASS (used for
 * covariant array stores). Compares either the object's MonoClass or its
 * vtable against the expected value, depending on sharing/aot mode, and
 * throws ArrayTypeMismatchException on mismatch.
 */
2813 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
2815 	int vtable_reg = alloc_preg (cfg);
2816 	int context_used = 0;
2818 	if (cfg->generic_sharing_context)
2819 		context_used = mono_class_check_context_used (array_class);
2821 	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Shared-domain code cannot compare vtables (they are per-domain); compare
 * the MonoClass pointer instead. */
2823 	if (cfg->opt & MONO_OPT_SHARED) {
2824 		int class_reg = alloc_preg (cfg);
2825 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2826 		if (cfg->compile_aot) {
2827 			int klass_reg = alloc_preg (cfg);
2828 			MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
2829 			MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
2831 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
/* Generic sharing: the expected vtable comes from the rgctx at runtime. */
2833 	} else if (context_used) {
2834 		MonoInst *vtable_ins;
2836 		vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
2837 		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
2839 		if (cfg->compile_aot) {
/* mono_class_vtable can fail on load errors; caller checks array_class. */
2843 			if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
2845 			vt_reg = alloc_preg (cfg);
2846 			MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
2847 			MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
2850 			if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
2852 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
2856 	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
/*
 * save_cast_details:
 *
 *   When running with --debug=casts, emit IR that records the source class
 * (read from the object in OBJ_REG) and the target KLASS into the per-thread
 * MonoJitTlsData, so a failing cast can report both types.
 */
2860 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
2862 	if (mini_get_debug_options ()->better_cast_details) {
2863 		int to_klass_reg = alloc_preg (cfg);
2864 		int vtable_reg = alloc_preg (cfg);
2865 		int klass_reg = alloc_preg (cfg);
2866 		MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
/* NOTE(review): the string ends with ".\n." — the trailing period after the
 * newline looks like a typo ("platform.\n" intended); confirm and fix in a
 * code change. */
2869 			fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
2873 		MONO_ADD_INS (cfg->cbb, tls_get);
/* obj -> vtable -> klass: the "from" side of the cast. */
2874 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2875 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2877 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
2878 		MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
2879 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * reset_cast_details:
 *
 *   Counterpart of save_cast_details (): clear the recorded cast info once the
 * cast has succeeded, so stale data is not reported by a later failure.
 */
2884 reset_cast_details (MonoCompile *cfg)
2886 	/* Reset the variables holding the cast details */
2887 	if (mini_get_debug_options ()->better_cast_details) {
2888 		MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2890 		MONO_ADD_INS (cfg->cbb, tls_get);
2891 		/* It is enough to reset the from field */
2892 		MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
2897  * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
2898  * generic code is generated.
2901 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
2903 	MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
2906 		MonoInst *rgctx, *addr;
2908 		/* FIXME: What if the class is shared? We might not
2909 		   have to get the address of the method from the
/* Shared code: resolve Nullable<T>.Unbox's code address through the rgctx
 * and call it indirectly, passing the rgctx along. */
2911 		addr = emit_get_rgctx_method (cfg, context_used, method,
2912 									  MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2914 		rgctx = emit_get_rgctx (cfg, method, context_used);
2916 		return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
/* Non-shared: a direct call to the concrete Unbox method suffices. */
2918 		return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *
 *   Emit the CIL 'unbox' sequence for KLASS on the object at SP [0]: verify at
 * runtime that the object's element class matches, throwing
 * InvalidCastException otherwise, then produce a managed pointer to the value
 * payload (object address + sizeof (MonoObject)).
 */
2923 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
2927 	int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
2928 	int klass_reg = alloc_dreg (cfg ,STACK_PTR);
2929 	int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
2930 	int rank_reg = alloc_dreg (cfg ,STACK_I4);
2932 	obj_reg = sp [0]->dreg;
/* The _FAULT load also performs the implicit null check on the object. */
2933 	MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2934 	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2936 	/* FIXME: generics */
2937 	g_assert (klass->rank == 0);
/* The object must not be an array (rank 0) for unbox to be valid. */
2940 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
2941 	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2943 	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2944 	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
/* Shared generics: the expected element class is only known at runtime. */
2947 		MonoInst *element_class;
2949 		/* This assertion is from the unboxcast insn */
2950 		g_assert (klass->rank == 0);
2952 		element_class = emit_get_rgctx_klass (cfg, context_used,
2953 											  klass->element_class, MONO_RGCTX_INFO_KLASS);
2955 		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
2956 		MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-shared: compare against the known element class, with cast-debug info. */
2958 		save_cast_details (cfg, klass->element_class, obj_reg);
2959 		mini_emit_class_check (cfg, eclass_reg, klass->element_class);
2960 		reset_cast_details (cfg);
/* Result: managed pointer just past the object header. */
2963 	NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
2964 	MONO_ADD_INS (cfg->cbb, add);
2965 	add->type = STACK_MP;
2972  * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit IR that allocates a new object of type KLASS. FOR_BOX selects the
 * allocator variant used for boxing. Several strategies are visible here:
 * the shared-domain path (mono_object_new with explicit domain), an
 * aot/out-of-line corlib fast path, a GC-provided managed allocator, and the
 * generic per-class allocation function.
 */
2975 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box)
2977 	MonoInst *iargs [2];
/* Domain-shared code must pass the domain explicitly at runtime. */
2980 	if (cfg->opt & MONO_OPT_SHARED) {
2981 		EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2982 		EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
2984 		alloc_ftn = mono_object_new;
2985 	} else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
2986 		/* This happens often in argument checking code, eg. throw new FooException... */
2987 		/* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
2988 		EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
2989 		return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
2991 		MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2992 		MonoMethod *managed_alloc = NULL;
/* vtable creation failed: report a type-load error through the cfg. */
2996 			cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
2997 			cfg->exception_ptr = klass;
3001 #ifndef MONO_CROSS_COMPILE
3002 		managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
/* Prefer the GC's inlined managed allocator when one is available. */
3005 		if (managed_alloc) {
3006 			EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3007 			return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3009 		alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocators take the instance size in pointer-words as first argument. */
3011 			guint32 lw = vtable->klass->instance_size;
3012 			lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3013 			EMIT_NEW_ICONST (cfg, iargs [0], lw);
3014 			EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3017 			EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3021 	return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_alloc_from_inst:
 *
 *   Like handle_alloc (), but for shared generic code where the vtable is only
 * available at runtime as DATA_INST. Because the class may be open, the
 * managed allocator cannot be resolved at compile time (see the FIXME below),
 * so allocation goes through mono_object_new[_specific].
 */
3025 handle_alloc_from_inst (MonoCompile *cfg, MonoClass *klass, MonoInst *data_inst,
3028 	MonoInst *iargs [2];
3029 	MonoMethod *managed_alloc = NULL;
3033 	  FIXME: we cannot get managed_alloc here because we can't get
3034 	  the class's vtable (because it's not a closed class)
3036 	MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3037 	MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3040 	if (cfg->opt & MONO_OPT_SHARED) {
3041 		EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3042 		iargs [1] = data_inst;
3043 		alloc_ftn = mono_object_new;
/* managed_alloc is always NULL here today (see FIXME above); branch kept for
 * when vtable lookup for open classes becomes possible. */
3045 		if (managed_alloc) {
3046 			iargs [0] = data_inst;
3047 			return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3050 		iargs [0] = data_inst;
3051 		alloc_ftn = mono_object_new_specific;
3054 	return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3058  * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit the CIL 'box' sequence for VAL of type KLASS: Nullable<T> delegates
 * to its Box method; otherwise allocate the object and store the value just
 * past the object header.
 */
3061 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass)
3063 	MonoInst *alloc, *ins;
3065 	if (mono_class_is_nullable (klass)) {
3066 		MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3067 		return mono_emit_method_call (cfg, method, &val, NULL);
3070 	alloc = handle_alloc (cfg, klass, TRUE);
/* Copy the value into the freshly allocated box, after the MonoObject header. */
3074 	EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * handle_box_from_inst:
 *
 *   Shared-generic version of handle_box (): the runtime class data arrives as
 * DATA_INST. Nullable<T> boxing goes through an indirect rgctx call to the
 * Box method; everything else allocates via handle_alloc_from_inst () and
 * stores the value past the object header.
 */
3080 handle_box_from_inst (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoInst *data_inst)
3082 	MonoInst *alloc, *ins;
3084 	if (mono_class_is_nullable (klass)) {
3085 		MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3086 		/* FIXME: What if the class is shared? We might not
3087 		   have to get the method address from the RGCTX. */
3088 		MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3089 												MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
/* NOTE(review): the rgctx is fetched for cfg->current_method here, while
 * handle_unbox_nullable () passes the Box/Unbox method itself — confirm
 * whether this asymmetry is intentional. */
3090 		MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3092 		return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3094 	alloc = handle_alloc_from_inst (cfg, klass, data_inst, TRUE);
3096 	EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/* Predicate: does an isinst/castclass against KLASS need the slow icall path?
 * The leading "TRUE ||" force-enables the icall path for every class,
 * short-circuiting the per-class conditions; the FIXME below explains why the
 * fast paths are currently disabled. */
3102 // FIXME: This doesn't work yet (class libs tests fail?)
3103 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || mono_class_has_variant_generic_params (klass) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
3106  * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 *
 *   Emit the CIL 'castclass' check for SRC against KLASS. Under generic
 * sharing (or when is_complex_isinst () applies) the check is delegated to
 * the mono_object_castclass icall; otherwise inline checks are emitted:
 * null passes, interfaces check the vtable's interface bitmap, sealed
 * classes compare the class pointer directly, and everything else goes
 * through mini_emit_castclass_inst ().
 */
3109 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3111 	MonoBasicBlock *is_null_bb;
3112 	int obj_reg = src->dreg;
3113 	int vtable_reg = alloc_preg (cfg);
3114 	MonoInst *klass_inst = NULL;
3119 		klass_inst = emit_get_rgctx_klass (cfg, context_used,
3120 										   klass, MONO_RGCTX_INFO_KLASS);
3122 		if (is_complex_isinst (klass)) {
3123 			/* Complex case, handle by an icall */
3129 			args [1] = klass_inst;
3131 			return mono_emit_jit_icall (cfg, mono_object_castclass, args);
3133 			/* Simple case, handled by the code below */
/* Null references always satisfy castclass: branch straight to the end. */
3137 	NEW_BBLOCK (cfg, is_null_bb);
3139 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3140 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3142 	save_cast_details (cfg, klass, obj_reg);
3144 	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3145 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3146 		mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3148 		int klass_reg = alloc_preg (cfg);
3150 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed, non-array class: an exact class-pointer compare is sufficient. */
3152 		if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3153 			/* the remoting code is broken, access the class for now */
3154 			if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3155 				MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3157 					cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3158 					cfg->exception_ptr = klass;
3161 				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3163 				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3164 				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3166 			MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3168 			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3169 			mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3173 	MONO_START_BB (cfg, is_null_bb);
3175 	reset_cast_details (cfg);
3181  * Returns NULL and set the cfg exception on error.
/*
 * handle_isinst:
 *
 *   Emit the CIL 'isinst' test for SRC against KLASS, producing either the
 * original reference (on success or null input) or NULL (on failure) in a
 * fresh register. Complex/shared cases are delegated to the
 * mono_object_isinst icall; the inline paths mirror handle_castclass () but
 * branch to false_bb instead of throwing.
 */
3184 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3187 	MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3188 	int obj_reg = src->dreg;
3189 	int vtable_reg = alloc_preg (cfg);
3190 	int res_reg = alloc_preg (cfg);
3191 	MonoInst *klass_inst = NULL;
3194 		klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3196 		if (is_complex_isinst (klass)) {
3199 			/* Complex case, handle by an icall */
3205 			args [1] = klass_inst;
3207 			return mono_emit_jit_icall (cfg, mono_object_isinst, args);
3209 			/* Simple case, the code below can handle it */
3213 	NEW_BBLOCK (cfg, is_null_bb);
3214 	NEW_BBLOCK (cfg, false_bb);
3215 	NEW_BBLOCK (cfg, end_bb);
3217 	/* Do the assignment at the beginning, so the other assignment can be if converted */
3218 	EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3219 	ins->type = STACK_OBJ;
/* Null input: result is null too, skip all checks. */
3222 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3223 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3225 	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3227 	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3228 		g_assert (!context_used);
3229 		/* the is_null_bb target simply copies the input register to the output */
3230 		mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3232 		int klass_reg = alloc_preg (cfg);
/* Array case: rank must match, then the element (cast) class is checked,
 * with special handling for object[]/enum-related covariance. */
3235 			int rank_reg = alloc_preg (cfg);
3236 			int eclass_reg = alloc_preg (cfg);
3238 			g_assert (!context_used);
3239 			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3240 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3241 			MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3242 			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3243 			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
3244 			if (klass->cast_class == mono_defaults.object_class) {
3245 				int parent_reg = alloc_preg (cfg);
3246 				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3247 				mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3248 				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3249 				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3250 			} else if (klass->cast_class == mono_defaults.enum_class->parent) {
3251 				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3252 				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3253 				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3254 			} else if (klass->cast_class == mono_defaults.enum_class) {
3255 				mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3256 				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3257 			} else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3258 				mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3260 				if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3261 					/* Check that the object is a vector too */
3262 					int bounds_reg = alloc_preg (cfg);
3263 					MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3264 					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3265 					MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3268 				/* the is_null_bb target simply copies the input register to the output */
3269 				mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3271 		} else if (mono_class_is_nullable (klass)) {
3272 			g_assert (!context_used);
3273 			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3274 			/* the is_null_bb target simply copies the input register to the output */
3275 			mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed class: exact class compare, mirroring handle_castclass (). */
3277 			if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3278 				g_assert (!context_used);
3279 				/* the remoting code is broken, access the class for now */
3280 				if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3281 					MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3283 						cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3284 						cfg->exception_ptr = klass;
3287 					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3289 					MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3290 					MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3292 				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3293 				MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3295 				MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3296 				/* the is_null_bb target simply copies the input register to the output */
3297 				mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* Failure: overwrite the result with NULL; success path keeps the object. */
3302 	MONO_START_BB (cfg, false_bb);
3304 	MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3305 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3307 	MONO_START_BB (cfg, is_null_bb);
3309 	MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit the remoting-aware variant of isinst (used by the CEE_MONO_CISINST
 * opcode); see the comment below for the 0/1/2 result encoding. Transparent
 * proxies whose type cannot be decided locally yield 2 so the caller can fall
 * back to a remote check.
 */
3315 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3317 	/* This opcode takes as input an object reference and a class, and returns:
3318 	0) if the object is an instance of the class,
3319 	1) if the object is not instance of the class,
3320 	2) if the object is a proxy whose type cannot be determined */
3323 	MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3324 	int obj_reg = src->dreg;
3325 	int dreg = alloc_ireg (cfg);
3327 	int klass_reg = alloc_preg (cfg);
3329 	NEW_BBLOCK (cfg, true_bb);
3330 	NEW_BBLOCK (cfg, false_bb);
3331 	NEW_BBLOCK (cfg, false2_bb);
3332 	NEW_BBLOCK (cfg, end_bb);
3333 	NEW_BBLOCK (cfg, no_proxy_bb);
/* Null is "not an instance" (result 1). */
3335 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3336 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3338 	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3339 		NEW_BBLOCK (cfg, interface_fail_bb);
3341 		tmp_reg = alloc_preg (cfg);
3342 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3343 		mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
/* Interface test failed: could still be a transparent proxy with custom
 * type info, which makes the answer undecidable locally (result 2). */
3344 		MONO_START_BB (cfg, interface_fail_bb);
3345 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3347 		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3349 		tmp_reg = alloc_preg (cfg);
3350 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3351 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3352 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
/* Non-interface case: distinguish proxies from ordinary objects first. */
3354 		tmp_reg = alloc_preg (cfg);
3355 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3356 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3358 		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Proxy: test against the remote class's proxy_class instead. */
3359 		tmp_reg = alloc_preg (cfg);
3360 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3361 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3363 		tmp_reg = alloc_preg (cfg);
3364 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3365 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3366 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3368 		mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3369 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3371 		MONO_START_BB (cfg, no_proxy_bb);
3373 		mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Merge blocks: materialize the 1/2/0 result codes into dreg. */
3376 	MONO_START_BB (cfg, false_bb);
3378 	MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3379 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3381 	MONO_START_BB (cfg, false2_bb);
3383 	MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3384 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3386 	MONO_START_BB (cfg, true_bb);
3388 	MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3390 	MONO_START_BB (cfg, end_bb);
/* Dummy OP_ICONST to carry the result register with a STACK_I4 type. */
3393 	MONO_INST_NEW (cfg, ins, OP_ICONST);
3395 	ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit the remoting-aware variant of castclass (used by the CEE_MONO_CCASTCLASS
 * opcode); see the comment below for the 0/1/throw result encoding. Proxies
 * with custom type info return 1 so the caller can perform the remote check.
 */
3401 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3403 	/* This opcode takes as input an object reference and a class, and returns:
3404 	0) if the object is an instance of the class,
3405 	1) if the object is a proxy whose type cannot be determined
3406 	an InvalidCastException exception is thrown otherwhise*/
3409 	MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3410 	int obj_reg = src->dreg;
3411 	int dreg = alloc_ireg (cfg);
3412 	int tmp_reg = alloc_preg (cfg);
3413 	int klass_reg = alloc_preg (cfg);
3415 	NEW_BBLOCK (cfg, end_bb);
3416 	NEW_BBLOCK (cfg, ok_result_bb);
/* Null always casts successfully (result 0). */
3418 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3419 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
3421 	save_cast_details (cfg, klass, obj_reg);
3423 	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3424 		NEW_BBLOCK (cfg, interface_fail_bb);
3426 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3427 		mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
/* Interface test failed: only a proxy with custom type info may defer the
 * decision (result 1); anything else throws InvalidCastException. */
3428 		MONO_START_BB (cfg, interface_fail_bb);
3429 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3431 		mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3433 		tmp_reg = alloc_preg (cfg);
3434 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3435 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3436 		MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
3438 		MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3439 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Non-interface case: route proxies to the proxy_class check. */
3442 		NEW_BBLOCK (cfg, no_proxy_bb);
3444 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3445 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3446 		mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3448 		tmp_reg = alloc_preg (cfg);
3449 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3450 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3452 		tmp_reg = alloc_preg (cfg);
3453 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3454 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3455 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3457 		NEW_BBLOCK (cfg, fail_1_bb);
3459 		mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3461 		MONO_START_BB (cfg, fail_1_bb);
3463 		MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3464 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3466 		MONO_START_BB (cfg, no_proxy_bb);
/* Ordinary object: a plain castclass check (throws on failure). */
3468 		mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3471 	MONO_START_BB (cfg, ok_result_bb);
3473 	MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3475 	MONO_START_BB (cfg, end_bb);
/* Dummy OP_ICONST to carry the result register with a STACK_I4 type. */
3478 	MONO_INST_NEW (cfg, ins, OP_ICONST);
3480 	ins->type = STACK_I4;
3486  * Returns NULL and set the cfg exception on error.
/*
 * handle_delegate_ctor:
 *
 *   Inline the creation of a delegate of type KLASS wrapping METHOD with
 * receiver TARGET: allocate the delegate object, set its target/method
 * fields, optionally publish a per-domain code slot for the compiled method,
 * and install the delegate invoke trampoline. All validation normally done
 * in mono_delegate_ctor () is deferred to that trampoline.
 */
3488 static G_GNUC_UNUSED MonoInst*
3489 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
3491 	gpointer *trampoline;
3492 	MonoInst *obj, *method_ins, *tramp_ins;
3496 	obj = handle_alloc (cfg, klass, FALSE);
3500 	/* Inline the contents of mono_delegate_ctor */
3502 	/* Set target field */
3503 	/* Optimize away setting of NULL target */
3504 	if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
3505 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3507 	/* Set method field */
3508 	method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
3509 	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3512 	 * To avoid looking up the compiled code belonging to the target method
3513 	 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3514 	 * store it, and we fill it after the method has been compiled.
/* Dynamic methods are excluded because their code can be freed; AOT is
 * excluded because the slot address cannot be embedded ahead of time. */
3516 	if (!cfg->compile_aot && !method->dynamic) {
3517 		MonoInst *code_slot_ins;
3520 			code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Allocate (once per method per domain) the slot under the domain lock. */
3522 			domain = mono_domain_get ();
3523 			mono_domain_lock (domain);
3524 			if (!domain_jit_info (domain)->method_code_hash)
3525 				domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3526 			code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3528 				code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3529 				g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3531 			mono_domain_unlock (domain);
3533 			EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3535 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3538 	/* Set invoke_impl field */
3539 	if (cfg->compile_aot) {
3540 		EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3542 		trampoline = mono_create_delegate_trampoline (klass);
3543 		EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3545 	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3547 	/* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a native call to the mono_array_new_va icall wrapper to allocate
 * a RANK-dimensional array; SP holds the length/bound arguments taken from
 * the evaluation stack. Returns the call instruction.
 */
3553 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3555 MonoJitICallInfo *info;
3557 /* Need to register the icall so it gets an icall wrapper */
3558 info = mono_get_array_new_va_icall (rank);
3560 cfg->flags |= MONO_CFG_HAS_VARARGS;
3562 /* mono_array_new_va () needs a vararg calling convention */
3563 cfg->disable_llvm = TRUE;
3565 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3566 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Materialize the GOT address into cfg->got_var by inserting an
 * OP_LOAD_GOTADDR at the very start of the entry basic block, then add a
 * dummy use in the exit block so the variable stays live for the whole
 * method. No-op if there is no got_var or it was already allocated.
 */
3570 mono_emit_load_got_addr (MonoCompile *cfg)
3572 MonoInst *getaddr, *dummy_use;
3574 if (!cfg->got_var || cfg->got_var_allocated)
3577 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3578 getaddr->dreg = cfg->got_var->dreg;
3580 /* Add it to the start of the first bblock */
/* Prepend manually when the entry block already has code; otherwise the
 * normal append is fine since the block is empty. */
3581 if (cfg->bb_entry->code) {
3582 getaddr->next = cfg->bb_entry->code;
3583 cfg->bb_entry->code = getaddr;
3586 MONO_ADD_INS (cfg->bb_entry, getaddr);
3588 cfg->got_var_allocated = TRUE;
3591 * Add a dummy use to keep the got_var alive, since real uses might
3592 * only be generated by the back ends.
3593 * Add it to end_bblock, so the variable's lifetime covers the whole
3595 * It would be better to make the usage of the got var explicit in all
3596 * cases when the backend needs it (i.e. calls, throw etc.), so this
3597 * wouldn't be needed.
3599 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3600 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Maximum IL code size a callee may have to be inlined; initialized lazily
 * from the MONO_INLINELIMIT env var (default: INLINE_LENGTH_LIMIT). */
3603 static int inline_limit;
3604 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD is eligible for inlining into the method being
 * compiled in CFG. Rejects generic sharing, deep inline nesting, methods
 * flagged noinline/synchronized, MarshalByRef classes, bodies over the
 * size limit, classes whose cctor cannot be run or proven run, methods
 * with declarative security, and (on soft-float targets) R4 signatures.
 * NOTE(review): the return statements are elided in this excerpt; by
 * structure this returns a boolean-style eligibility result.
 */
3607 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3609 MonoMethodHeaderSummary header;
3611 #ifdef MONO_ARCH_SOFT_FLOAT
3612 MonoMethodSignature *sig = mono_method_signature (method);
3616 if (cfg->generic_sharing_context)
3619 if (cfg->inline_depth > 10)
3622 #ifdef MONO_ARCH_HAVE_LMF_OPS
/* NOTE(review): 'signature' below is declared in elided code above —
 * presumably mono_method_signature (method); confirm against the full file. */
3623 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3624 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3625 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3630 if (!mono_method_get_header_summary (method, &header))
3633 /*runtime, icall and pinvoke are checked by summary call*/
3634 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3635 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3636 (method->klass->marshalbyref) ||
3640 /* also consider num_locals? */
3641 /* Do the size check early to avoid creating vtables */
3642 if (!inline_limit_inited) {
3643 if (getenv ("MONO_INLINELIMIT"))
3644 inline_limit = atoi (getenv ("MONO_INLINELIMIT"))
3646 inline_limit = INLINE_LENGTH_LIMIT;
3647 inline_limit_inited = TRUE;
3649 if (header.code_size >= inline_limit)
3653 * if we can initialize the class of the method right away, we do,
3654 * otherwise we don't allow inlining if the class needs initialization,
3655 * since it would mean inserting a call to mono_runtime_class_init()
3656 * inside the inlined code
3658 if (!(cfg->opt & MONO_OPT_SHARED)) {
3659 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3660 if (cfg->run_cctors && method->klass->has_cctor) {
3661 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
3662 if (!method->klass->runtime_info)
3663 /* No vtable created yet */
3665 vtable = mono_class_vtable (cfg->domain, method->klass);
3668 /* This makes so that inline cannot trigger */
3669 /* .cctors: too many apps depend on them */
3670 /* running with a specific order... */
3671 if (! vtable->initialized)
3673 mono_runtime_class_init (vtable);
3675 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3676 if (!method->klass->runtime_info)
3677 /* No vtable created yet */
3679 vtable = mono_class_vtable (cfg->domain, method->klass);
3682 if (!vtable->initialized)
3687 * If we're compiling for shared code
3688 * the cctor will need to be run at aot method load time, for example,
3689 * or at the end of the compilation of the inlining method.
3691 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3696 * CAS - do not inline methods with declarative security
3697 * Note: this has to be before any possible return TRUE;
3699 if (mono_method_has_declsec (method))
3702 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float targets cannot inline methods taking or returning R4. */
3704 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3706 for (i = 0; i < sig->param_count; ++i)
3707 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Decide whether a static field access on VTABLE's class from METHOD
 * requires running the class constructor first. Already-initialized
 * vtables (non-AOT), BeforeFieldInit classes, classes with no pending
 * cctor, and instance methods of the same class are all exempt.
 * NOTE(review): the TRUE/FALSE return lines are elided in this excerpt.
 */
3715 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
3717 if (vtable->initialized && !cfg->compile_aot)
3720 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3723 if (!mono_class_needs_cctor_run (vtable->klass, method))
3726 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3727 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR whose element class is KLASS, including a bounds check.
 * Address = &arr->vector + index * element_size; returns the address
 * instruction (STACK_PTR).
 */
3734 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index)
3738 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3740 mono_class_init (klass);
3741 size = mono_class_array_element_size (klass);
3743 mult_reg = alloc_preg (cfg);
3744 array_reg = arr->dreg;
3745 index_reg = index->dreg;
3747 #if SIZEOF_REGISTER == 8
3748 /* The array reg is 64 bits but the index reg is only 32 */
/* LLVM handles the widening itself; otherwise sign-extend explicitly. */
3749 if (COMPILE_LLVM (cfg)) {
3751 index2_reg = index_reg;
3753 index2_reg = alloc_preg (cfg);
3754 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32-bit target: truncate an I8 index down to 32 bits. */
3757 if (index->type == STACK_I8) {
3758 index2_reg = alloc_preg (cfg);
3759 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
3761 index2_reg = index_reg;
3765 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3767 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Fast path: power-of-two element sizes fold into a single LEA. */
3768 if (size == 1 || size == 2 || size == 4 || size == 8) {
3769 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3771 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3772 ins->type = STACK_PTR;
/* Generic path: multiply, add base, then add the vector offset. */
3778 add_reg = alloc_preg (cfg);
3780 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3781 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3782 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3783 ins->type = STACK_PTR;
3784 MONO_ADD_INS (cfg->cbb, ins);
3789 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Two-dimensional ldelema intrinsic: load the array's bounds block,
 * range-check each index against its dimension's lower_bound/length, then
 * compute &vector[((idx1 - low1) * len2 + (idx2 - low2)) * element_size].
 * Only compiled where real multiply opcodes exist (no MUL/DIV emulation).
 * Returns the address instruction (STACK_MP).
 */
3791 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
3793 int bounds_reg = alloc_preg (cfg);
3794 int add_reg = alloc_preg (cfg);
3795 int mult_reg = alloc_preg (cfg);
3796 int mult2_reg = alloc_preg (cfg);
3797 int low1_reg = alloc_preg (cfg);
3798 int low2_reg = alloc_preg (cfg);
3799 int high1_reg = alloc_preg (cfg);
3800 int high2_reg = alloc_preg (cfg);
3801 int realidx1_reg = alloc_preg (cfg);
3802 int realidx2_reg = alloc_preg (cfg);
3803 int sum_reg = alloc_preg (cfg);
3808 mono_class_init (klass);
3809 size = mono_class_array_element_size (klass);
3811 index1 = index_ins1->dreg;
3812 index2 = index_ins2->dreg;
3814 /* range checking */
3815 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
3816 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: realidx1 = index1 - lower_bound; unsigned compare against
 * length throws IndexOutOfRange (catches negative results too). */
3818 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
3819 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3820 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
3821 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
3822 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
3823 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
3824 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: same check using the second MonoArrayBounds entry. */
3826 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
3827 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3828 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
3829 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
3830 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
3831 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
3832 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Linearize: (realidx1 * len2 + realidx2) * size + base + vector offset. */
3834 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
3835 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
3836 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
3837 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
3838 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3840 ins->type = STACK_MP;
3842 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit the address computation for an Array Address/Get/Set intrinsic
 * call. Dispatches by rank: rank 1 uses the inline fast path, rank 2 uses
 * the two-dimensional intrinsic (when real MUL opcodes exist and INTRINS
 * is enabled), everything else calls the marshalled Address wrapper.
 * IS_SET indicates a setter, whose trailing value arg is not an index.
 * NOTE(review): the rank == 1 guard for the first return is in an elided
 * line just above it.
 */
3849 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
3853 MonoMethod *addr_method;
3856 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3859 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1]);
3861 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3862 /* emit_ldelema_2 depends on OP_LMUL */
3863 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3864 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* General case: call the generated Address wrapper for this rank/size. */
3868 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3869 addr_method = mono_marshal_get_array_address (rank, element_size);
3870 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint request unconditionally. */
3875 static MonoBreakPolicy
3876 always_insert_breakpoint (MonoMethod *method)
3878 return MONO_BREAK_POLICY_ALWAYS;
/* Current break-policy callback; embedders may replace it via
 * mono_set_break_policy (). */
3881 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
3884 * mono_set_break_policy:
3885 * policy_callback: the new callback function
3887 * Allow embedders to decide whether to actually obey breakpoint instructions
3888 * (both break IL instructions and Debugger.Break () method calls), for example
3889 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
3890 * untrusted or semi-trusted code.
3892 * @policy_callback will be called every time a break point instruction needs to
3893 * be inserted with the method argument being the method that calls Debugger.Break()
3894 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
3895 * if it wants the breakpoint to not be effective in the given method.
3896 * #MONO_BREAK_POLICY_ALWAYS is the default.
/* Install POLICY_CALLBACK as the break policy; see the doc comment above. */
3899 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
3901 if (policy_callback)
3902 break_policy_func = policy_callback;
/* NULL resets to the default always-break policy. */
3904 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:
 *
 *   Consult the installed break policy for METHOD and report whether a
 * breakpoint should actually be emitted. Unknown policy values warn.
 * NOTE(review): the name is misspelled ("brekpoint") but callers use the
 * same spelling, so it is kept here for consistency.
 */
3908 should_insert_brekpoint (MonoMethod *method) {
3909 switch (break_policy_func (method)) {
3910 case MONO_BREAK_POLICY_ALWAYS:
3912 case MONO_BREAK_POLICY_NEVER:
3914 case MONO_BREAK_POLICY_ON_DBG:
3915 return mono_debug_using_mono_debugger ();
3917 g_warning ("Incorrect value returned from break policy callback");
/*
 * mini_emit_inst_for_method:
 *
 *   Try to replace a call to CMETHOD (signature FSIG, arguments ARGS) with
 * inlined IR — the JIT's intrinsics dispatcher. Recognizes selected
 * methods on String, Object, Array, RuntimeHelpers, Thread, Monitor,
 * Interlocked, Debugger, Environment and Math, falling back to SIMD and
 * architecture-specific intrinsics. Returns the replacement instruction,
 * or (per the fall-through paths) the arch backend's result.
 */
3923 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
3925 MonoInst *ins = NULL;
/* Cache the RuntimeHelpers class lookup across calls. */
3927 static MonoClass *runtime_helpers_class = NULL;
3928 if (! runtime_helpers_class)
3929 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
3930 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* ---- System.String intrinsics ---- */
3932 if (cmethod->klass == mono_defaults.string_class) {
/* get_Chars: bounds-checked 16-bit load from the chars array. */
3933 if (strcmp (cmethod->name, "get_Chars") == 0) {
3934 int dreg = alloc_ireg (cfg);
3935 int index_reg = alloc_preg (cfg);
3936 int mult_reg = alloc_preg (cfg);
3937 int add_reg = alloc_preg (cfg);
3939 #if SIZEOF_REGISTER == 8
3940 /* The array reg is 64 bits but the index reg is only 32 */
3941 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
3943 index_reg = args [1]->dreg;
3945 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
3947 #if defined(TARGET_X86) || defined(TARGET_AMD64)
3948 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
3949 add_reg = ins->dreg;
3950 /* Avoid a warning */
3952 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3955 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
3956 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3957 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3958 add_reg, G_STRUCT_OFFSET (MonoString, chars));
3960 type_from_op (ins, NULL, NULL);
/* get_Length: OP_STRLEN, decomposed later so ABC removal can see it. */
3962 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3963 int dreg = alloc_ireg (cfg);
3964 /* Decompose later to allow more optimizations */
3965 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
3966 ins->type = STACK_I4;
3967 cfg->cbb->has_array_access = TRUE;
3968 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
/* InternalSetChar: unchecked 16-bit store into the chars array. */
3971 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
3972 int mult_reg = alloc_preg (cfg);
3973 int add_reg = alloc_preg (cfg);
3975 /* The corlib functions check for oob already. */
3976 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
3977 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3978 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
3979 return cfg->cbb->last_ins;
/* ---- System.Object intrinsics ---- */
3982 } else if (cmethod->klass == mono_defaults.object_class) {
/* GetType: load vtable (faulting on null), then its type field. */
3984 if (strcmp (cmethod->name, "GetType") == 0) {
3985 int dreg = alloc_preg (cfg);
3986 int vt_reg = alloc_preg (cfg);
3987 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3988 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
3989 type_from_op (ins, NULL, NULL);
/* InternalGetHashCode: address-based hash; only valid when objects
 * never move (no moving GC) and real multiply opcodes exist. */
3992 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
3993 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
3994 int dreg = alloc_ireg (cfg);
3995 int t1 = alloc_ireg (cfg);
3997 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
3998 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
3999 ins->type = STACK_I4;
/* Object..ctor is empty: replace the call with a NOP. */
4003 } else if (strcmp (cmethod->name, ".ctor") == 0) {
4004 MONO_INST_NEW (cfg, ins, OP_NOP);
4005 MONO_ADD_INS (cfg->cbb, ins);
/* ---- System.Array intrinsics (getters only) ---- */
4009 } else if (cmethod->klass == mono_defaults.array_class) {
4010 if (cmethod->name [0] != 'g')
/* get_Rank: byte load from the vtable's rank field. */
4013 if (strcmp (cmethod->name, "get_Rank") == 0) {
4014 int dreg = alloc_ireg (cfg);
4015 int vtable_reg = alloc_preg (cfg);
4016 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
4017 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4018 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
4019 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4020 type_from_op (ins, NULL, NULL);
/* get_Length: direct load of max_length. */
4023 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4024 int dreg = alloc_ireg (cfg);
4026 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4027 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4028 type_from_op (ins, NULL, NULL);
/* ---- RuntimeHelpers ---- */
4033 } else if (cmethod->klass == runtime_helpers_class) {
4035 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
4036 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
/* ---- System.Threading.Thread ---- */
4040 } else if (cmethod->klass == mono_defaults.thread_class) {
4041 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
4042 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
4043 MONO_ADD_INS (cfg->cbb, ins);
4045 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
4046 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4047 MONO_ADD_INS (cfg->cbb, ins);
/* ---- System.Threading.Monitor: Enter/Exit via trampoline or IL
 * fast path, depending on the architecture's support. ---- */
4050 } else if (cmethod->klass == mono_defaults.monitor_class) {
4051 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
4052 if (strcmp (cmethod->name, "Enter") == 0) {
4055 if (COMPILE_LLVM (cfg)) {
4057 * Pass the argument normally, the LLVM backend will handle the
4058 * calling convention problems.
4060 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
/* Non-LLVM: trampoline takes the object in a fixed register. */
4062 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
4063 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4064 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4065 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4068 return (MonoInst*)call;
4069 } else if (strcmp (cmethod->name, "Exit") == 0) {
4072 if (COMPILE_LLVM (cfg)) {
4073 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4075 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
4076 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4077 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4078 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4081 return (MonoInst*)call;
4083 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4084 MonoMethod *fast_method = NULL;
4086 /* Avoid infinite recursion */
4087 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
4088 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
4089 strcmp (cfg->method->name, "FastMonitorExit") == 0))
4092 if (strcmp (cmethod->name, "Enter") == 0 ||
4093 strcmp (cmethod->name, "Exit") == 0)
4094 fast_method = mono_monitor_get_fast_path (cmethod);
4098 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
/* ---- Array.GetGenericValueImpl: element load + store to the
 * caller-provided location, using the fast ldelema path. ---- */
4100 } else if (mini_class_is_system_array (cmethod->klass) &&
4101 strcmp (cmethod->name, "GetGenericValueImpl") == 0) {
4102 MonoInst *addr, *store, *load;
4103 MonoClass *eklass = mono_class_from_mono_type (fsig->params [1]);
4105 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1]);
4106 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4107 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* ---- System.Threading.Interlocked ---- */
4109 } else if (cmethod->klass->image == mono_defaults.corlib &&
4110 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
4111 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
4114 #if SIZEOF_REGISTER == 8
4115 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
4116 /* 64 bit reads are already atomic */
4117 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
4118 ins->dreg = mono_alloc_preg (cfg);
4119 ins->inst_basereg = args [0]->dreg;
4120 ins->inst_offset = 0;
4121 MONO_ADD_INS (cfg->cbb, ins);
4125 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
/* Increment/Decrement: atomic add of a +1/-1 constant. */
4126 if (strcmp (cmethod->name, "Increment") == 0) {
4127 MonoInst *ins_iconst;
4130 if (fsig->params [0]->type == MONO_TYPE_I4)
4131 opcode = OP_ATOMIC_ADD_NEW_I4;
4132 #if SIZEOF_REGISTER == 8
4133 else if (fsig->params [0]->type == MONO_TYPE_I8)
4134 opcode = OP_ATOMIC_ADD_NEW_I8;
4137 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4138 ins_iconst->inst_c0 = 1;
4139 ins_iconst->dreg = mono_alloc_ireg (cfg);
4140 MONO_ADD_INS (cfg->cbb, ins_iconst);
4142 MONO_INST_NEW (cfg, ins, opcode);
4143 ins->dreg = mono_alloc_ireg (cfg);
4144 ins->inst_basereg = args [0]->dreg;
4145 ins->inst_offset = 0;
4146 ins->sreg2 = ins_iconst->dreg;
4147 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4148 MONO_ADD_INS (cfg->cbb, ins);
4150 } else if (strcmp (cmethod->name, "Decrement") == 0) {
4151 MonoInst *ins_iconst;
4154 if (fsig->params [0]->type == MONO_TYPE_I4)
4155 opcode = OP_ATOMIC_ADD_NEW_I4;
4156 #if SIZEOF_REGISTER == 8
4157 else if (fsig->params [0]->type == MONO_TYPE_I8)
4158 opcode = OP_ATOMIC_ADD_NEW_I8;
4161 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4162 ins_iconst->inst_c0 = -1;
4163 ins_iconst->dreg = mono_alloc_ireg (cfg);
4164 MONO_ADD_INS (cfg->cbb, ins_iconst);
4166 MONO_INST_NEW (cfg, ins, opcode);
4167 ins->dreg = mono_alloc_ireg (cfg);
4168 ins->inst_basereg = args [0]->dreg;
4169 ins->inst_offset = 0;
4170 ins->sreg2 = ins_iconst->dreg;
4171 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4172 MONO_ADD_INS (cfg->cbb, ins);
/* Add: atomic add of the caller-supplied delta. */
4174 } else if (strcmp (cmethod->name, "Add") == 0) {
4177 if (fsig->params [0]->type == MONO_TYPE_I4)
4178 opcode = OP_ATOMIC_ADD_NEW_I4;
4179 #if SIZEOF_REGISTER == 8
4180 else if (fsig->params [0]->type == MONO_TYPE_I8)
4181 opcode = OP_ATOMIC_ADD_NEW_I8;
4185 MONO_INST_NEW (cfg, ins, opcode);
4186 ins->dreg = mono_alloc_ireg (cfg);
4187 ins->inst_basereg = args [0]->dreg;
4188 ins->inst_offset = 0;
4189 ins->sreg2 = args [1]->dreg;
4190 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4191 MONO_ADD_INS (cfg->cbb, ins);
4194 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
4196 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
/* Exchange: pick the exchange width from the parameter type. */
4197 if (strcmp (cmethod->name, "Exchange") == 0) {
4199 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
4201 if (fsig->params [0]->type == MONO_TYPE_I4)
4202 opcode = OP_ATOMIC_EXCHANGE_I4;
4203 #if SIZEOF_REGISTER == 8
4204 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
4205 (fsig->params [0]->type == MONO_TYPE_I))
4206 opcode = OP_ATOMIC_EXCHANGE_I8;
4208 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
4209 opcode = OP_ATOMIC_EXCHANGE_I4;
4214 MONO_INST_NEW (cfg, ins, opcode);
4215 ins->dreg = mono_alloc_ireg (cfg);
4216 ins->inst_basereg = args [0]->dreg;
4217 ins->inst_offset = 0;
4218 ins->sreg2 = args [1]->dreg;
4219 MONO_ADD_INS (cfg->cbb, ins);
4221 switch (fsig->params [0]->type) {
4223 ins->type = STACK_I4;
4227 ins->type = STACK_I8;
4229 case MONO_TYPE_OBJECT:
4230 ins->type = STACK_OBJ;
4233 g_assert_not_reached ();
4236 #if HAVE_WRITE_BARRIERS
/* Storing a reference: notify the GC via the write barrier and keep
 * the stored value alive with a dummy use. */
4238 MonoInst *dummy_use;
4239 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
4240 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
4241 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [1]);
4245 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4247 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
/* CompareExchange: choose a 4- or 8-byte CAS by the value type. */
4248 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
4250 gboolean is_ref = MONO_TYPE_IS_REFERENCE (fsig->params [1]);
4251 if (fsig->params [1]->type == MONO_TYPE_I4)
4253 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
4254 size = sizeof (gpointer);
4255 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I4)
4258 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
4259 ins->dreg = alloc_ireg (cfg);
4260 ins->sreg1 = args [0]->dreg;
4261 ins->sreg2 = args [1]->dreg;
4262 ins->sreg3 = args [2]->dreg;
4263 ins->type = STACK_I4;
4264 MONO_ADD_INS (cfg->cbb, ins);
4265 } else if (size == 8) {
4266 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
4267 ins->dreg = alloc_ireg (cfg);
4268 ins->sreg1 = args [0]->dreg;
4269 ins->sreg2 = args [1]->dreg;
4270 ins->sreg3 = args [2]->dreg;
4271 ins->type = STACK_I8;
4272 MONO_ADD_INS (cfg->cbb, ins);
4274 /* g_assert_not_reached (); */
4276 #if HAVE_WRITE_BARRIERS
4278 MonoInst *dummy_use;
4279 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
4280 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
4281 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [1]);
4285 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
/* ---- Other corlib methods: Debugger.Break, Environment ---- */
4289 } else if (cmethod->klass->image == mono_defaults.corlib) {
/* Debugger.Break: emit OP_BREAK or a NOP per the break policy. */
4290 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
4291 && strcmp (cmethod->klass->name, "Debugger") == 0) {
4292 if (should_insert_brekpoint (cfg->method))
4293 MONO_INST_NEW (cfg, ins, OP_BREAK);
4295 MONO_INST_NEW (cfg, ins, OP_NOP);
4296 MONO_ADD_INS (cfg->cbb, ins);
/* Environment.get_IsRunningOnWindows: compile-time constant. */
4299 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
4300 && strcmp (cmethod->klass->name, "Environment") == 0) {
4302 EMIT_NEW_ICONST (cfg, ins, 1);
4304 EMIT_NEW_ICONST (cfg, ins, 0);
/* ---- System.Math ---- */
4308 } else if (cmethod->klass == mono_defaults.math_class) {
4310 * There is general branches code for Min/Max, but it does not work for
4312 * http://everything2.com/?node_id=1051618
/* ---- SIMD and architecture-specific fallbacks ---- */
4316 #ifdef MONO_ARCH_SIMD_INTRINSICS
4317 if (cfg->opt & MONO_OPT_SIMD) {
4318 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
4324 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
4328 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect selected runtime calls to specialized implementations.
 * Currently: String.InternalAllocateStr is redirected to the GC's managed
 * allocator (as a (vtable, length) call), but only when allocation
 * profiling is off and a managed allocator is available. Returns the
 * replacement call instruction, or falls through when no redirection
 * applies.
 */
4331 inline static MonoInst*
4332 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4333 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4335 if (method->klass == mono_defaults.string_class) {
4336 /* managed string allocation support */
4337 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS)) {
4338 MonoInst *iargs [2];
4339 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4340 MonoMethod *managed_alloc = NULL;
4342 g_assert (vtable); /* Should not fail since it is System.String */
4343 #ifndef MONO_CROSS_COMPILE
4344 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
4348 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4349 iargs [1] = args [0];
4350 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   For inlining: create an OP_LOCAL variable for each argument of SIG
 * (including the implicit 'this'), record it in cfg->args, and emit a
 * store of the corresponding stack value from SP into it.
 * NOTE(review): the increment of 'sp' per iteration sits in an elided
 * line; the uses of *sp / sp [0] below rely on it.
 */
4357 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4359 MonoInst *store, *temp;
4362 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* 'this' has no entry in sig->params; derive its type from the stack. */
4363 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4366 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4367 * would be different than the MonoInst's used to represent arguments, and
4368 * the ldelema implementation can't deal with that.
4369 * Solution: When ldelema is used on an inline argument, create a var for
4370 * it, emit ldelema on that var, and emit the saving code below in
4371 * inline_method () if needed.
4373 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4374 cfg->args [i] = temp;
4375 /* This uses cfg->args [i] which is set by the preceding line */
4376 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4377 store->cil_code = sp [0]->cil_code;
4382 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4383 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4385 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debug aid: only allow inlining of callees whose full name starts with
 * the MONO_INLINE_CALLED_METHOD_NAME_LIMIT env var (read once, cached in
 * a static). An empty/unset limit permits everything.
 */
4387 check_inline_called_method_name_limit (MonoMethod *called_method)
4390 static char *limit = NULL;
4392 if (limit == NULL) {
4393 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4395 if (limit_string != NULL)
4396 limit = limit_string;
/* No env var set: use "" so every callee passes. */
4398 limit = (char *) "";
4401 if (limit [0] != '\0') {
4402 char *called_method_name = mono_method_full_name (called_method, TRUE);
4404 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4405 g_free (called_method_name);
4407 //return (strncmp_result <= 0);
4408 return (strncmp_result == 0);
4415 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4417 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4420 static char *limit = NULL;
4422 if (limit == NULL) {
4423 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4424 if (limit_string != NULL) {
4425 limit = limit_string;
4427 limit = (char *) "";
4431 if (limit [0] != '\0') {
4432 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
4434 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4435 g_free (caller_method_name);
4437 //return (strncmp_result <= 0);
4438 return (strncmp_result == 0);
4446 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4447 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
4449 MonoInst *ins, *rvar = NULL;
4450 MonoMethodHeader *cheader;
4451 MonoBasicBlock *ebblock, *sbblock;
4453 MonoMethod *prev_inlined_method;
4454 MonoInst **prev_locals, **prev_args;
4455 MonoType **prev_arg_types;
4456 guint prev_real_offset;
4457 GHashTable *prev_cbb_hash;
4458 MonoBasicBlock **prev_cil_offset_to_bb;
4459 MonoBasicBlock *prev_cbb;
4460 unsigned char* prev_cil_start;
4461 guint32 prev_cil_offset_to_bb_len;
4462 MonoMethod *prev_current_method;
4463 MonoGenericContext *prev_generic_context;
4464 gboolean ret_var_set, prev_ret_var_set;
4466 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
4468 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4469 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
4472 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4473 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
4477 if (cfg->verbose_level > 2)
4478 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4480 if (!cmethod->inline_info) {
4481 mono_jit_stats.inlineable_methods++;
4482 cmethod->inline_info = 1;
4485 /* allocate local variables */
4486 cheader = mono_method_get_header (cmethod);
4488 if (cheader == NULL || mono_loader_get_last_error ()) {
4490 mono_metadata_free_mh (cheader);
4491 mono_loader_clear_error ();
4495 /* allocate space to store the return value */
4496 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4497 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
4501 prev_locals = cfg->locals;
4502 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4503 for (i = 0; i < cheader->num_locals; ++i)
4504 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4506 /* allocate start and end blocks */
4507 /* This is needed so if the inline is aborted, we can clean up */
4508 NEW_BBLOCK (cfg, sbblock);
4509 sbblock->real_offset = real_offset;
4511 NEW_BBLOCK (cfg, ebblock);
4512 ebblock->block_num = cfg->num_bblocks++;
4513 ebblock->real_offset = real_offset;
4515 prev_args = cfg->args;
4516 prev_arg_types = cfg->arg_types;
4517 prev_inlined_method = cfg->inlined_method;
4518 cfg->inlined_method = cmethod;
4519 cfg->ret_var_set = FALSE;
4520 cfg->inline_depth ++;
4521 prev_real_offset = cfg->real_offset;
4522 prev_cbb_hash = cfg->cbb_hash;
4523 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4524 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4525 prev_cil_start = cfg->cil_start;
4526 prev_cbb = cfg->cbb;
4527 prev_current_method = cfg->current_method;
4528 prev_generic_context = cfg->generic_context;
4529 prev_ret_var_set = cfg->ret_var_set;
4531 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4533 ret_var_set = cfg->ret_var_set;
4535 cfg->inlined_method = prev_inlined_method;
4536 cfg->real_offset = prev_real_offset;
4537 cfg->cbb_hash = prev_cbb_hash;
4538 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4539 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4540 cfg->cil_start = prev_cil_start;
4541 cfg->locals = prev_locals;
4542 cfg->args = prev_args;
4543 cfg->arg_types = prev_arg_types;
4544 cfg->current_method = prev_current_method;
4545 cfg->generic_context = prev_generic_context;
4546 cfg->ret_var_set = prev_ret_var_set;
4547 cfg->inline_depth --;
4549 if ((costs >= 0 && costs < 60) || inline_allways) {
4550 if (cfg->verbose_level > 2)
4551 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4553 mono_jit_stats.inlined_methods++;
4555 /* always add some code to avoid block split failures */
4556 MONO_INST_NEW (cfg, ins, OP_NOP);
4557 MONO_ADD_INS (prev_cbb, ins);
4559 prev_cbb->next_bb = sbblock;
4560 link_bblock (cfg, prev_cbb, sbblock);
4563 * Get rid of the begin and end bblocks if possible to aid local
4566 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4568 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4569 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4571 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4572 MonoBasicBlock *prev = ebblock->in_bb [0];
4573 mono_merge_basic_blocks (cfg, prev, ebblock);
4575 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4576 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4577 cfg->cbb = prev_cbb;
4585 * If the inlined method contains only a throw, then the ret var is not
4586 * set, so set it to a dummy value.
4589 static double r8_0 = 0.0;
4591 switch (rvar->type) {
4593 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4596 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4601 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4604 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4605 ins->type = STACK_R8;
4606 ins->inst_p0 = (void*)&r8_0;
4607 ins->dreg = rvar->dreg;
4608 MONO_ADD_INS (cfg->cbb, ins);
4611 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4614 g_assert_not_reached ();
4618 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
4621 mono_metadata_free_mh (cheader);
4624 if (cfg->verbose_level > 2)
4625 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4626 cfg->exception_type = MONO_EXCEPTION_NONE;
4627 mono_loader_clear_error ();
4629 /* This gets rid of the newly added bblocks */
4630 cfg->cbb = prev_cbb;
4632 mono_metadata_free_mh (cheader);
4637 * Some of these comments may well be out-of-date.
4638 * Design decisions: we do a single pass over the IL code (and we do bblock
4639 * splitting/merging in the few cases when it's required: a back jump to an IL
4640 * address that was not already seen as bblock starting point).
4641 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4642 * Complex operations are decomposed in simpler ones right away. We need to let the
4643 * arch-specific code peek and poke inside this process somehow (except when the
4644 * optimizations can take advantage of the full semantic info of coarse opcodes).
4645 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4646 * MonoInst->opcode initially is the IL opcode or some simplification of that
4647 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4648 * opcode with value bigger than OP_LAST.
4649 * At this point the IR can be handed over to an interpreter, a dumb code generator
4650 * or to the optimizing code generator that will translate it to SSA form.
4652 * Profiling directed optimizations.
4653 * We may compile by default with few or no optimizations and instrument the code
4654 * or the user may indicate what methods to optimize the most either in a config file
4655 * or through repeated runs where the compiler applies offline the optimizations to
4656 * each method and then decides if it was worth it.
/*
 * Verification helper macros used inside mono_method_to_ir ().
 * Each expands to a check that bails out through UNVERIFIED (or, for
 * CHECK_TYPELOAD, through load_error) on failure; they reference locals
 * of the enclosing function (sp, stack_start, ip, end, header, num_args,
 * cfg).
 */
4659 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4660 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
/* the evaluation stack may not grow past the max_stack declared in the method header */
4661 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
/* the unsigned compare also rejects negative indexes */
4662 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4663 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
/* the whole operand of the current opcode must lie inside the IL buffer */
4664 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4665 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
/* failed type loads record the offending class and take the load_error path */
4666 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
4668 /* offset from br.s -> br like opcodes */
4669 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return TRUE if the IL address IP does not begin a basic block other
 * than BB, i.e. either no bblock starts at IP or the bblock starting
 * there is BB itself.
 */
4672 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4674 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4676 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   First pass over the IL stream [START, END): decode every opcode and
 * create basic blocks (via GET_BBLOCK) at each branch/switch target and at
 * the instruction following a branch. Blocks containing a throw are
 * flagged out_of_line so later passes can treat them as cold code.
 * NOTE(review): parts of this function (operand advances, loop framing,
 * error handling via *pos) are elided from this view.
 */
4680 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4682 unsigned char *ip = start;
4683 unsigned char *target;
4686 MonoBasicBlock *bblock;
4687 const MonoOpcode *opcode;
/* offset of the current instruction relative to the method start */
4690 cli_addr = ip - start;
4691 i = mono_opcode_value ((const guint8 **)&ip, end);
4694 opcode = &mono_opcodes [i];
/* dispatch on the operand kind: only branch-like operands create bblocks */
4695 switch (opcode->argument) {
4696 case MonoInlineNone:
4699 case MonoInlineString:
4700 case MonoInlineType:
4701 case MonoInlineField:
4702 case MonoInlineMethod:
4705 case MonoShortInlineR:
4712 case MonoShortInlineVar:
4713 case MonoShortInlineI:
4716 case MonoShortInlineBrTarget:
/* 1-byte signed displacement, measured from the end of the 2-byte instruction */
4717 target = start + cli_addr + 2 + (signed char)ip [1];
4718 GET_BBLOCK (cfg, bblock, target);
/* the fall-through successor starts a block as well */
4721 GET_BBLOCK (cfg, bblock, ip);
4723 case MonoInlineBrTarget:
/* 4-byte signed displacement, measured from the end of the 5-byte instruction */
4724 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4725 GET_BBLOCK (cfg, bblock, target);
4728 GET_BBLOCK (cfg, bblock, ip);
4730 case MonoInlineSwitch: {
4731 guint32 n = read32 (ip + 1);
/* the instruction following the jump table starts a block */
4734 cli_addr += 5 + 4 * n;
4735 target = start + cli_addr;
4736 GET_BBLOCK (cfg, bblock, target);
/* each of the n switch targets is relative to the end of the whole instruction */
4738 for (j = 0; j < n; ++j) {
4739 target = start + cli_addr + (gint32)read32 (ip);
4740 GET_BBLOCK (cfg, bblock, target);
4750 g_assert_not_reached ();
/* mark the block containing a throw as out-of-line (cold) code */
4753 if (i == CEE_THROW) {
4754 unsigned char *bb_start = ip - 1;
4756 /* Find the start of the bblock containing the throw */
4758 while ((bb_start >= start) && !bblock) {
4759 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
4763 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN to a MonoMethod in the given generic CONTEXT without
 * rejecting methods on open constructed types. For wrapper methods the
 * token indexes the wrapper data instead of image metadata.
 */
4772 static inline MonoMethod *
4773 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4777 if (m->wrapper_type != MONO_WRAPPER_NONE)
4778 return mono_method_get_wrapper_data (m, token);
4780 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when not compiling shared
 * generic code a method whose class is an open constructed type is
 * additionally checked.
 * NOTE(review): the body of that branch is elided here — presumably it
 * rejects/handles the open type; confirm against the full source.
 */
4785 static inline MonoMethod *
4786 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4788 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
4790 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass in the given generic CONTEXT and run
 * mono_class_init () on it. For wrapper methods the token indexes the
 * wrapper data instead of image metadata.
 */
4796 static inline MonoClass*
4797 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4801 if (method->wrapper_type != MONO_WRAPPER_NONE)
4802 klass = mono_method_get_wrapper_data (method, token);
4804 klass = mono_class_get_full (method->klass->image, token, context);
4806 mono_class_init (klass);
4811 * Returns TRUE if the JIT should abort inlining because "callee"
4812 * is influenced by security attributes.
4815 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
/* while compiling an inlined body, any declsec-decorated callee forces the inline to be aborted */
4819 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
/* evaluate the CAS link demand for this caller/callee pair */
4823 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
4824 if (result == MONO_JIT_SECURITY_OK)
4827 if (result == MONO_JIT_LINKDEMAND_ECMA) {
4828 /* Generate code to throw a SecurityException before the actual call/link */
4829 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4832 NEW_ICONST (cfg, args [0], 4);
4833 NEW_METHODCONST (cfg, args [1], caller);
4834 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
4835 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
4836 /* don't hide previous results */
/* record the failure on the cfg; the actual exception is raised later */
4837 cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
4838 cfg->exception_data = result;
/*
 * throw_exception:
 *
 *   Return (lazily caching it in a function-local static) the
 * SecurityManager.ThrowException (exception) method, used by
 * emit_throw_exception () below.
 */
4846 throw_exception (void)
4848 static MonoMethod *method = NULL;
4851 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4852 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit IR that throws the pre-allocated exception object EX by calling
 * SecurityManager.ThrowException with EX embedded as a pointer constant.
 */
4859 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
4861 MonoMethod *thrower = throw_exception ();
4864 EMIT_NEW_PCONST (cfg, args [0], ex);
4865 mono_emit_method_call (cfg, thrower, args, NULL);
4869 * Return the original method if a wrapper is specified. We can only access
4870 * the custom attributes from the original method.
4873 get_original_method (MonoMethod *method)
/* not a wrapper: the method itself carries the attributes */
4875 if (method->wrapper_type == MONO_WRAPPER_NONE)
4878 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
4879 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
4882 /* in other cases we need to find the original method */
4883 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security-transparency check for field accesses: emit a
 * FieldAccessException throw when a Transparent CALLER accesses a FIELD
 * whose declaring type is Critical.
 */
4887 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
4888 MonoBasicBlock *bblock, unsigned char *ip)
4890 /* there's no restriction to access Transparent or SafeCritical fields, so we only check accesses to fields of Critical types */
4891 if (mono_security_core_clr_class_level (mono_field_get_parent (field)) != MONO_SECURITY_CORE_CLR_CRITICAL)
4894 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4895 caller = get_original_method (caller);
4899 /* the field is Critical! only SafeCritical and Critical callers can access it, so we throw if caller is Transparent */
4900 if (mono_security_core_clr_method_level (caller, TRUE) == MONO_SECURITY_CORE_CLR_TRANSPARENT)
4901 emit_throw_exception (cfg, mono_get_exception_field_access ());
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security-transparency check for calls: emit a
 * MethodAccessException throw when a Transparent CALLER invokes a
 * Critical CALLEE.
 */
4905 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4906 MonoBasicBlock *bblock, unsigned char *ip)
4908 /* there's no restriction to call Transparent or SafeCritical code, so we only check calls to Critical methods */
4909 if (mono_security_core_clr_method_level (callee, TRUE) != MONO_SECURITY_CORE_CLR_CRITICAL)
4912 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4913 caller = get_original_method (caller);
4917 /* the callee is Critical! only SafeCritical and Critical callers can call it, so we throw if the caller is Transparent */
4918 if (mono_security_core_clr_method_level (caller, TRUE) == MONO_SECURITY_CORE_CLR_TRANSPARENT)
4919 emit_throw_exception (cfg, mono_get_exception_method_access ())
4923 * Check that the IL instructions at ip are the array initialization
4924 * sequence and return the pointer to the data and the size.
4927 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
4930 * newarr[System.Int32]
4932 * ldtoken field valuetype ...
4933 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* pattern match dup; ldtoken <field> (table 0x04); call ... — operands are at fixed offsets */
4935 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
4936 guint32 token = read32 (ip + 7);
4937 guint32 field_token = read32 (ip + 2);
/* strip the metadata-table byte to get the row index */
4938 guint32 field_index = field_token & 0xffffff;
4940 const char *data_ptr;
4942 MonoMethod *cmethod;
4943 MonoClass *dummy_class;
4944 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
4950 *out_field_token = field_token;
4952 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* only optimize the canonical RuntimeHelpers.InitializeArray call from corlib */
4955 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
/* the element type decides whether the raw blob can be copied verbatim */
4957 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
4958 case MONO_TYPE_BOOLEAN:
4962 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
4963 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
4964 case MONO_TYPE_CHAR:
4974 return NULL; /* stupid ARM FP swapped format */
/* the blob must be at least as large as the computed array payload */
4984 if (size > mono_type_size (field->type, &dummy_align))
4987 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
4988 if (!method->klass->image->dynamic) {
4989 field_index = read32 (ip + 2) & 0xffffff;
/* look up the field's RVA and map it to the in-memory image */
4990 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
4991 data_ptr = mono_image_rva_map (method->klass->image, rva);
4992 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
4993 /* for aot code we do the lookup on load */
4994 if (aot && data_ptr)
4995 return GUINT_TO_POINTER (rva);
4997 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* dynamic (SRE) images have no RVA mapping; read the data from the field object */
4999 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG, formatting a message that
 * contains the method name and a disassembly of the offending IL at IP
 * (or a note that the body is empty).
 */
5007 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
5009 char *method_fname = mono_method_full_name (method, TRUE);
5011 MonoMethodHeader *header = mono_method_get_header (method);
5013 if (header->code_size == 0)
5014 method_code = g_strdup ("method body is empty.");
5016 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
5017 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
5018 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
/* the strings above were copied into exception_message; release the temporaries */
5019 g_free (method_fname);
5020 g_free (method_code);
5021 mono_metadata_free_mh (header);
/*
 * set_exception_object:
 *
 *   Record a pre-built managed exception object on CFG. The object is
 * registered as a GC root since it is stored in unmanaged memory.
 */
5025 set_exception_object (MonoCompile *cfg, MonoException *exception)
5027 cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
5028 MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
5029 cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 *
 *   Return TRUE if KLASS is a reference type; under generic sharing the
 * type is first normalized to its basic (shared) representation.
 */
5033 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5037 if (cfg->generic_sharing_context)
5038 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
5040 type = &klass->byval_arg;
5041 return MONO_TYPE_IS_REFERENCE (type);
/*
 * emit_stloc_ir:
 *
 *   Emit the store of *SP into local N. If the value on the stack is a
 * constant that was the last instruction emitted, retarget its dreg to
 * the local's register instead of emitting a separate move.
 */
5045 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5048 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5049 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5050 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5051 /* Optimize reg-reg moves away */
5053 * Can't optimize other opcodes, since sp[0] might point to
5054 * the last ins of a decomposed opcode.
5056 sp [0]->dreg = (cfg)->locals [n]->dreg;
5058 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5063 * ldloca inhibits many optimizations so try to get rid of it in common
/* cases: e.g. when ldloca is immediately followed by initobj, replace the
 * sequence with a direct zero/NULL initialization of the local and return
 * the IP past the consumed instructions (NOTE(review): the return paths
 * are elided in this view — confirm against the full source). */
5066 static inline unsigned char *
5067 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
/* operand of the wide ldloca form: 16-bit local index */
5076 local = read16 (ip + 2);
/* peephole: ldloca immediately followed by initobj within the same bblock */
5080 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5081 gboolean skip = FALSE;
5083 /* From the INITOBJ case */
5084 token = read32 (ip + 2);
5085 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5086 CHECK_TYPELOAD (klass);
/* initobj on a reference type stores NULL; on a value type it zeroes the storage */
5087 if (generic_class_is_reference_type (cfg, klass)) {
5088 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5089 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5090 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5091 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5092 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/*
 * is_exception_class:
 *
 *   Return whether CLASS derives from System.Exception by walking its
 * parent chain.
 */
5105 is_exception_class (MonoClass *class)
5108 if (class == mono_defaults.exception_class)
5110 class = class->parent;
5116 * mono_method_to_ir:
5118 * Translate the .net IL into linear IR.
5121 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5122 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5123 guint inline_offset, gboolean is_virtual_call)
5126 MonoInst *ins, **sp, **stack_start;
5127 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5128 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
5129 MonoMethod *cmethod, *method_definition;
5130 MonoInst **arg_array;
5131 MonoMethodHeader *header;
5133 guint32 token, ins_flag;
5135 MonoClass *constrained_call = NULL;
5136 unsigned char *ip, *end, *target, *err_pos;
5137 static double r8_0 = 0.0;
5138 MonoMethodSignature *sig;
5139 MonoGenericContext *generic_context = NULL;
5140 MonoGenericContainer *generic_container = NULL;
5141 MonoType **param_types;
5142 int i, n, start_new_bblock, dreg;
5143 int num_calls = 0, inline_costs = 0;
5144 int breakpoint_id = 0;
5146 MonoBoolean security, pinvoke;
5147 MonoSecurityManager* secman = NULL;
5148 MonoDeclSecurityActions actions;
5149 GSList *class_inits = NULL;
5150 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5152 gboolean init_locals, seq_points, skip_dead_blocks;
5154 /* serialization and xdomain stuff may need access to private fields and methods */
5155 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5156 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5157 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5158 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5159 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5160 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5162 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5164 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5165 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5166 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5167 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5169 image = method->klass->image;
5170 header = mono_method_get_header (method);
5171 generic_container = mono_method_get_generic_container (method);
5172 sig = mono_method_signature (method);
5173 num_args = sig->hasthis + sig->param_count;
5174 ip = (unsigned char*)header->code;
5175 cfg->cil_start = ip;
5176 end = ip + header->code_size;
5177 mono_jit_stats.cil_code_size += header->code_size;
5178 init_locals = header->init_locals;
5180 seq_points = cfg->gen_seq_points && cfg->method == method;
5183 * Methods without init_locals set could cause asserts in various passes
5188 method_definition = method;
5189 while (method_definition->is_inflated) {
5190 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5191 method_definition = imethod->declaring;
5194 /* SkipVerification is not allowed if core-clr is enabled */
5195 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5197 dont_verify_stloc = TRUE;
5200 if (!dont_verify && mini_method_verify (cfg, method_definition))
5201 goto exception_exit;
5203 if (mono_debug_using_mono_debugger ())
5204 cfg->keep_cil_nops = TRUE;
5206 if (sig->is_inflated)
5207 generic_context = mono_method_get_context (method);
5208 else if (generic_container)
5209 generic_context = &generic_container->context;
5210 cfg->generic_context = generic_context;
5212 if (!cfg->generic_sharing_context)
5213 g_assert (!sig->has_type_parameters);
5215 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5216 g_assert (method->is_inflated);
5217 g_assert (mono_method_get_context (method)->method_inst);
5219 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5220 g_assert (sig->generic_param_count);
5222 if (cfg->method == method) {
5223 cfg->real_offset = 0;
5225 cfg->real_offset = inline_offset;
5228 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5229 cfg->cil_offset_to_bb_len = header->code_size;
5231 cfg->current_method = method;
5233 if (cfg->verbose_level > 2)
5234 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5236 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5238 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5239 for (n = 0; n < sig->param_count; ++n)
5240 param_types [n + sig->hasthis] = sig->params [n];
5241 cfg->arg_types = param_types;
5243 dont_inline = g_list_prepend (dont_inline, method);
5244 if (cfg->method == method) {
5246 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5247 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5250 NEW_BBLOCK (cfg, start_bblock);
5251 cfg->bb_entry = start_bblock;
5252 start_bblock->cil_code = NULL;
5253 start_bblock->cil_length = 0;
5256 NEW_BBLOCK (cfg, end_bblock);
5257 cfg->bb_exit = end_bblock;
5258 end_bblock->cil_code = NULL;
5259 end_bblock->cil_length = 0;
5260 g_assert (cfg->num_bblocks == 2);
5262 arg_array = cfg->args;
5264 if (header->num_clauses) {
5265 cfg->spvars = g_hash_table_new (NULL, NULL);
5266 cfg->exvars = g_hash_table_new (NULL, NULL);
5268 /* handle exception clauses */
5269 for (i = 0; i < header->num_clauses; ++i) {
5270 MonoBasicBlock *try_bb;
5271 MonoExceptionClause *clause = &header->clauses [i];
5272 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5273 try_bb->real_offset = clause->try_offset;
5274 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5275 tblock->real_offset = clause->handler_offset;
5276 tblock->flags |= BB_EXCEPTION_HANDLER;
5278 link_bblock (cfg, try_bb, tblock);
5280 if (*(ip + clause->handler_offset) == CEE_POP)
5281 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5283 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5284 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5285 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5286 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5287 MONO_ADD_INS (tblock, ins);
5289 /* todo: is a fault block unsafe to optimize? */
5290 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5291 tblock->flags |= BB_EXCEPTION_UNSAFE;
5295 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5297 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5299 /* catch and filter blocks get the exception object on the stack */
5300 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5301 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5302 MonoInst *dummy_use;
5304 /* mostly like handle_stack_args (), but just sets the input args */
5305 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5306 tblock->in_scount = 1;
5307 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5308 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5311 * Add a dummy use for the exvar so its liveness info will be
5315 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5317 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5318 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5319 tblock->flags |= BB_EXCEPTION_HANDLER;
5320 tblock->real_offset = clause->data.filter_offset;
5321 tblock->in_scount = 1;
5322 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5323 /* The filter block shares the exvar with the handler block */
5324 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5325 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5326 MONO_ADD_INS (tblock, ins);
5330 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5331 clause->data.catch_class &&
5332 cfg->generic_sharing_context &&
5333 mono_class_check_context_used (clause->data.catch_class)) {
5335 * In shared generic code with catch
5336 * clauses containing type variables
5337 * the exception handling code has to
5338 * be able to get to the rgctx.
5339 * Therefore we have to make sure that
5340 * the vtable/mrgctx argument (for
5341 * static or generic methods) or the
5342 * "this" argument (for non-static
5343 * methods) are live.
5345 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5346 mini_method_get_context (method)->method_inst ||
5347 method->klass->valuetype) {
5348 mono_get_vtable_var (cfg);
5350 MonoInst *dummy_use;
5352 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5357 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5358 cfg->cbb = start_bblock;
5359 cfg->args = arg_array;
5360 mono_save_args (cfg, sig, inline_args);
5363 /* FIRST CODE BLOCK */
5364 NEW_BBLOCK (cfg, bblock);
5365 bblock->cil_code = ip;
5369 ADD_BBLOCK (cfg, bblock);
5371 if (cfg->method == method) {
5372 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5373 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5374 MONO_INST_NEW (cfg, ins, OP_BREAK);
5375 MONO_ADD_INS (bblock, ins);
5379 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5380 secman = mono_security_manager_get_methods ();
5382 security = (secman && mono_method_has_declsec (method));
5383 /* at this point having security doesn't mean we have any code to generate */
5384 if (security && (cfg->method == method)) {
5385 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5386 * And we do not want to enter the next section (with allocation) if we
5387 * have nothing to generate */
5388 security = mono_declsec_get_demands (method, &actions);
5391 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5392 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5394 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5395 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5396 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5398 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5399 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5403 mono_custom_attrs_free (custom);
5406 custom = mono_custom_attrs_from_class (wrapped->klass);
5407 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5411 mono_custom_attrs_free (custom);
5414 /* not a P/Invoke after all */
5419 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5420 /* we use a separate basic block for the initialization code */
5421 NEW_BBLOCK (cfg, init_localsbb);
5422 cfg->bb_init = init_localsbb;
5423 init_localsbb->real_offset = cfg->real_offset;
5424 start_bblock->next_bb = init_localsbb;
5425 init_localsbb->next_bb = bblock;
5426 link_bblock (cfg, start_bblock, init_localsbb);
5427 link_bblock (cfg, init_localsbb, bblock);
5429 cfg->cbb = init_localsbb;
5431 start_bblock->next_bb = bblock;
5432 link_bblock (cfg, start_bblock, bblock);
5435 /* at this point we know, if security is TRUE, that some code needs to be generated */
5436 if (security && (cfg->method == method)) {
5439 mono_jit_stats.cas_demand_generation++;
5441 if (actions.demand.blob) {
5442 /* Add code for SecurityAction.Demand */
5443 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5444 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5445 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5446 mono_emit_method_call (cfg, secman->demand, args, NULL);
5448 if (actions.noncasdemand.blob) {
5449 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5450 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5451 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5452 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5453 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5454 mono_emit_method_call (cfg, secman->demand, args, NULL);
5456 if (actions.demandchoice.blob) {
5457 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5458 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5459 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5460 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5461 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5465 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5467 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5470 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5471 /* check if this is native code, e.g. an icall or a p/invoke */
5472 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5473 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5475 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
5476 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
5478 /* if this ia a native call then it can only be JITted from platform code */
5479 if ((icall || pinvk) && method->klass && method->klass->image) {
5480 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
5481 MonoException *ex = icall ? mono_get_exception_security () :
5482 mono_get_exception_method_access ();
5483 emit_throw_exception (cfg, ex);
5490 if (header->code_size == 0)
5493 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5498 if (cfg->method == method)
5499 mono_debug_init_method (cfg, bblock, breakpoint_id);
5501 for (n = 0; n < header->num_locals; ++n) {
5502 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5507 /* We force the vtable variable here for all shared methods
5508 for the possibility that they might show up in a stack
5509 trace where their exact instantiation is needed. */
5510 if (cfg->generic_sharing_context && method == cfg->method) {
5511 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5512 mini_method_get_context (method)->method_inst ||
5513 method->klass->valuetype) {
5514 mono_get_vtable_var (cfg);
5516 /* FIXME: Is there a better way to do this?
5517 We need the variable live for the duration
5518 of the whole method. */
5519 cfg->args [0]->flags |= MONO_INST_INDIRECT;
5523 /* add a check for this != NULL to inlined methods */
5524 if (is_virtual_call) {
5527 NEW_ARGLOAD (cfg, arg_ins, 0);
5528 MONO_ADD_INS (cfg->cbb, arg_ins);
5529 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
5532 skip_dead_blocks = !dont_verify;
5533 if (skip_dead_blocks) {
5534 original_bb = bb = mono_basic_block_split (method, &error);
5535 if (!mono_error_ok (&error)) {
5536 mono_error_cleanup (&error);
5542 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5543 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5546 start_new_bblock = 0;
5549 if (cfg->method == method)
5550 cfg->real_offset = ip - header->code;
5552 cfg->real_offset = inline_offset;
5557 if (start_new_bblock) {
5558 bblock->cil_length = ip - bblock->cil_code;
5559 if (start_new_bblock == 2) {
5560 g_assert (ip == tblock->cil_code);
5562 GET_BBLOCK (cfg, tblock, ip);
5564 bblock->next_bb = tblock;
5567 start_new_bblock = 0;
5568 for (i = 0; i < bblock->in_scount; ++i) {
5569 if (cfg->verbose_level > 3)
5570 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5571 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5575 g_slist_free (class_inits);
5578 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5579 link_bblock (cfg, bblock, tblock);
5580 if (sp != stack_start) {
5581 handle_stack_args (cfg, stack_start, sp - stack_start);
5583 CHECK_UNVERIFIABLE (cfg);
5585 bblock->next_bb = tblock;
5588 for (i = 0; i < bblock->in_scount; ++i) {
5589 if (cfg->verbose_level > 3)
5590 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5591 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5594 g_slist_free (class_inits);
5599 if (skip_dead_blocks) {
5600 int ip_offset = ip - header->code;
5602 if (ip_offset == bb->end)
5606 int op_size = mono_opcode_size (ip, end);
5607 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
5609 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
5611 if (ip_offset + op_size == bb->end) {
5612 MONO_INST_NEW (cfg, ins, OP_NOP);
5613 MONO_ADD_INS (bblock, ins);
5614 start_new_bblock = 1;
5622 * Sequence points are points where the debugger can place a breakpoint.
5623 * Currently, we generate these automatically at points where the IL
5626 if (seq_points && sp == stack_start) {
5627 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
5628 MONO_ADD_INS (cfg->cbb, ins);
5631 bblock->real_offset = cfg->real_offset;
5633 if ((cfg->method == method) && cfg->coverage_info) {
5634 guint32 cil_offset = ip - header->code;
5635 cfg->coverage_info->data [cil_offset].cil_code = ip;
5637 /* TODO: Use an increment here */
5638 #if defined(TARGET_X86)
5639 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5640 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5642 MONO_ADD_INS (cfg->cbb, ins);
5644 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5645 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5649 if (cfg->verbose_level > 3)
5650 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5654 if (cfg->keep_cil_nops)
5655 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
5657 MONO_INST_NEW (cfg, ins, OP_NOP);
5659 MONO_ADD_INS (bblock, ins);
5662 if (should_insert_brekpoint (cfg->method))
5663 MONO_INST_NEW (cfg, ins, OP_BREAK);
5665 MONO_INST_NEW (cfg, ins, OP_NOP);
5667 MONO_ADD_INS (bblock, ins);
5673 CHECK_STACK_OVF (1);
5674 n = (*ip)-CEE_LDARG_0;
5676 EMIT_NEW_ARGLOAD (cfg, ins, n);
5684 CHECK_STACK_OVF (1);
5685 n = (*ip)-CEE_LDLOC_0;
5687 EMIT_NEW_LOCLOAD (cfg, ins, n);
5696 n = (*ip)-CEE_STLOC_0;
5699 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5701 emit_stloc_ir (cfg, sp, header, n);
5708 CHECK_STACK_OVF (1);
5711 EMIT_NEW_ARGLOAD (cfg, ins, n);
5717 CHECK_STACK_OVF (1);
5720 NEW_ARGLOADA (cfg, ins, n);
5721 MONO_ADD_INS (cfg->cbb, ins);
5731 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5733 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5738 CHECK_STACK_OVF (1);
5741 EMIT_NEW_LOCLOAD (cfg, ins, n);
5745 case CEE_LDLOCA_S: {
5746 unsigned char *tmp_ip;
5748 CHECK_STACK_OVF (1);
5749 CHECK_LOCAL (ip [1]);
5751 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
5757 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5766 CHECK_LOCAL (ip [1]);
5767 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5769 emit_stloc_ir (cfg, sp, header, ip [1]);
5774 CHECK_STACK_OVF (1);
5775 EMIT_NEW_PCONST (cfg, ins, NULL);
5776 ins->type = STACK_OBJ;
5781 CHECK_STACK_OVF (1);
5782 EMIT_NEW_ICONST (cfg, ins, -1);
5795 CHECK_STACK_OVF (1);
5796 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
5802 CHECK_STACK_OVF (1);
5804 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
5810 CHECK_STACK_OVF (1);
5811 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
5817 CHECK_STACK_OVF (1);
5818 MONO_INST_NEW (cfg, ins, OP_I8CONST);
5819 ins->type = STACK_I8;
5820 ins->dreg = alloc_dreg (cfg, STACK_I8);
5822 ins->inst_l = (gint64)read64 (ip);
5823 MONO_ADD_INS (bblock, ins);
5829 gboolean use_aotconst = FALSE;
5831 #ifdef TARGET_POWERPC
5832 /* FIXME: Clean this up */
5833 if (cfg->compile_aot)
5834 use_aotconst = TRUE;
5837 /* FIXME: we should really allocate this only late in the compilation process */
5838 f = mono_domain_alloc (cfg->domain, sizeof (float));
5840 CHECK_STACK_OVF (1);
5846 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
5848 dreg = alloc_freg (cfg);
5849 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
5850 ins->type = STACK_R8;
5852 MONO_INST_NEW (cfg, ins, OP_R4CONST);
5853 ins->type = STACK_R8;
5854 ins->dreg = alloc_dreg (cfg, STACK_R8);
5856 MONO_ADD_INS (bblock, ins);
5866 gboolean use_aotconst = FALSE;
5868 #ifdef TARGET_POWERPC
5869 /* FIXME: Clean this up */
5870 if (cfg->compile_aot)
5871 use_aotconst = TRUE;
5874 /* FIXME: we should really allocate this only late in the compilation process */
5875 d = mono_domain_alloc (cfg->domain, sizeof (double));
5877 CHECK_STACK_OVF (1);
5883 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
5885 dreg = alloc_freg (cfg);
5886 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
5887 ins->type = STACK_R8;
5889 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5890 ins->type = STACK_R8;
5891 ins->dreg = alloc_dreg (cfg, STACK_R8);
5893 MONO_ADD_INS (bblock, ins);
5902 MonoInst *temp, *store;
5904 CHECK_STACK_OVF (1);
5908 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
5909 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
5911 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5914 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5927 if (sp [0]->type == STACK_R8)
5928 /* we need to pop the value from the x86 FP stack */
5929 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
5938 if (stack_start != sp)
5940 token = read32 (ip + 1);
5941 /* FIXME: check the signature matches */
5942 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5947 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
5948 GENERIC_SHARING_FAILURE (CEE_JMP);
5950 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5951 CHECK_CFG_EXCEPTION;
5953 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
5955 MonoMethodSignature *fsig = mono_method_signature (cmethod);
5958 /* Handle tail calls similarly to calls */
5959 n = fsig->param_count + fsig->hasthis;
5961 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
5962 call->method = cmethod;
5963 call->tail_call = TRUE;
5964 call->signature = mono_method_signature (cmethod);
5965 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
5966 call->inst.inst_p0 = cmethod;
5967 for (i = 0; i < n; ++i)
5968 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
5970 mono_arch_emit_call (cfg, call);
5971 MONO_ADD_INS (bblock, (MonoInst*)call);
5974 for (i = 0; i < num_args; ++i)
5975 /* Prevent arguments from being optimized away */
5976 arg_array [i]->flags |= MONO_INST_VOLATILE;
5978 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
5979 ins = (MonoInst*)call;
5980 ins->inst_p0 = cmethod;
5981 MONO_ADD_INS (bblock, ins);
5985 start_new_bblock = 1;
5990 case CEE_CALLVIRT: {
5991 MonoInst *addr = NULL;
5992 MonoMethodSignature *fsig = NULL;
5994 int virtual = *ip == CEE_CALLVIRT;
5995 int calli = *ip == CEE_CALLI;
5996 gboolean pass_imt_from_rgctx = FALSE;
5997 MonoInst *imt_arg = NULL;
5998 gboolean pass_vtable = FALSE;
5999 gboolean pass_mrgctx = FALSE;
6000 MonoInst *vtable_arg = NULL;
6001 gboolean check_this = FALSE;
6002 gboolean supported_tail_call = FALSE;
6005 token = read32 (ip + 1);
6012 if (method->wrapper_type != MONO_WRAPPER_NONE)
6013 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6015 fsig = mono_metadata_parse_signature (image, token);
6017 n = fsig->param_count + fsig->hasthis;
6019 if (method->dynamic && fsig->pinvoke) {
6023 * This is a call through a function pointer using a pinvoke
6024 * signature. Have to create a wrapper and call that instead.
6025 * FIXME: This is very slow, need to create a wrapper at JIT time
6026 * instead based on the signature.
6028 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
6029 EMIT_NEW_PCONST (cfg, args [1], fsig);
6031 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6034 MonoMethod *cil_method;
6036 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6037 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6038 cil_method = cmethod;
6039 } else if (constrained_call) {
6040 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6042 * This is needed since get_method_constrained can't find
6043 * the method in klass representing a type var.
6044 * The type var is guaranteed to be a reference type in this
6047 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6048 cil_method = cmethod;
6049 g_assert (!cmethod->klass->valuetype);
6051 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6054 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6055 cil_method = cmethod;
6060 if (!dont_verify && !cfg->skip_visibility) {
6061 MonoMethod *target_method = cil_method;
6062 if (method->is_inflated) {
6063 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6065 if (!mono_method_can_access_method (method_definition, target_method) &&
6066 !mono_method_can_access_method (method, cil_method))
6067 METHOD_ACCESS_FAILURE;
6070 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6071 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6073 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6074 /* MS.NET seems to silently convert this to a callvirt */
6077 if (!cmethod->klass->inited)
6078 if (!mono_class_init (cmethod->klass))
6081 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6082 mini_class_is_system_array (cmethod->klass)) {
6083 array_rank = cmethod->klass->rank;
6084 fsig = mono_method_signature (cmethod);
6086 fsig = mono_method_signature (cmethod);
6091 if (fsig->pinvoke) {
6092 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6093 check_for_pending_exc, FALSE);
6094 fsig = mono_method_signature (wrapper);
6095 } else if (constrained_call) {
6096 fsig = mono_method_signature (cmethod);
6098 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6102 mono_save_token_info (cfg, image, token, cil_method);
6104 n = fsig->param_count + fsig->hasthis;
6106 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6107 if (check_linkdemand (cfg, method, cmethod))
6109 CHECK_CFG_EXCEPTION;
6112 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6113 g_assert_not_reached ();
6116 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6119 if (!cfg->generic_sharing_context && cmethod)
6120 g_assert (!mono_method_check_context_used (cmethod));
6124 //g_assert (!virtual || fsig->hasthis);
6128 if (constrained_call) {
6130 * We have the `constrained.' prefix opcode.
6132 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6134 * The type parameter is instantiated as a valuetype,
6135 * but that type doesn't override the method we're
6136 * calling, so we need to box `this'.
6138 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6139 ins->klass = constrained_call;
6140 sp [0] = handle_box (cfg, ins, constrained_call);
6141 CHECK_CFG_EXCEPTION;
6142 } else if (!constrained_call->valuetype) {
6143 int dreg = alloc_preg (cfg);
6146 * The type parameter is instantiated as a reference
6147 * type. We have a managed pointer on the stack, so
6148 * we need to dereference it here.
6150 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6151 ins->type = STACK_OBJ;
6153 } else if (cmethod->klass->valuetype)
6155 constrained_call = NULL;
6158 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6162 * If the callee is a shared method, then its static cctor
6163 * might not get called after the call was patched.
6165 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6166 emit_generic_class_init (cfg, cmethod->klass);
6167 CHECK_TYPELOAD (cmethod->klass);
6170 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6171 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6172 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6173 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6174 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6177 * Pass vtable iff target method might
6178 * be shared, which means that sharing
6179 * is enabled for its class and its
6180 * context is sharable (and it's not a
6183 if (sharing_enabled && context_sharable &&
6184 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6188 if (cmethod && mini_method_get_context (cmethod) &&
6189 mini_method_get_context (cmethod)->method_inst) {
6190 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6191 MonoGenericContext *context = mini_method_get_context (cmethod);
6192 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6194 g_assert (!pass_vtable);
6196 if (sharing_enabled && context_sharable)
6200 if (cfg->generic_sharing_context && cmethod) {
6201 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6203 context_used = mono_method_check_context_used (cmethod);
6205 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6206 /* Generic method interface
6207 calls are resolved via a
6208 helper function and don't
6210 if (!cmethod_context || !cmethod_context->method_inst)
6211 pass_imt_from_rgctx = TRUE;
6215 * If a shared method calls another
6216 * shared method then the caller must
6217 * have a generic sharing context
6218 * because the magic trampoline
6219 * requires it. FIXME: We shouldn't
6220 * have to force the vtable/mrgctx
6221 * variable here. Instead there
6222 * should be a flag in the cfg to
6223 * request a generic sharing context.
6226 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6227 mono_get_vtable_var (cfg);
6232 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6234 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6236 CHECK_TYPELOAD (cmethod->klass);
6237 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6242 g_assert (!vtable_arg);
6244 if (!cfg->compile_aot) {
6246 * emit_get_rgctx_method () calls mono_class_vtable () so check
6247 * for type load errors before.
6249 mono_class_setup_vtable (cmethod->klass);
6250 CHECK_TYPELOAD (cmethod->klass);
6253 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6255 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6256 MONO_METHOD_IS_FINAL (cmethod)) {
6263 if (pass_imt_from_rgctx) {
6264 g_assert (!pass_vtable);
6267 imt_arg = emit_get_rgctx_method (cfg, context_used,
6268 cmethod, MONO_RGCTX_INFO_METHOD);
6272 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
6274 /* Calling virtual generic methods */
6275 if (cmethod && virtual &&
6276 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6277 !(MONO_METHOD_IS_FINAL (cmethod) &&
6278 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6279 mono_method_signature (cmethod)->generic_param_count) {
6280 MonoInst *this_temp, *this_arg_temp, *store;
6281 MonoInst *iargs [4];
6283 g_assert (mono_method_signature (cmethod)->is_inflated);
6285 /* Prevent inlining of methods that contain indirect calls */
6288 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK
6289 /* The llvm vcall trampolines doesn't support generic virtual calls yet */
6290 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt && !mono_use_llvm) {
6291 g_assert (!imt_arg);
6293 g_assert (cmethod->is_inflated);
6294 imt_arg = emit_get_rgctx_method (cfg, context_used,
6295 cmethod, MONO_RGCTX_INFO_METHOD);
6296 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6300 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6301 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6302 MONO_ADD_INS (bblock, store);
6304 /* FIXME: This should be a managed pointer */
6305 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6307 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6308 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6309 cmethod, MONO_RGCTX_INFO_METHOD);
6310 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6311 addr = mono_emit_jit_icall (cfg,
6312 mono_helper_compile_generic_method, iargs);
6314 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6316 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6319 if (!MONO_TYPE_IS_VOID (fsig->ret))
6320 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6322 CHECK_CFG_EXCEPTION;
6329 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6330 supported_tail_call = cmethod && MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
6332 supported_tail_call = cmethod && mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6336 /* FIXME: runtime generic context pointer for jumps? */
6337 /* FIXME: handle this for generic sharing eventually */
6338 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) && supported_tail_call) {
6341 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6344 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6345 /* Handle tail calls similarly to calls */
6346 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE);
6348 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6349 call->tail_call = TRUE;
6350 call->method = cmethod;
6351 call->signature = mono_method_signature (cmethod);
6354 * We implement tail calls by storing the actual arguments into the
6355 * argument variables, then emitting a CEE_JMP.
6357 for (i = 0; i < n; ++i) {
6358 /* Prevent argument from being register allocated */
6359 arg_array [i]->flags |= MONO_INST_VOLATILE;
6360 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6364 ins = (MonoInst*)call;
6365 ins->inst_p0 = cmethod;
6366 ins->inst_p1 = arg_array [0];
6367 MONO_ADD_INS (bblock, ins);
6368 link_bblock (cfg, bblock, end_bblock);
6369 start_new_bblock = 1;
6371 CHECK_CFG_EXCEPTION;
6373 /* skip CEE_RET as well */
6379 /* Conversion to a JIT intrinsic */
6380 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6381 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6382 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6387 CHECK_CFG_EXCEPTION;
6395 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6396 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6397 mono_method_check_inlining (cfg, cmethod) &&
6398 !g_list_find (dont_inline, cmethod)) {
6400 gboolean allways = FALSE;
6402 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6403 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6404 /* Prevent inlining of methods that call wrappers */
6406 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6410 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6412 cfg->real_offset += 5;
6415 if (!MONO_TYPE_IS_VOID (fsig->ret))
6416 /* *sp is already set by inline_method */
6419 inline_costs += costs;
6425 inline_costs += 10 * num_calls++;
6427 /* Tail recursion elimination */
6428 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6429 gboolean has_vtargs = FALSE;
6432 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6435 /* keep it simple */
6436 for (i = fsig->param_count - 1; i >= 0; i--) {
6437 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6442 for (i = 0; i < n; ++i)
6443 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6444 MONO_INST_NEW (cfg, ins, OP_BR);
6445 MONO_ADD_INS (bblock, ins);
6446 tblock = start_bblock->out_bb [0];
6447 link_bblock (cfg, bblock, tblock);
6448 ins->inst_target_bb = tblock;
6449 start_new_bblock = 1;
6451 /* skip the CEE_RET, too */
6452 if (ip_in_bb (cfg, bblock, ip + 5))
6462 /* Generic sharing */
6463 /* FIXME: only do this for generic methods if
6464 they are not shared! */
6465 if (context_used && !imt_arg && !array_rank &&
6466 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6467 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6468 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6469 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6472 g_assert (cfg->generic_sharing_context && cmethod);
6476 * We are compiling a call to a
6477 * generic method from shared code,
6478 * which means that we have to look up
6479 * the method in the rgctx and do an
6482 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6485 /* Indirect calls */
6487 g_assert (!imt_arg);
6489 if (*ip == CEE_CALL)
6490 g_assert (context_used);
6491 else if (*ip == CEE_CALLI)
6492 g_assert (!vtable_arg);
6494 /* FIXME: what the hell is this??? */
6495 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6496 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6498 /* Prevent inlining of methods with indirect calls */
6502 #ifdef MONO_ARCH_RGCTX_REG
6504 int rgctx_reg = mono_alloc_preg (cfg);
6506 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6507 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6508 call = (MonoCallInst*)ins;
6509 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6510 cfg->uses_rgctx_reg = TRUE;
6511 call->rgctx_reg = TRUE;
6516 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6518 * Instead of emitting an indirect call, emit a direct call
6519 * with the contents of the aotconst as the patch info.
6521 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6523 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6524 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
6527 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6530 if (!MONO_TYPE_IS_VOID (fsig->ret))
6531 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6533 CHECK_CFG_EXCEPTION;
6544 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6545 if (sp [fsig->param_count]->type == STACK_OBJ) {
6546 MonoInst *iargs [2];
6549 iargs [1] = sp [fsig->param_count];
6551 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6554 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6555 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6556 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6557 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6559 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6562 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6563 if (!cmethod->klass->element_class->valuetype && !readonly)
6564 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6565 CHECK_TYPELOAD (cmethod->klass);
6568 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6571 g_assert_not_reached ();
6574 CHECK_CFG_EXCEPTION;
6581 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6583 if (!MONO_TYPE_IS_VOID (fsig->ret))
6584 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6586 CHECK_CFG_EXCEPTION;
6596 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6598 } else if (imt_arg) {
6599 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6601 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6604 if (!MONO_TYPE_IS_VOID (fsig->ret))
6605 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6607 CHECK_CFG_EXCEPTION;
6614 if (cfg->method != method) {
6615 /* return from inlined method */
6617 * If in_count == 0, that means the ret is unreachable due to
6618 * being preceded by a throw. In that case, inline_method () will
6619 * handle setting the return value
6620 * (test case: test_0_inline_throw ()).
6622 if (return_var && cfg->cbb->in_count) {
6626 //g_assert (returnvar != -1);
6627 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6628 cfg->ret_var_set = TRUE;
6632 MonoType *ret_type = mono_method_signature (method)->ret;
6636 * Place a seq point here too even though the IL stack is not
6637 * empty, so a step over on
6640 * will work correctly.
6642 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
6643 MONO_ADD_INS (cfg->cbb, ins);
6646 g_assert (!return_var);
6649 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6652 if (!cfg->vret_addr) {
6655 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6657 EMIT_NEW_RETLOADA (cfg, ret_addr);
6659 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6660 ins->klass = mono_class_from_mono_type (ret_type);
6663 #ifdef MONO_ARCH_SOFT_FLOAT
6664 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6665 MonoInst *iargs [1];
6669 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6670 mono_arch_emit_setret (cfg, method, conv);
6672 mono_arch_emit_setret (cfg, method, *sp);
6675 mono_arch_emit_setret (cfg, method, *sp);
6680 if (sp != stack_start)
6682 MONO_INST_NEW (cfg, ins, OP_BR);
6684 ins->inst_target_bb = end_bblock;
6685 MONO_ADD_INS (bblock, ins);
6686 link_bblock (cfg, bblock, end_bblock);
6687 start_new_bblock = 1;
6691 MONO_INST_NEW (cfg, ins, OP_BR);
6693 target = ip + 1 + (signed char)(*ip);
6695 GET_BBLOCK (cfg, tblock, target);
6696 link_bblock (cfg, bblock, tblock);
6697 ins->inst_target_bb = tblock;
6698 if (sp != stack_start) {
6699 handle_stack_args (cfg, stack_start, sp - stack_start);
6701 CHECK_UNVERIFIABLE (cfg);
6703 MONO_ADD_INS (bblock, ins);
6704 start_new_bblock = 1;
6705 inline_costs += BRANCH_COST;
6719 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6721 target = ip + 1 + *(signed char*)ip;
6727 inline_costs += BRANCH_COST;
6731 MONO_INST_NEW (cfg, ins, OP_BR);
6734 target = ip + 4 + (gint32)read32(ip);
6736 GET_BBLOCK (cfg, tblock, target);
6737 link_bblock (cfg, bblock, tblock);
6738 ins->inst_target_bb = tblock;
6739 if (sp != stack_start) {
6740 handle_stack_args (cfg, stack_start, sp - stack_start);
6742 CHECK_UNVERIFIABLE (cfg);
6745 MONO_ADD_INS (bblock, ins);
6747 start_new_bblock = 1;
6748 inline_costs += BRANCH_COST;
6755 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6756 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6757 guint32 opsize = is_short ? 1 : 4;
6759 CHECK_OPSIZE (opsize);
6761 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6764 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6769 GET_BBLOCK (cfg, tblock, target);
6770 link_bblock (cfg, bblock, tblock);
6771 GET_BBLOCK (cfg, tblock, ip);
6772 link_bblock (cfg, bblock, tblock);
6774 if (sp != stack_start) {
6775 handle_stack_args (cfg, stack_start, sp - stack_start);
6776 CHECK_UNVERIFIABLE (cfg);
6779 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6780 cmp->sreg1 = sp [0]->dreg;
6781 type_from_op (cmp, sp [0], NULL);
6784 #if SIZEOF_REGISTER == 4
6785 if (cmp->opcode == OP_LCOMPARE_IMM) {
6786 /* Convert it to OP_LCOMPARE */
6787 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6788 ins->type = STACK_I8;
6789 ins->dreg = alloc_dreg (cfg, STACK_I8);
6791 MONO_ADD_INS (bblock, ins);
6792 cmp->opcode = OP_LCOMPARE;
6793 cmp->sreg2 = ins->dreg;
6796 MONO_ADD_INS (bblock, cmp);
6798 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6799 type_from_op (ins, sp [0], NULL);
6800 MONO_ADD_INS (bblock, ins);
6801 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
6802 GET_BBLOCK (cfg, tblock, target);
6803 ins->inst_true_bb = tblock;
6804 GET_BBLOCK (cfg, tblock, ip);
6805 ins->inst_false_bb = tblock;
6806 start_new_bblock = 2;
6809 inline_costs += BRANCH_COST;
6824 MONO_INST_NEW (cfg, ins, *ip);
6826 target = ip + 4 + (gint32)read32(ip);
6832 inline_costs += BRANCH_COST;
6836 MonoBasicBlock **targets;
6837 MonoBasicBlock *default_bblock;
6838 MonoJumpInfoBBTable *table;
6839 int offset_reg = alloc_preg (cfg);
6840 int target_reg = alloc_preg (cfg);
6841 int table_reg = alloc_preg (cfg);
6842 int sum_reg = alloc_preg (cfg);
6843 gboolean use_op_switch;
6847 n = read32 (ip + 1);
6850 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
6854 CHECK_OPSIZE (n * sizeof (guint32));
6855 target = ip + n * sizeof (guint32);
6857 GET_BBLOCK (cfg, default_bblock, target);
6859 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
6860 for (i = 0; i < n; ++i) {
6861 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
6862 targets [i] = tblock;
6866 if (sp != stack_start) {
6868 * Link the current bb with the targets as well, so handle_stack_args
6869 * will set their in_stack correctly.
6871 link_bblock (cfg, bblock, default_bblock);
6872 for (i = 0; i < n; ++i)
6873 link_bblock (cfg, bblock, targets [i]);
6875 handle_stack_args (cfg, stack_start, sp - stack_start);
6877 CHECK_UNVERIFIABLE (cfg);
6880 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
6881 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
6884 for (i = 0; i < n; ++i)
6885 link_bblock (cfg, bblock, targets [i]);
6887 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
6888 table->table = targets;
6889 table->table_size = n;
6891 use_op_switch = FALSE;
6893 /* ARM implements SWITCH statements differently */
6894 /* FIXME: Make it use the generic implementation */
6895 if (!cfg->compile_aot)
6896 use_op_switch = TRUE;
6899 if (COMPILE_LLVM (cfg))
6900 use_op_switch = TRUE;
6902 cfg->cbb->has_jump_table = 1;
6904 if (use_op_switch) {
6905 MONO_INST_NEW (cfg, ins, OP_SWITCH);
6906 ins->sreg1 = src1->dreg;
6907 ins->inst_p0 = table;
6908 ins->inst_many_bb = targets;
6909 ins->klass = GUINT_TO_POINTER (n);
6910 MONO_ADD_INS (cfg->cbb, ins);
6912 if (sizeof (gpointer) == 8)
6913 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
6915 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
6917 #if SIZEOF_REGISTER == 8
6918 /* The upper word might not be zero, and we add it to a 64 bit address later */
6919 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
6922 if (cfg->compile_aot) {
6923 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
6925 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
6926 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
6927 ins->inst_p0 = table;
6928 ins->dreg = table_reg;
6929 MONO_ADD_INS (cfg->cbb, ins);
6932 /* FIXME: Use load_memindex */
6933 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
6934 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
6935 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
6937 start_new_bblock = 1;
6938 inline_costs += (BRANCH_COST * 2);
6958 dreg = alloc_freg (cfg);
6961 dreg = alloc_lreg (cfg);
6964 dreg = alloc_preg (cfg);
6967 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
6968 ins->type = ldind_type [*ip - CEE_LDIND_I1];
6969 ins->flags |= ins_flag;
6971 MONO_ADD_INS (bblock, ins);
6986 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
6987 ins->flags |= ins_flag;
6989 MONO_ADD_INS (bblock, ins);
6991 #if HAVE_WRITE_BARRIERS
6992 if (*ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0))) {
6993 MonoInst *dummy_use;
6994 /* insert call to write barrier */
6995 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
6996 mono_emit_method_call (cfg, write_barrier, sp, NULL);
6997 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
7008 MONO_INST_NEW (cfg, ins, (*ip));
7010 ins->sreg1 = sp [0]->dreg;
7011 ins->sreg2 = sp [1]->dreg;
7012 type_from_op (ins, sp [0], sp [1]);
7014 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7016 /* Use the immediate opcodes if possible */
7017 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
7018 int imm_opcode = mono_op_to_op_imm (ins->opcode);
7019 if (imm_opcode != -1) {
7020 ins->opcode = imm_opcode;
7021 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7024 sp [1]->opcode = OP_NOP;
7028 MONO_ADD_INS ((cfg)->cbb, (ins));
7030 *sp++ = mono_decompose_opcode (cfg, ins);
7047 MONO_INST_NEW (cfg, ins, (*ip));
7049 ins->sreg1 = sp [0]->dreg;
7050 ins->sreg2 = sp [1]->dreg;
7051 type_from_op (ins, sp [0], sp [1]);
7053 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7054 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7056 /* FIXME: Pass opcode to is_inst_imm */
7058 /* Use the immediate opcodes if possible */
7059 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7062 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7063 if (imm_opcode != -1) {
7064 ins->opcode = imm_opcode;
7065 if (sp [1]->opcode == OP_I8CONST) {
7066 #if SIZEOF_REGISTER == 8
7067 ins->inst_imm = sp [1]->inst_l;
7069 ins->inst_ls_word = sp [1]->inst_ls_word;
7070 ins->inst_ms_word = sp [1]->inst_ms_word;
7074 ins->inst_imm = (gssize)(sp [1]->inst_c0);
7077 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7078 if (sp [1]->next == NULL)
7079 sp [1]->opcode = OP_NOP;
7082 MONO_ADD_INS ((cfg)->cbb, (ins));
7084 *sp++ = mono_decompose_opcode (cfg, ins);
7097 case CEE_CONV_OVF_I8:
7098 case CEE_CONV_OVF_U8:
7102 /* Special case this earlier so we have long constants in the IR */
7103 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7104 int data = sp [-1]->inst_c0;
7105 sp [-1]->opcode = OP_I8CONST;
7106 sp [-1]->type = STACK_I8;
7107 #if SIZEOF_REGISTER == 8
7108 if ((*ip) == CEE_CONV_U8)
7109 sp [-1]->inst_c0 = (guint32)data;
7111 sp [-1]->inst_c0 = data;
7113 sp [-1]->inst_ls_word = data;
7114 if ((*ip) == CEE_CONV_U8)
7115 sp [-1]->inst_ms_word = 0;
7117 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7119 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7126 case CEE_CONV_OVF_I4:
7127 case CEE_CONV_OVF_I1:
7128 case CEE_CONV_OVF_I2:
7129 case CEE_CONV_OVF_I:
7130 case CEE_CONV_OVF_U:
7133 if (sp [-1]->type == STACK_R8) {
7134 ADD_UNOP (CEE_CONV_OVF_I8);
7141 case CEE_CONV_OVF_U1:
7142 case CEE_CONV_OVF_U2:
7143 case CEE_CONV_OVF_U4:
7146 if (sp [-1]->type == STACK_R8) {
7147 ADD_UNOP (CEE_CONV_OVF_U8);
7154 case CEE_CONV_OVF_I1_UN:
7155 case CEE_CONV_OVF_I2_UN:
7156 case CEE_CONV_OVF_I4_UN:
7157 case CEE_CONV_OVF_I8_UN:
7158 case CEE_CONV_OVF_U1_UN:
7159 case CEE_CONV_OVF_U2_UN:
7160 case CEE_CONV_OVF_U4_UN:
7161 case CEE_CONV_OVF_U8_UN:
7162 case CEE_CONV_OVF_I_UN:
7163 case CEE_CONV_OVF_U_UN:
7170 CHECK_CFG_EXCEPTION;
7174 case CEE_ADD_OVF_UN:
7176 case CEE_MUL_OVF_UN:
7178 case CEE_SUB_OVF_UN:
7186 token = read32 (ip + 1);
7187 klass = mini_get_class (method, token, generic_context);
7188 CHECK_TYPELOAD (klass);
7190 if (generic_class_is_reference_type (cfg, klass)) {
7191 MonoInst *store, *load;
7192 int dreg = alloc_preg (cfg);
7194 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7195 load->flags |= ins_flag;
7196 MONO_ADD_INS (cfg->cbb, load);
7198 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7199 store->flags |= ins_flag;
7200 MONO_ADD_INS (cfg->cbb, store);
7202 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7214 token = read32 (ip + 1);
7215 klass = mini_get_class (method, token, generic_context);
7216 CHECK_TYPELOAD (klass);
7218 /* Optimize the common ldobj+stloc combination */
7228 loc_index = ip [5] - CEE_STLOC_0;
7235 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7236 CHECK_LOCAL (loc_index);
7238 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7239 ins->dreg = cfg->locals [loc_index]->dreg;
7245 /* Optimize the ldobj+stobj combination */
7246 /* The reference case ends up being a load+store anyway */
7247 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7252 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7259 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7268 CHECK_STACK_OVF (1);
7270 n = read32 (ip + 1);
7272 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7273 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7274 ins->type = STACK_OBJ;
7277 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7278 MonoInst *iargs [1];
7280 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7281 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7283 if (cfg->opt & MONO_OPT_SHARED) {
7284 MonoInst *iargs [3];
7286 if (cfg->compile_aot) {
7287 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7289 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7290 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7291 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7292 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7293 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7295 if (bblock->out_of_line) {
7296 MonoInst *iargs [2];
7298 if (image == mono_defaults.corlib) {
7300 * Avoid relocations in AOT and save some space by using a
7301 * version of helper_ldstr specialized to mscorlib.
7303 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7304 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7306 /* Avoid creating the string object */
7307 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7308 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7309 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7313 if (cfg->compile_aot) {
7314 NEW_LDSTRCONST (cfg, ins, image, n);
7316 MONO_ADD_INS (bblock, ins);
7319 NEW_PCONST (cfg, ins, NULL);
7320 ins->type = STACK_OBJ;
7321 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7323 MONO_ADD_INS (bblock, ins);
7332 MonoInst *iargs [2];
7333 MonoMethodSignature *fsig;
7336 MonoInst *vtable_arg = NULL;
7339 token = read32 (ip + 1);
7340 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7343 fsig = mono_method_get_signature (cmethod, image, token);
7347 mono_save_token_info (cfg, image, token, cmethod);
7349 if (!mono_class_init (cmethod->klass))
7352 if (cfg->generic_sharing_context)
7353 context_used = mono_method_check_context_used (cmethod);
7355 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7356 if (check_linkdemand (cfg, method, cmethod))
7358 CHECK_CFG_EXCEPTION;
7359 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7360 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7363 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7364 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7365 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7366 mono_class_vtable (cfg->domain, cmethod->klass);
7367 CHECK_TYPELOAD (cmethod->klass);
7369 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7370 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7373 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7374 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7376 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7378 CHECK_TYPELOAD (cmethod->klass);
7379 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7384 n = fsig->param_count;
7388 * Generate smaller code for the common newobj <exception> instruction in
7389 * argument checking code.
7391 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7392 is_exception_class (cmethod->klass) && n <= 2 &&
7393 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7394 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7395 MonoInst *iargs [3];
7397 g_assert (!vtable_arg);
7401 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7404 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7408 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7413 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7416 g_assert_not_reached ();
7424 /* move the args to allow room for 'this' in the first position */
7430 /* check_call_signature () requires sp[0] to be set */
7431 this_ins.type = STACK_OBJ;
7433 if (check_call_signature (cfg, fsig, sp))
7438 if (mini_class_is_system_array (cmethod->klass)) {
7439 g_assert (!vtable_arg);
7441 *sp = emit_get_rgctx_method (cfg, context_used,
7442 cmethod, MONO_RGCTX_INFO_METHOD);
7444 /* Avoid varargs in the common case */
7445 if (fsig->param_count == 1)
7446 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7447 else if (fsig->param_count == 2)
7448 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7449 else if (fsig->param_count == 3)
7450 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
7452 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7453 } else if (cmethod->string_ctor) {
7454 g_assert (!context_used);
7455 g_assert (!vtable_arg);
7456 /* we simply pass a null pointer */
7457 EMIT_NEW_PCONST (cfg, *sp, NULL);
7458 /* now call the string ctor */
7459 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7461 MonoInst* callvirt_this_arg = NULL;
7463 if (cmethod->klass->valuetype) {
7464 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7465 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7466 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7471 * The code generated by mini_emit_virtual_call () expects
7472 * iargs [0] to be a boxed instance, but luckily the vcall
7473 * will be transformed into a normal call there.
7475 } else if (context_used) {
7479 if (cfg->opt & MONO_OPT_SHARED)
7480 rgctx_info = MONO_RGCTX_INFO_KLASS;
7482 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7483 data = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, rgctx_info);
7485 alloc = handle_alloc_from_inst (cfg, cmethod->klass, data, FALSE);
7488 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7490 CHECK_TYPELOAD (cmethod->klass);
7493 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7494 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7495 * As a workaround, we call class cctors before allocating objects.
7497 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7498 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7499 if (cfg->verbose_level > 2)
7500 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7501 class_inits = g_slist_prepend (class_inits, vtable);
7504 alloc = handle_alloc (cfg, cmethod->klass, FALSE);
7507 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
7510 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7512 /* Now call the actual ctor */
7513 /* Avoid virtual calls to ctors if possible */
7514 if (cmethod->klass->marshalbyref)
7515 callvirt_this_arg = sp [0];
7517 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7518 mono_method_check_inlining (cfg, cmethod) &&
7519 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7520 !g_list_find (dont_inline, cmethod)) {
7523 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7524 cfg->real_offset += 5;
7527 inline_costs += costs - 5;
7530 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7532 } else if (context_used &&
7533 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7534 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7535 MonoInst *cmethod_addr;
7537 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7538 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7540 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7543 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7544 callvirt_this_arg, NULL, vtable_arg);
7548 if (alloc == NULL) {
7550 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7551 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7565 token = read32 (ip + 1);
7566 klass = mini_get_class (method, token, generic_context);
7567 CHECK_TYPELOAD (klass);
7568 if (sp [0]->type != STACK_OBJ)
7571 if (cfg->generic_sharing_context)
7572 context_used = mono_class_check_context_used (klass);
7574 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7581 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7583 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7587 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7588 MonoMethod *mono_castclass;
7589 MonoInst *iargs [1];
7592 mono_castclass = mono_marshal_get_castclass (klass);
7595 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7596 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7597 g_assert (costs > 0);
7600 cfg->real_offset += 5;
7605 inline_costs += costs;
7608 ins = handle_castclass (cfg, klass, *sp, context_used);
7609 CHECK_CFG_EXCEPTION;
7619 token = read32 (ip + 1);
7620 klass = mini_get_class (method, token, generic_context);
7621 CHECK_TYPELOAD (klass);
7622 if (sp [0]->type != STACK_OBJ)
7625 if (cfg->generic_sharing_context)
7626 context_used = mono_class_check_context_used (klass);
7628 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7635 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7637 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7641 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7642 MonoMethod *mono_isinst;
7643 MonoInst *iargs [1];
7646 mono_isinst = mono_marshal_get_isinst (klass);
7649 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7650 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7651 g_assert (costs > 0);
7654 cfg->real_offset += 5;
7659 inline_costs += costs;
7662 ins = handle_isinst (cfg, klass, *sp, context_used);
7663 CHECK_CFG_EXCEPTION;
7670 case CEE_UNBOX_ANY: {
7674 token = read32 (ip + 1);
7675 klass = mini_get_class (method, token, generic_context);
7676 CHECK_TYPELOAD (klass);
7678 mono_save_token_info (cfg, image, token, klass);
7680 if (cfg->generic_sharing_context)
7681 context_used = mono_class_check_context_used (klass);
7683 if (generic_class_is_reference_type (cfg, klass)) {
7684 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
7686 MonoInst *iargs [2];
7691 iargs [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7692 ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
7696 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7697 MonoMethod *mono_castclass;
7698 MonoInst *iargs [1];
7701 mono_castclass = mono_marshal_get_castclass (klass);
7704 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7705 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7707 g_assert (costs > 0);
7710 cfg->real_offset += 5;
7714 inline_costs += costs;
7716 ins = handle_castclass (cfg, klass, *sp, 0);
7717 CHECK_CFG_EXCEPTION;
7725 if (mono_class_is_nullable (klass)) {
7726 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
7733 ins = handle_unbox (cfg, klass, sp, context_used);
7739 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7752 token = read32 (ip + 1);
7753 klass = mini_get_class (method, token, generic_context);
7754 CHECK_TYPELOAD (klass);
7756 mono_save_token_info (cfg, image, token, klass);
7758 if (cfg->generic_sharing_context)
7759 context_used = mono_class_check_context_used (klass);
7761 if (generic_class_is_reference_type (cfg, klass)) {
7767 if (klass == mono_defaults.void_class)
7769 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7771 /* frequent check in generic code: box (struct), brtrue */
7772 if (!mono_class_is_nullable (klass) &&
7773 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
7774 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7776 MONO_INST_NEW (cfg, ins, OP_BR);
7777 if (*ip == CEE_BRTRUE_S) {
7780 target = ip + 1 + (signed char)(*ip);
7785 target = ip + 4 + (gint)(read32 (ip));
7788 GET_BBLOCK (cfg, tblock, target);
7789 link_bblock (cfg, bblock, tblock);
7790 ins->inst_target_bb = tblock;
7791 GET_BBLOCK (cfg, tblock, ip);
7793 * This leads to some inconsistency, since the two bblocks are
7794 * not really connected, but it is needed for handling stack
7795 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
7796 * FIXME: This should only be needed if sp != stack_start, but that
7797 * doesn't work for some reason (test failure in mcs/tests on x86).
7799 link_bblock (cfg, bblock, tblock);
7800 if (sp != stack_start) {
7801 handle_stack_args (cfg, stack_start, sp - stack_start);
7803 CHECK_UNVERIFIABLE (cfg);
7805 MONO_ADD_INS (bblock, ins);
7806 start_new_bblock = 1;
7814 if (cfg->opt & MONO_OPT_SHARED)
7815 rgctx_info = MONO_RGCTX_INFO_KLASS;
7817 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7818 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
7819 *sp++ = handle_box_from_inst (cfg, val, klass, context_used, data);
7821 *sp++ = handle_box (cfg, val, klass);
7824 CHECK_CFG_EXCEPTION;
7833 token = read32 (ip + 1);
7834 klass = mini_get_class (method, token, generic_context);
7835 CHECK_TYPELOAD (klass);
7837 mono_save_token_info (cfg, image, token, klass);
7839 if (cfg->generic_sharing_context)
7840 context_used = mono_class_check_context_used (klass);
7842 if (mono_class_is_nullable (klass)) {
7845 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
7846 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
7850 ins = handle_unbox (cfg, klass, sp, context_used);
7860 MonoClassField *field;
7864 if (*ip == CEE_STFLD) {
7871 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
7873 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
7876 token = read32 (ip + 1);
7877 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7878 field = mono_method_get_wrapper_data (method, token);
7879 klass = field->parent;
7882 field = mono_field_from_token (image, token, &klass, generic_context);
7886 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7887 FIELD_ACCESS_FAILURE;
7888 mono_class_init (klass);
6890 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
6891 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
7892 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
7893 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
7896 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
7897 if (*ip == CEE_STFLD) {
7898 if (target_type_is_incompatible (cfg, field->type, sp [1]))
7900 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7901 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
7902 MonoInst *iargs [5];
7905 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7906 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7907 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
7911 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
7912 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
7913 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7914 g_assert (costs > 0);
7916 cfg->real_offset += 5;
7919 inline_costs += costs;
7921 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
7926 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
7928 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
7930 #if HAVE_WRITE_BARRIERS
7931 if (mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
7932 /* insert call to write barrier */
7933 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7934 MonoInst *iargs [2], *dummy_use;
7937 dreg = alloc_preg (cfg);
7938 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7940 mono_emit_method_call (cfg, write_barrier, iargs, NULL);
7942 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
7946 store->flags |= ins_flag;
7953 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7954 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
7955 MonoInst *iargs [4];
7958 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7959 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7960 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
7961 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
7962 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
7963 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7965 g_assert (costs > 0);
7967 cfg->real_offset += 5;
7971 inline_costs += costs;
7973 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
7977 if (sp [0]->type == STACK_VTYPE) {
7980 /* Have to compute the address of the variable */
7982 var = get_vreg_to_inst (cfg, sp [0]->dreg);
7984 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
7986 g_assert (var->klass == klass);
7988 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
7992 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
7994 if (*ip == CEE_LDFLDA) {
7995 dreg = alloc_preg (cfg);
7997 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7998 ins->klass = mono_class_from_mono_type (field->type);
7999 ins->type = STACK_MP;
8004 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
8005 load->flags |= ins_flag;
8006 load->flags |= MONO_INST_FAULT;
8017 MonoClassField *field;
8018 gpointer addr = NULL;
8019 gboolean is_special_static;
8022 token = read32 (ip + 1);
8024 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8025 field = mono_method_get_wrapper_data (method, token);
8026 klass = field->parent;
8029 field = mono_field_from_token (image, token, &klass, generic_context);
8032 mono_class_init (klass);
8033 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8034 FIELD_ACCESS_FAILURE;
8036 /* if the class is Critical then transparent code cannot access its fields */
8037 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8038 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8041 * We can only support shared generic static
8042 * field access on architectures where the
8043 * trampoline code has been extended to handle
8044 * the generic class init.
8046 #ifndef MONO_ARCH_VTABLE_REG
8047 GENERIC_SHARING_FAILURE (*ip);
8050 if (cfg->generic_sharing_context)
8051 context_used = mono_class_check_context_used (klass);
8053 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
8055 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8056 * to be called here.
8058 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8059 mono_class_vtable (cfg->domain, klass);
8060 CHECK_TYPELOAD (klass);
8062 mono_domain_lock (cfg->domain);
8063 if (cfg->domain->special_static_fields)
8064 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8065 mono_domain_unlock (cfg->domain);
8067 is_special_static = mono_class_field_is_special_static (field);
8069 /* Generate IR to compute the field address */
8070 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
8072 * Fast access to TLS data
8073 * Inline version of get_thread_static_data () in
8077 int idx, static_data_reg, array_reg, dreg;
8078 MonoInst *thread_ins;
8080 // offset &= 0x7fffffff;
8081 // idx = (offset >> 24) - 1;
8082 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
8084 thread_ins = mono_get_thread_intrinsic (cfg);
8085 MONO_ADD_INS (cfg->cbb, thread_ins);
8086 static_data_reg = alloc_ireg (cfg);
8087 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
8089 if (cfg->compile_aot) {
8090 int offset_reg, offset2_reg, idx_reg;
8092 /* For TLS variables, this will return the TLS offset */
8093 EMIT_NEW_SFLDACONST (cfg, ins, field);
8094 offset_reg = ins->dreg;
8095 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
8096 idx_reg = alloc_ireg (cfg);
8097 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
8098 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
8099 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
8100 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
8101 array_reg = alloc_ireg (cfg);
8102 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
8103 offset2_reg = alloc_ireg (cfg);
8104 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
8105 dreg = alloc_ireg (cfg);
8106 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
8108 offset = (gsize)addr & 0x7fffffff;
8109 idx = (offset >> 24) - 1;
8111 array_reg = alloc_ireg (cfg);
8112 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
8113 dreg = alloc_ireg (cfg);
8114 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
8116 } else if ((cfg->opt & MONO_OPT_SHARED) ||
8117 (cfg->compile_aot && is_special_static) ||
8118 (context_used && is_special_static)) {
8119 MonoInst *iargs [2];
8121 g_assert (field->parent);
8122 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8124 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8125 field, MONO_RGCTX_INFO_CLASS_FIELD);
8127 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8129 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8130 } else if (context_used) {
8131 MonoInst *static_data;
8134 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8135 method->klass->name_space, method->klass->name, method->name,
8136 depth, field->offset);
8139 if (mono_class_needs_cctor_run (klass, method)) {
8143 vtable = emit_get_rgctx_klass (cfg, context_used,
8144 klass, MONO_RGCTX_INFO_VTABLE);
8146 // FIXME: This doesn't work since it tries to pass the argument
8147 // in the normal way, instead of using MONO_ARCH_VTABLE_REG
8149 * The vtable pointer is always passed in a register regardless of
8150 * the calling convention, so assign it manually, and make a call
8151 * using a signature without parameters.
8153 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable);
8154 #ifdef MONO_ARCH_VTABLE_REG
8155 mono_call_inst_add_outarg_reg (cfg, call, vtable->dreg, MONO_ARCH_VTABLE_REG, FALSE);
8156 cfg->uses_vtable_reg = TRUE;
8163 * The pointer we're computing here is
8165 * super_info.static_data + field->offset
8167 static_data = emit_get_rgctx_klass (cfg, context_used,
8168 klass, MONO_RGCTX_INFO_STATIC_DATA);
8170 if (field->offset == 0) {
8173 int addr_reg = mono_alloc_preg (cfg);
8174 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8176 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8177 MonoInst *iargs [2];
8179 g_assert (field->parent);
8180 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8181 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8182 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8184 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8186 CHECK_TYPELOAD (klass);
8188 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8189 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8190 if (cfg->verbose_level > 2)
8191 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8192 class_inits = g_slist_prepend (class_inits, vtable);
8194 if (cfg->run_cctors) {
8196 /* This makes so that inline cannot trigger */
8197 /* .cctors: too many apps depend on them */
8198 /* running with a specific order... */
8199 if (! vtable->initialized)
8201 ex = mono_runtime_class_init_full (vtable, FALSE);
8203 set_exception_object (cfg, ex);
8204 goto exception_exit;
8208 addr = (char*)vtable->data + field->offset;
8210 if (cfg->compile_aot)
8211 EMIT_NEW_SFLDACONST (cfg, ins, field);
8213 EMIT_NEW_PCONST (cfg, ins, addr);
8215 MonoInst *iargs [1];
8216 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8217 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8221 /* Generate IR to do the actual load/store operation */
8223 if (*ip == CEE_LDSFLDA) {
8224 ins->klass = mono_class_from_mono_type (field->type);
8225 ins->type = STACK_PTR;
8227 } else if (*ip == CEE_STSFLD) {
8232 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
8233 store->flags |= ins_flag;
8235 gboolean is_const = FALSE;
8236 MonoVTable *vtable = NULL;
8238 if (!context_used) {
8239 vtable = mono_class_vtable (cfg->domain, klass);
8240 CHECK_TYPELOAD (klass);
8242 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8243 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8244 gpointer addr = (char*)vtable->data + field->offset;
8245 int ro_type = field->type->type;
8246 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
8247 ro_type = mono_class_enum_basetype (field->type->data.klass)->type;
8249 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8252 case MONO_TYPE_BOOLEAN:
8254 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8258 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8261 case MONO_TYPE_CHAR:
8263 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8267 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8272 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8276 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8279 #ifndef HAVE_MOVING_COLLECTOR
8282 case MONO_TYPE_STRING:
8283 case MONO_TYPE_OBJECT:
8284 case MONO_TYPE_CLASS:
8285 case MONO_TYPE_SZARRAY:
8287 case MONO_TYPE_FNPTR:
8288 case MONO_TYPE_ARRAY:
8289 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8290 type_to_eval_stack_type ((cfg), field->type, *sp);
8296 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8301 case MONO_TYPE_VALUETYPE:
8311 CHECK_STACK_OVF (1);
8313 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8314 load->flags |= ins_flag;
8327 token = read32 (ip + 1);
8328 klass = mini_get_class (method, token, generic_context);
8329 CHECK_TYPELOAD (klass);
8330 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8331 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8342 const char *data_ptr;
8344 guint32 field_token;
8350 token = read32 (ip + 1);
8352 klass = mini_get_class (method, token, generic_context);
8353 CHECK_TYPELOAD (klass);
8355 if (cfg->generic_sharing_context)
8356 context_used = mono_class_check_context_used (klass);
8358 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
8359 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
8360 ins->sreg1 = sp [0]->dreg;
8361 ins->type = STACK_I4;
8362 ins->dreg = alloc_ireg (cfg);
8363 MONO_ADD_INS (cfg->cbb, ins);
8364 *sp = mono_decompose_opcode (cfg, ins);
8369 MonoClass *array_class = mono_array_class_get (klass, 1);
8370 /* FIXME: we cannot get a managed
8371 allocator because we can't get the
8372 open generic class's vtable. We
8373 have the same problem in
8374 handle_alloc_from_inst(). This
8375 needs to be solved so that we can
8376 have managed allocs of shared
8379 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
8380 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
8382 MonoMethod *managed_alloc = NULL;
8384 /* FIXME: Decompose later to help abcrem */
8387 args [0] = emit_get_rgctx_klass (cfg, context_used,
8388 array_class, MONO_RGCTX_INFO_VTABLE);
8393 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
8395 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8397 if (cfg->opt & MONO_OPT_SHARED) {
8398 /* Decompose now to avoid problems with references to the domainvar */
8399 MonoInst *iargs [3];
8401 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8402 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8405 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8407 /* Decompose later since it is needed by abcrem */
8408 MonoClass *array_type = mono_array_class_get (klass, 1);
8409 mono_class_vtable (cfg->domain, array_type);
8410 CHECK_TYPELOAD (array_type);
8412 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8413 ins->dreg = alloc_preg (cfg);
8414 ins->sreg1 = sp [0]->dreg;
8415 ins->inst_newa_class = klass;
8416 ins->type = STACK_OBJ;
8418 MONO_ADD_INS (cfg->cbb, ins);
8419 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8420 cfg->cbb->has_array_access = TRUE;
8422 /* Needed so mono_emit_load_get_addr () gets called */
8423 mono_get_got_var (cfg);
8433 * we inline/optimize the initialization sequence if possible.
8434 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8435 * for small sizes open code the memcpy
8436 * ensure the rva field is big enough
8438 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8439 MonoMethod *memcpy_method = get_memcpy_method ();
8440 MonoInst *iargs [3];
8441 int add_reg = alloc_preg (cfg);
8443 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8444 if (cfg->compile_aot) {
8445 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8447 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8449 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8450 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8459 if (sp [0]->type != STACK_OBJ)
8462 dreg = alloc_preg (cfg);
8463 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8464 ins->dreg = alloc_preg (cfg);
8465 ins->sreg1 = sp [0]->dreg;
8466 ins->type = STACK_I4;
8467 MONO_ADD_INS (cfg->cbb, ins);
8468 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8469 cfg->cbb->has_array_access = TRUE;
8477 if (sp [0]->type != STACK_OBJ)
8480 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8482 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8483 CHECK_TYPELOAD (klass);
8484 /* we need to make sure that this array is exactly the type it needs
8485 * to be for correctness. the wrappers are lax with their usage
8486 * so we need to ignore them here
8488 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
8489 MonoClass *array_class = mono_array_class_get (klass, 1);
8490 mini_emit_check_array_type (cfg, sp [0], array_class);
8491 CHECK_TYPELOAD (array_class);
8495 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8510 case CEE_LDELEM_REF: {
8516 if (*ip == CEE_LDELEM) {
8518 token = read32 (ip + 1);
8519 klass = mini_get_class (method, token, generic_context);
8520 CHECK_TYPELOAD (klass);
8521 mono_class_init (klass);
8524 klass = array_access_to_klass (*ip);
8526 if (sp [0]->type != STACK_OBJ)
8529 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8531 if (sp [1]->opcode == OP_ICONST) {
8532 int array_reg = sp [0]->dreg;
8533 int index_reg = sp [1]->dreg;
8534 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8536 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8537 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8539 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8540 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8543 if (*ip == CEE_LDELEM)
8556 case CEE_STELEM_REF:
8563 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8565 if (*ip == CEE_STELEM) {
8567 token = read32 (ip + 1);
8568 klass = mini_get_class (method, token, generic_context);
8569 CHECK_TYPELOAD (klass);
8570 mono_class_init (klass);
8573 klass = array_access_to_klass (*ip);
8575 if (sp [0]->type != STACK_OBJ)
8578 /* storing a NULL doesn't need any of the complex checks in stelemref */
8579 if (generic_class_is_reference_type (cfg, klass) &&
8580 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8581 MonoMethod* helper = mono_marshal_get_stelemref ();
8582 MonoInst *iargs [3];
8584 if (sp [0]->type != STACK_OBJ)
8586 if (sp [2]->type != STACK_OBJ)
8593 mono_emit_method_call (cfg, helper, iargs, NULL);
8595 if (sp [1]->opcode == OP_ICONST) {
8596 int array_reg = sp [0]->dreg;
8597 int index_reg = sp [1]->dreg;
8598 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8600 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8601 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8603 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8604 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8608 if (*ip == CEE_STELEM)
8615 case CEE_CKFINITE: {
8619 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8620 ins->sreg1 = sp [0]->dreg;
8621 ins->dreg = alloc_freg (cfg);
8622 ins->type = STACK_R8;
8623 MONO_ADD_INS (bblock, ins);
8625 *sp++ = mono_decompose_opcode (cfg, ins);
8630 case CEE_REFANYVAL: {
8631 MonoInst *src_var, *src;
8633 int klass_reg = alloc_preg (cfg);
8634 int dreg = alloc_preg (cfg);
8637 MONO_INST_NEW (cfg, ins, *ip);
8640 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8641 CHECK_TYPELOAD (klass);
8642 mono_class_init (klass);
8644 if (cfg->generic_sharing_context)
8645 context_used = mono_class_check_context_used (klass);
8648 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8650 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8651 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8652 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8655 MonoInst *klass_ins;
8657 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8658 klass, MONO_RGCTX_INFO_KLASS);
8661 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8662 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8664 mini_emit_class_check (cfg, klass_reg, klass);
8666 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8667 ins->type = STACK_MP;
8672 case CEE_MKREFANY: {
8673 MonoInst *loc, *addr;
8676 MONO_INST_NEW (cfg, ins, *ip);
8679 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8680 CHECK_TYPELOAD (klass);
8681 mono_class_init (klass);
8683 if (cfg->generic_sharing_context)
8684 context_used = mono_class_check_context_used (klass);
8686 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8687 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8690 MonoInst *const_ins;
8691 int type_reg = alloc_preg (cfg);
8693 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8694 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8695 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8696 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8697 } else if (cfg->compile_aot) {
8698 int const_reg = alloc_preg (cfg);
8699 int type_reg = alloc_preg (cfg);
8701 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8702 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8703 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8704 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8706 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8707 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8709 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8711 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8712 ins->type = STACK_VTYPE;
8713 ins->klass = mono_defaults.typed_reference_class;
8720 MonoClass *handle_class;
8722 CHECK_STACK_OVF (1);
8725 n = read32 (ip + 1);
8727 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
8728 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8729 handle = mono_method_get_wrapper_data (method, n);
8730 handle_class = mono_method_get_wrapper_data (method, n + 1);
8731 if (handle_class == mono_defaults.typehandle_class)
8732 handle = &((MonoClass*)handle)->byval_arg;
8735 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8739 mono_class_init (handle_class);
8740 if (cfg->generic_sharing_context) {
8741 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
8742 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
8743 /* This case handles ldtoken
8744 of an open type, like for
8747 } else if (handle_class == mono_defaults.typehandle_class) {
8748 /* If we get a MONO_TYPE_CLASS
8749 then we need to provide the
8751 instantiation of it. */
8752 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8755 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8756 } else if (handle_class == mono_defaults.fieldhandle_class)
8757 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8758 else if (handle_class == mono_defaults.methodhandle_class)
8759 context_used = mono_method_check_context_used (handle);
8761 g_assert_not_reached ();
8764 if ((cfg->opt & MONO_OPT_SHARED) &&
8765 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
8766 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
8767 MonoInst *addr, *vtvar, *iargs [3];
8768 int method_context_used;
8770 if (cfg->generic_sharing_context)
8771 method_context_used = mono_method_check_context_used (method);
8773 method_context_used = 0;
8775 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8777 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8778 EMIT_NEW_ICONST (cfg, iargs [1], n);
8779 if (method_context_used) {
8780 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
8781 method, MONO_RGCTX_INFO_METHOD);
8782 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8784 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
8785 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
8787 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8789 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8791 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8793 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
8794 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
8795 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
8796 (cmethod->klass == mono_defaults.monotype_class->parent) &&
8797 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
8798 MonoClass *tclass = mono_class_from_mono_type (handle);
8800 mono_class_init (tclass);
8802 ins = emit_get_rgctx_klass (cfg, context_used,
8803 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
8804 } else if (cfg->compile_aot) {
8805 if (method->wrapper_type) {
8806 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
8807 /* Special case for static synchronized wrappers */
8808 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
8810 /* FIXME: n is not a normal token */
8811 cfg->disable_aot = TRUE;
8812 EMIT_NEW_PCONST (cfg, ins, NULL);
8815 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
8818 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
8820 ins->type = STACK_OBJ;
8821 ins->klass = cmethod->klass;
8824 MonoInst *addr, *vtvar;
8826 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8829 if (handle_class == mono_defaults.typehandle_class) {
8830 ins = emit_get_rgctx_klass (cfg, context_used,
8831 mono_class_from_mono_type (handle),
8832 MONO_RGCTX_INFO_TYPE);
8833 } else if (handle_class == mono_defaults.methodhandle_class) {
8834 ins = emit_get_rgctx_method (cfg, context_used,
8835 handle, MONO_RGCTX_INFO_METHOD);
8836 } else if (handle_class == mono_defaults.fieldhandle_class) {
8837 ins = emit_get_rgctx_field (cfg, context_used,
8838 handle, MONO_RGCTX_INFO_CLASS_FIELD);
8840 g_assert_not_reached ();
8842 } else if (cfg->compile_aot) {
8843 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
8845 EMIT_NEW_PCONST (cfg, ins, handle);
8847 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8848 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8849 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8859 MONO_INST_NEW (cfg, ins, OP_THROW);
8861 ins->sreg1 = sp [0]->dreg;
8863 bblock->out_of_line = TRUE;
8864 MONO_ADD_INS (bblock, ins);
8865 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
8866 MONO_ADD_INS (bblock, ins);
8869 link_bblock (cfg, bblock, end_bblock);
8870 start_new_bblock = 1;
8872 case CEE_ENDFINALLY:
8873 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
8874 MONO_ADD_INS (bblock, ins);
8876 start_new_bblock = 1;
8879 * Control will leave the method so empty the stack, otherwise
8880 * the next basic block will start with a nonempty stack.
8882 while (sp != stack_start) {
8890 if (*ip == CEE_LEAVE) {
8892 target = ip + 5 + (gint32)read32(ip + 1);
8895 target = ip + 2 + (signed char)(ip [1]);
8898 /* empty the stack */
8899 while (sp != stack_start) {
8904 * If this leave statement is in a catch block, check for a
8905 * pending exception, and rethrow it if necessary.
8906 * We avoid doing this in runtime invoke wrappers, since those are called
8907 * by native code which expects the wrapper to catch all exceptions.
8909 for (i = 0; i < header->num_clauses; ++i) {
8910 MonoExceptionClause *clause = &header->clauses [i];
8913 * Use <= in the final comparison to handle clauses with multiple
8914 * leave statements, like in bug #78024.
8915 * The ordering of the exception clauses guarantees that we find the
8918 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
8920 MonoBasicBlock *dont_throw;
8925 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
8928 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
8930 NEW_BBLOCK (cfg, dont_throw);
8933 * Currently, we always rethrow the abort exception, despite the
8934 * fact that this is not correct. See thread6.cs for an example.
8935 * But propagating the abort exception is more important than
8936 * getting the semantics right.
8938 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
8939 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
8940 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
8942 MONO_START_BB (cfg, dont_throw);
8947 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
8949 MonoExceptionClause *clause;
8951 for (tmp = handlers; tmp; tmp = tmp->next) {
8953 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
8955 link_bblock (cfg, bblock, tblock);
8956 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
8957 ins->inst_target_bb = tblock;
8958 ins->inst_eh_block = clause;
8959 MONO_ADD_INS (bblock, ins);
8960 bblock->has_call_handler = 1;
8961 if (COMPILE_LLVM (cfg)) {
8962 MonoBasicBlock *target_bb;
8965 * Link the finally bblock with the target, since it will
8966 * conceptually branch there.
8967 * FIXME: Have to link the bblock containing the endfinally.
8969 GET_BBLOCK (cfg, target_bb, target);
8970 link_bblock (cfg, tblock, target_bb);
8973 g_list_free (handlers);
8976 MONO_INST_NEW (cfg, ins, OP_BR);
8977 MONO_ADD_INS (bblock, ins);
8978 GET_BBLOCK (cfg, tblock, target);
8979 link_bblock (cfg, bblock, tblock);
8980 ins->inst_target_bb = tblock;
8981 start_new_bblock = 1;
8983 if (*ip == CEE_LEAVE)
8992 * Mono specific opcodes
8994 case MONO_CUSTOM_PREFIX: {
8996 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
9000 case CEE_MONO_ICALL: {
9002 MonoJitICallInfo *info;
9004 token = read32 (ip + 2);
9005 func = mono_method_get_wrapper_data (method, token);
9006 info = mono_find_jit_icall_by_addr (func);
9009 CHECK_STACK (info->sig->param_count);
9010 sp -= info->sig->param_count;
9012 ins = mono_emit_jit_icall (cfg, info->func, sp);
9013 if (!MONO_TYPE_IS_VOID (info->sig->ret))
9017 inline_costs += 10 * num_calls++;
9021 case CEE_MONO_LDPTR: {
9024 CHECK_STACK_OVF (1);
9026 token = read32 (ip + 2);
9028 ptr = mono_method_get_wrapper_data (method, token);
9029 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
9030 MonoJitICallInfo *callinfo;
9031 const char *icall_name;
9033 icall_name = method->name + strlen ("__icall_wrapper_");
9034 g_assert (icall_name);
9035 callinfo = mono_find_jit_icall_by_name (icall_name);
9036 g_assert (callinfo);
9038 if (ptr == callinfo->func) {
9039 /* Will be transformed into an AOTCONST later */
9040 EMIT_NEW_PCONST (cfg, ins, ptr);
9046 /* FIXME: Generalize this */
9047 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
9048 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
9053 EMIT_NEW_PCONST (cfg, ins, ptr);
9056 inline_costs += 10 * num_calls++;
9057 /* Can't embed random pointers into AOT code */
9058 cfg->disable_aot = 1;
9061 case CEE_MONO_ICALL_ADDR: {
9062 MonoMethod *cmethod;
9065 CHECK_STACK_OVF (1);
9067 token = read32 (ip + 2);
9069 cmethod = mono_method_get_wrapper_data (method, token);
9071 if (cfg->compile_aot) {
9072 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
9074 ptr = mono_lookup_internal_call (cmethod);
9076 EMIT_NEW_PCONST (cfg, ins, ptr);
9082 case CEE_MONO_VTADDR: {
9083 MonoInst *src_var, *src;
9089 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9090 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
9095 case CEE_MONO_NEWOBJ: {
9096 MonoInst *iargs [2];
9098 CHECK_STACK_OVF (1);
9100 token = read32 (ip + 2);
9101 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9102 mono_class_init (klass);
9103 NEW_DOMAINCONST (cfg, iargs [0]);
9104 MONO_ADD_INS (cfg->cbb, iargs [0]);
9105 NEW_CLASSCONST (cfg, iargs [1], klass);
9106 MONO_ADD_INS (cfg->cbb, iargs [1]);
9107 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9109 inline_costs += 10 * num_calls++;
9112 case CEE_MONO_OBJADDR:
9115 MONO_INST_NEW (cfg, ins, OP_MOVE);
9116 ins->dreg = alloc_preg (cfg);
9117 ins->sreg1 = sp [0]->dreg;
9118 ins->type = STACK_MP;
9119 MONO_ADD_INS (cfg->cbb, ins);
9123 case CEE_MONO_LDNATIVEOBJ:
9125 * Similar to LDOBJ, but instead load the unmanaged
9126 * representation of the vtype to the stack.
9131 token = read32 (ip + 2);
9132 klass = mono_method_get_wrapper_data (method, token);
9133 g_assert (klass->valuetype);
9134 mono_class_init (klass);
9137 MonoInst *src, *dest, *temp;
9140 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9141 temp->backend.is_pinvoke = 1;
9142 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9143 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9145 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9146 dest->type = STACK_VTYPE;
9147 dest->klass = klass;
9153 case CEE_MONO_RETOBJ: {
9155 * Same as RET, but return the native representation of a vtype
9158 g_assert (cfg->ret);
9159 g_assert (mono_method_signature (method)->pinvoke);
9164 token = read32 (ip + 2);
9165 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9167 if (!cfg->vret_addr) {
9168 g_assert (cfg->ret_var_is_local);
9170 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9172 EMIT_NEW_RETLOADA (cfg, ins);
9174 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9176 if (sp != stack_start)
9179 MONO_INST_NEW (cfg, ins, OP_BR);
9180 ins->inst_target_bb = end_bblock;
9181 MONO_ADD_INS (bblock, ins);
9182 link_bblock (cfg, bblock, end_bblock);
9183 start_new_bblock = 1;
9187 case CEE_MONO_CISINST:
9188 case CEE_MONO_CCASTCLASS: {
9193 token = read32 (ip + 2);
9194 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9195 if (ip [1] == CEE_MONO_CISINST)
9196 ins = handle_cisinst (cfg, klass, sp [0]);
9198 ins = handle_ccastclass (cfg, klass, sp [0]);
9204 case CEE_MONO_SAVE_LMF:
9205 case CEE_MONO_RESTORE_LMF:
9206 #ifdef MONO_ARCH_HAVE_LMF_OPS
9207 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9208 MONO_ADD_INS (bblock, ins);
9209 cfg->need_lmf_area = TRUE;
9213 case CEE_MONO_CLASSCONST:
9214 CHECK_STACK_OVF (1);
9216 token = read32 (ip + 2);
9217 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9220 inline_costs += 10 * num_calls++;
9222 case CEE_MONO_NOT_TAKEN:
9223 bblock->out_of_line = TRUE;
9227 CHECK_STACK_OVF (1);
9229 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9230 ins->dreg = alloc_preg (cfg);
9231 ins->inst_offset = (gint32)read32 (ip + 2);
9232 ins->type = STACK_PTR;
9233 MONO_ADD_INS (bblock, ins);
9237 case CEE_MONO_DYN_CALL: {
9240 /* It would be easier to call a trampoline, but that would put an
9241 * extra frame on the stack, confusing exception handling. So
9242 * implement it inline using an opcode for now.
9245 if (!cfg->dyn_call_var) {
9246 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9247 /* prevent it from being register allocated */
9248 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
9251 /* Has to use a call inst since the local regalloc expects it */
9252 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
9253 ins = (MonoInst*)call;
9255 ins->sreg1 = sp [0]->dreg;
9256 ins->sreg2 = sp [1]->dreg;
9257 MONO_ADD_INS (bblock, ins);
9259 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
9260 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
9264 inline_costs += 10 * num_calls++;
9269 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9279 /* somewhat similar to LDTOKEN */
9280 MonoInst *addr, *vtvar;
9281 CHECK_STACK_OVF (1);
9282 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9284 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9285 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9287 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9288 ins->type = STACK_VTYPE;
9289 ins->klass = mono_defaults.argumenthandle_class;
9302 * The following transforms:
9303 * CEE_CEQ into OP_CEQ
9304 * CEE_CGT into OP_CGT
9305 * CEE_CGT_UN into OP_CGT_UN
9306 * CEE_CLT into OP_CLT
9307 * CEE_CLT_UN into OP_CLT_UN
9309 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9311 MONO_INST_NEW (cfg, ins, cmp->opcode);
9313 cmp->sreg1 = sp [0]->dreg;
9314 cmp->sreg2 = sp [1]->dreg;
9315 type_from_op (cmp, sp [0], sp [1]);
9317 if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9318 cmp->opcode = OP_LCOMPARE;
9319 else if (sp [0]->type == STACK_R8)
9320 cmp->opcode = OP_FCOMPARE;
9322 cmp->opcode = OP_ICOMPARE;
9323 MONO_ADD_INS (bblock, cmp);
9324 ins->type = STACK_I4;
9325 ins->dreg = alloc_dreg (cfg, ins->type);
9326 type_from_op (ins, sp [0], sp [1]);
9328 if (cmp->opcode == OP_FCOMPARE) {
9330 * The backends expect the fceq opcodes to do the
9333 cmp->opcode = OP_NOP;
9334 ins->sreg1 = cmp->sreg1;
9335 ins->sreg2 = cmp->sreg2;
9337 MONO_ADD_INS (bblock, ins);
9344 MonoMethod *cil_method;
9345 gboolean needs_static_rgctx_invoke;
9347 CHECK_STACK_OVF (1);
9349 n = read32 (ip + 2);
9350 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9353 mono_class_init (cmethod->klass);
9355 mono_save_token_info (cfg, image, n, cmethod);
9357 if (cfg->generic_sharing_context)
9358 context_used = mono_method_check_context_used (cmethod);
9360 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9362 cil_method = cmethod;
9363 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9364 METHOD_ACCESS_FAILURE;
9366 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9367 if (check_linkdemand (cfg, method, cmethod))
9369 CHECK_CFG_EXCEPTION;
9370 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9371 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9375 * Optimize the common case of ldftn+delegate creation
9377 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9378 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9379 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9381 int invoke_context_used = 0;
9383 invoke = mono_get_delegate_invoke (ctor_method->klass);
9384 if (!invoke || !mono_method_signature (invoke))
9387 if (cfg->generic_sharing_context)
9388 invoke_context_used = mono_method_check_context_used (invoke);
9390 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
9391 /* FIXME: SGEN support */
9392 if (invoke_context_used == 0) {
9393 MonoInst *target_ins;
9396 if (cfg->verbose_level > 3)
9397 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9398 target_ins = sp [-1];
9400 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
9401 CHECK_CFG_EXCEPTION;
9410 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9411 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9415 inline_costs += 10 * num_calls++;
9418 case CEE_LDVIRTFTN: {
9423 n = read32 (ip + 2);
9424 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9427 mono_class_init (cmethod->klass);
9429 if (cfg->generic_sharing_context)
9430 context_used = mono_method_check_context_used (cmethod);
9432 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9433 if (check_linkdemand (cfg, method, cmethod))
9435 CHECK_CFG_EXCEPTION;
9436 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9437 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9443 args [1] = emit_get_rgctx_method (cfg, context_used,
9444 cmethod, MONO_RGCTX_INFO_METHOD);
9447 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9449 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9452 inline_costs += 10 * num_calls++;
9456 CHECK_STACK_OVF (1);
9458 n = read16 (ip + 2);
9460 EMIT_NEW_ARGLOAD (cfg, ins, n);
9465 CHECK_STACK_OVF (1);
9467 n = read16 (ip + 2);
9469 NEW_ARGLOADA (cfg, ins, n);
9470 MONO_ADD_INS (cfg->cbb, ins);
9478 n = read16 (ip + 2);
9480 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9482 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9486 CHECK_STACK_OVF (1);
9488 n = read16 (ip + 2);
9490 EMIT_NEW_LOCLOAD (cfg, ins, n);
9495 unsigned char *tmp_ip;
9496 CHECK_STACK_OVF (1);
9498 n = read16 (ip + 2);
9501 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9507 EMIT_NEW_LOCLOADA (cfg, ins, n);
9516 n = read16 (ip + 2);
9518 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9520 emit_stloc_ir (cfg, sp, header, n);
9527 if (sp != stack_start)
9529 if (cfg->method != method)
9531 * Inlining this into a loop in a parent could lead to
9532 * stack overflows which is different behavior than the
9533 * non-inlined case, thus disable inlining in this case.
9535 goto inline_failure;
9537 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9538 ins->dreg = alloc_preg (cfg);
9539 ins->sreg1 = sp [0]->dreg;
9540 ins->type = STACK_PTR;
9541 MONO_ADD_INS (cfg->cbb, ins);
9543 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9545 ins->flags |= MONO_INST_INIT;
9550 case CEE_ENDFILTER: {
9551 MonoExceptionClause *clause, *nearest;
9552 int cc, nearest_num;
9556 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9558 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9559 ins->sreg1 = (*sp)->dreg;
9560 MONO_ADD_INS (bblock, ins);
9561 start_new_bblock = 1;
9566 for (cc = 0; cc < header->num_clauses; ++cc) {
9567 clause = &header->clauses [cc];
9568 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9569 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9570 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9576 if ((ip - header->code) != nearest->handler_offset)
9581 case CEE_UNALIGNED_:
9582 ins_flag |= MONO_INST_UNALIGNED;
9583 /* FIXME: record alignment? we can assume 1 for now */
9588 ins_flag |= MONO_INST_VOLATILE;
9592 ins_flag |= MONO_INST_TAILCALL;
9593 cfg->flags |= MONO_CFG_HAS_TAIL;
9594 /* Can't inline tail calls at this time */
9595 inline_costs += 100000;
9602 token = read32 (ip + 2);
9603 klass = mini_get_class (method, token, generic_context);
9604 CHECK_TYPELOAD (klass);
9605 if (generic_class_is_reference_type (cfg, klass))
9606 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9608 mini_emit_initobj (cfg, *sp, NULL, klass);
9612 case CEE_CONSTRAINED_:
9614 token = read32 (ip + 2);
9615 if (method->wrapper_type != MONO_WRAPPER_NONE)
9616 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
9618 constrained_call = mono_class_get_full (image, token, generic_context);
9619 CHECK_TYPELOAD (constrained_call);
9624 MonoInst *iargs [3];
9628 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9629 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9630 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9631 /* emit_memset only works when val == 0 */
9632 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9637 if (ip [1] == CEE_CPBLK) {
9638 MonoMethod *memcpy_method = get_memcpy_method ();
9639 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9641 MonoMethod *memset_method = get_memset_method ();
9642 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9652 ins_flag |= MONO_INST_NOTYPECHECK;
9654 ins_flag |= MONO_INST_NORANGECHECK;
9655 /* we ignore the no-nullcheck for now since we
9656 * really do it explicitly only when doing callvirt->call
9662 int handler_offset = -1;
9664 for (i = 0; i < header->num_clauses; ++i) {
9665 MonoExceptionClause *clause = &header->clauses [i];
9666 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9667 handler_offset = clause->handler_offset;
9672 bblock->flags |= BB_EXCEPTION_UNSAFE;
9674 g_assert (handler_offset != -1);
9676 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9677 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9678 ins->sreg1 = load->dreg;
9679 MONO_ADD_INS (bblock, ins);
9681 link_bblock (cfg, bblock, end_bblock);
9682 start_new_bblock = 1;
9690 CHECK_STACK_OVF (1);
9692 token = read32 (ip + 2);
9693 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic) {
9694 MonoType *type = mono_type_create_from_typespec (image, token);
9695 token = mono_type_size (type, &ialign);
9697 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9698 CHECK_TYPELOAD (klass);
9699 mono_class_init (klass);
9700 token = mono_class_value_size (klass, &align);
9702 EMIT_NEW_ICONST (cfg, ins, token);
9707 case CEE_REFANYTYPE: {
9708 MonoInst *src_var, *src;
9714 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9716 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9717 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9718 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9736 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
9746 g_warning ("opcode 0x%02x not handled", *ip);
9750 if (start_new_bblock != 1)
9753 bblock->cil_length = ip - bblock->cil_code;
9754 bblock->next_bb = end_bblock;
9756 if (cfg->method == method && cfg->domainvar) {
9758 MonoInst *get_domain;
9760 cfg->cbb = init_localsbb;
9762 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9763 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9766 get_domain->dreg = alloc_preg (cfg);
9767 MONO_ADD_INS (cfg->cbb, get_domain);
9769 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9770 MONO_ADD_INS (cfg->cbb, store);
9773 #ifdef TARGET_POWERPC
9774 if (cfg->compile_aot)
9775 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
9776 mono_get_got_var (cfg);
9779 if (cfg->method == method && cfg->got_var)
9780 mono_emit_load_got_addr (cfg);
9785 cfg->cbb = init_localsbb;
9787 for (i = 0; i < header->num_locals; ++i) {
9788 MonoType *ptype = header->locals [i];
9789 int t = ptype->type;
9790 dreg = cfg->locals [i]->dreg;
9792 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
9793 t = mono_class_enum_basetype (ptype->data.klass)->type;
9795 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9796 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
9797 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
9798 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
9799 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
9800 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
9801 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9802 ins->type = STACK_R8;
9803 ins->inst_p0 = (void*)&r8_0;
9804 ins->dreg = alloc_dreg (cfg, STACK_R8);
9805 MONO_ADD_INS (init_localsbb, ins);
9806 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
9807 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
9808 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
9809 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
9811 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9816 if (cfg->init_ref_vars && cfg->method == method) {
9817 /* Emit initialization for ref vars */
9818 // FIXME: Avoid duplication initialization for IL locals.
9819 for (i = 0; i < cfg->num_varinfo; ++i) {
9820 MonoInst *ins = cfg->varinfo [i];
9822 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
9823 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
9827 /* Add a sequence point for method entry/exit events */
9829 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
9830 MONO_ADD_INS (init_localsbb, ins);
9831 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
9832 MONO_ADD_INS (cfg->bb_exit, ins);
9837 if (cfg->method == method) {
9839 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9840 bb->region = mono_find_block_region (cfg, bb->real_offset);
9842 mono_create_spvar_for_region (cfg, bb->region);
9843 if (cfg->verbose_level > 2)
9844 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
9848 g_slist_free (class_inits);
9849 dont_inline = g_list_remove (dont_inline, method);
9851 if (inline_costs < 0) {
9854 /* Method is too large */
9855 mname = mono_method_full_name (method, TRUE);
9856 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
9857 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
9859 mono_metadata_free_mh (header);
9860 mono_basic_block_free (original_bb);
9864 if ((cfg->verbose_level > 2) && (cfg->method == method))
9865 mono_print_code (cfg, "AFTER METHOD-TO-IR");
9867 mono_metadata_free_mh (header);
9868 mono_basic_block_free (original_bb);
9869 return inline_costs;
9872 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
9879 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
9883 set_exception_type_from_invalid_il (cfg, method, ip);
9887 g_slist_free (class_inits);
9888 mono_basic_block_free (original_bb);
9889 dont_inline = g_list_remove (dont_inline, method);
9890 mono_metadata_free_mh (header);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a register-source store-to-membase opcode to its immediate-source
 * counterpart.  Used when the value being stored is a known constant, so a
 * load into a register can be avoided.  Asserts if OPCODE has no _IMM form.
 * (NOTE(review): this dump is line-sampled; the switch header/footer lines
 * of this function are elided.)
 */
9895 store_membase_reg_to_store_membase_imm (int opcode)
9898 case OP_STORE_MEMBASE_REG:
9899 return OP_STORE_MEMBASE_IMM;
9900 case OP_STOREI1_MEMBASE_REG:
9901 return OP_STOREI1_MEMBASE_IMM;
9902 case OP_STOREI2_MEMBASE_REG:
9903 return OP_STOREI2_MEMBASE_IMM;
9904 case OP_STOREI4_MEMBASE_REG:
9905 return OP_STOREI4_MEMBASE_IMM;
9906 case OP_STOREI8_MEMBASE_REG:
9907 return OP_STOREI8_MEMBASE_IMM;
/* no _IMM variant exists for the remaining store opcodes */
9909 g_assert_not_reached ();
9915 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 *
 *   Return the immediate-operand variant of OPCODE, i.e. the opcode to use
 * when the second source operand is a constant (alu, compare, store and
 * x86/amd64-specific membase forms).  Returns -1 for opcodes without an
 * immediate form (fallthrough case elided in this sampled dump).
 */
9918 mono_op_to_op_imm (int opcode)
9928 return OP_IDIV_UN_IMM;
9932 return OP_IREM_UN_IMM;
9946 return OP_ISHR_UN_IMM;
9963 return OP_LSHR_UN_IMM;
9966 return OP_COMPARE_IMM;
9968 return OP_ICOMPARE_IMM;
9970 return OP_LCOMPARE_IMM;
9972 case OP_STORE_MEMBASE_REG:
9973 return OP_STORE_MEMBASE_IMM;
9974 case OP_STOREI1_MEMBASE_REG:
9975 return OP_STOREI1_MEMBASE_IMM;
9976 case OP_STOREI2_MEMBASE_REG:
9977 return OP_STOREI2_MEMBASE_IMM;
9978 case OP_STOREI4_MEMBASE_REG:
9979 return OP_STOREI4_MEMBASE_IMM;
/* architecture-specific immediate forms */
9981 #if defined(TARGET_X86) || defined (TARGET_AMD64)
9983 return OP_X86_PUSH_IMM;
9984 case OP_X86_COMPARE_MEMBASE_REG:
9985 return OP_X86_COMPARE_MEMBASE_IMM;
9987 #if defined(TARGET_AMD64)
9988 case OP_AMD64_ICOMPARE_MEMBASE_REG:
9989 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9991 case OP_VOIDCALL_REG:
10000 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Translate a CIL LDIND_* (load indirect) opcode into the corresponding
 * machine-level OP_LOAD*_MEMBASE opcode.  LDIND_I and LDIND_REF both map to
 * the pointer-sized OP_LOAD_MEMBASE.  Asserts on anything else.
 */
10007 ldind_to_load_membase (int opcode)
10011 return OP_LOADI1_MEMBASE;
10013 return OP_LOADU1_MEMBASE;
10015 return OP_LOADI2_MEMBASE;
10017 return OP_LOADU2_MEMBASE;
10019 return OP_LOADI4_MEMBASE;
10021 return OP_LOADU4_MEMBASE;
10023 return OP_LOAD_MEMBASE;
10024 case CEE_LDIND_REF:
/* object references load as a full pointer */
10025 return OP_LOAD_MEMBASE;
10027 return OP_LOADI8_MEMBASE;
10029 return OP_LOADR4_MEMBASE;
10031 return OP_LOADR8_MEMBASE;
10033 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Translate a CIL STIND_* (store indirect) opcode into the corresponding
 * machine-level OP_STORE*_MEMBASE_REG opcode.  STIND_REF maps to the
 * pointer-sized OP_STORE_MEMBASE_REG.  Asserts on anything else.
 */
10040 stind_to_store_membase (int opcode)
10044 return OP_STOREI1_MEMBASE_REG;
10046 return OP_STOREI2_MEMBASE_REG;
10048 return OP_STOREI4_MEMBASE_REG;
10050 case CEE_STIND_REF:
10051 return OP_STORE_MEMBASE_REG;
10053 return OP_STOREI8_MEMBASE_REG;
10055 return OP_STORER4_MEMBASE_REG;
10057 return OP_STORER8_MEMBASE_REG;
10059 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Convert a base+offset load into an absolute-address load (OP_*_MEM),
 * which only x86/amd64 support; on other targets the function falls through
 * (fallback return elided in this sampled dump).  The 8-byte variant is
 * guarded on a 64-bit register size.
 */
10066 mono_load_membase_to_load_mem (int opcode)
10068 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
10069 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10071 case OP_LOAD_MEMBASE:
10072 return OP_LOAD_MEM;
10073 case OP_LOADU1_MEMBASE:
10074 return OP_LOADU1_MEM;
10075 case OP_LOADU2_MEMBASE:
10076 return OP_LOADU2_MEM;
10077 case OP_LOADI4_MEMBASE:
10078 return OP_LOADI4_MEM;
10079 case OP_LOADU4_MEMBASE:
10080 return OP_LOADU4_MEM;
10081 #if SIZEOF_REGISTER == 8
10082 case OP_LOADI8_MEMBASE:
10083 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   When an instruction's destination vreg is spilled to memory, try to fuse
 * the ALU operation and the following store into a single read-modify-write
 * _MEMBASE opcode (x86/amd64 only).  STORE_OPCODE is the store that would
 * otherwise be emitted; only pointer-sized/int32 (and int64 on amd64) stores
 * qualify.  Non-fusable combinations return -1 (elided in this sampled dump).
 */
10092 op_to_op_dest_membase (int store_opcode, int opcode)
10094 #if defined(TARGET_X86)
10095 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
10100 return OP_X86_ADD_MEMBASE_REG;
10102 return OP_X86_SUB_MEMBASE_REG;
10104 return OP_X86_AND_MEMBASE_REG;
10106 return OP_X86_OR_MEMBASE_REG;
10108 return OP_X86_XOR_MEMBASE_REG;
10111 return OP_X86_ADD_MEMBASE_IMM;
10114 return OP_X86_SUB_MEMBASE_IMM;
10117 return OP_X86_AND_MEMBASE_IMM;
10120 return OP_X86_OR_MEMBASE_IMM;
10123 return OP_X86_XOR_MEMBASE_IMM;
10129 #if defined(TARGET_AMD64)
/* amd64 additionally accepts 8-byte stores */
10130 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
10135 return OP_X86_ADD_MEMBASE_REG;
10137 return OP_X86_SUB_MEMBASE_REG;
10139 return OP_X86_AND_MEMBASE_REG;
10141 return OP_X86_OR_MEMBASE_REG;
10143 return OP_X86_XOR_MEMBASE_REG;
10145 return OP_X86_ADD_MEMBASE_IMM;
10147 return OP_X86_SUB_MEMBASE_IMM;
10149 return OP_X86_AND_MEMBASE_IMM;
10151 return OP_X86_OR_MEMBASE_IMM;
10153 return OP_X86_XOR_MEMBASE_IMM;
/* 64-bit wide read-modify-write forms */
10155 return OP_AMD64_ADD_MEMBASE_REG;
10157 return OP_AMD64_SUB_MEMBASE_REG;
10159 return OP_AMD64_AND_MEMBASE_REG;
10161 return OP_AMD64_OR_MEMBASE_REG;
10163 return OP_AMD64_XOR_MEMBASE_REG;
10166 return OP_AMD64_ADD_MEMBASE_IMM;
10169 return OP_AMD64_SUB_MEMBASE_IMM;
10172 return OP_AMD64_AND_MEMBASE_IMM;
10175 return OP_AMD64_OR_MEMBASE_IMM;
10178 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a setcc-style result with the store of its destination: when the
 * store is a 1-byte store, CEQ/CNE (visible here via the x86/amd64 branch)
 * can be turned into SETEQ/SETNE directly into memory.  Returns -1 when no
 * fusion applies (fallback elided in this sampled dump).
 */
10188 op_to_op_store_membase (int store_opcode, int opcode)
10190 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10193 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10194 return OP_X86_SETEQ_MEMBASE;
10196 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10197 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   When the first source vreg of OPCODE would be loaded from a stack slot
 * with LOAD_OPCODE, try to fold the load into the instruction itself
 * (push/compare-with-memory forms on x86/amd64).  Returns -1 when no fused
 * form exists (fallback elided in this sampled dump).
 */
10205 op_to_op_src1_membase (int load_opcode, int opcode)
10208 /* FIXME: This has sign extension issues */
10210 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10211 return OP_X86_COMPARE_MEMBASE8_IMM;
/* only pointer-sized / 32-bit loads can be fused on x86 */
10214 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10219 return OP_X86_PUSH_MEMBASE;
10220 case OP_COMPARE_IMM:
10221 case OP_ICOMPARE_IMM:
10222 return OP_X86_COMPARE_MEMBASE_IMM;
10225 return OP_X86_COMPARE_MEMBASE_REG;
10229 #ifdef TARGET_AMD64
10230 /* FIXME: This has sign extension issues */
10232 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10233 return OP_X86_COMPARE_MEMBASE8_IMM;
10238 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10239 return OP_X86_PUSH_MEMBASE;
/* the immediate-compare fusion below is intentionally disabled (32-bit imm only) */
10241 /* FIXME: This only works for 32 bit immediates
10242 case OP_COMPARE_IMM:
10243 case OP_LCOMPARE_IMM:
10244 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10245 return OP_AMD64_COMPARE_MEMBASE_IMM;
10247 case OP_ICOMPARE_IMM:
10248 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10249 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10253 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10254 return OP_AMD64_COMPARE_MEMBASE_REG;
10257 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10258 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Same idea as op_to_op_src1_membase, but for the second source operand:
 * fold a stack-slot load into the reg,mem form of compare/add/sub/and/or/xor
 * on x86/amd64.  Width of the fused form is selected from LOAD_OPCODE
 * (32-bit X86_* vs 64-bit AMD64_* variants).  Returns -1 when no fused form
 * exists (fallback elided in this sampled dump).
 */
10267 op_to_op_src2_membase (int load_opcode, int opcode)
10270 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10276 return OP_X86_COMPARE_REG_MEMBASE;
10278 return OP_X86_ADD_REG_MEMBASE;
10280 return OP_X86_SUB_REG_MEMBASE;
10282 return OP_X86_AND_REG_MEMBASE;
10284 return OP_X86_OR_REG_MEMBASE;
10286 return OP_X86_XOR_REG_MEMBASE;
10290 #ifdef TARGET_AMD64
10293 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10294 return OP_AMD64_ICOMPARE_REG_MEMBASE;
10298 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10299 return OP_AMD64_COMPARE_REG_MEMBASE;
/* 32-bit operand: reuse the x86 reg,mem forms */
10302 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10303 return OP_X86_ADD_REG_MEMBASE;
10305 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10306 return OP_X86_SUB_REG_MEMBASE;
10308 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10309 return OP_X86_AND_REG_MEMBASE;
10311 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10312 return OP_X86_OR_REG_MEMBASE;
10314 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10315 return OP_X86_XOR_REG_MEMBASE;
/* 64-bit operand: use the amd64 reg,mem forms */
10317 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10318 return OP_AMD64_ADD_REG_MEMBASE;
10320 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10321 return OP_AMD64_SUB_REG_MEMBASE;
10323 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10324 return OP_AMD64_AND_REG_MEMBASE;
10326 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10327 return OP_AMD64_OR_REG_MEMBASE;
10329 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10330 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm, but refuses to convert opcodes that this target
 * emulates in software (long shifts on 32-bit registers, and mul/div when
 * the arch emulates them) — converting those to _IMM forms would bypass the
 * emulation.  Guarded opcode lists are elided in this sampled dump.
 */
10338 mono_op_to_op_imm_noemul (int opcode)
10341 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
10346 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10354 return mono_op_to_op_imm (opcode);
10358 #ifndef DISABLE_JIT
10361 * mono_handle_global_vregs:
10363 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/*
 * mono_handle_global_vregs:
 *
 *   Pass over all bblocks recording, per vreg, the single bblock it is used
 * in (or -1 when used in several).  Vregs live across bblocks are promoted
 * to MonoInst variables ("made global"); conversely, variables confined to
 * one bblock are demoted to local vregs, and the varinfo/vars tables are
 * compacted afterwards.
 */
10367 mono_handle_global_vregs (MonoCompile *cfg)
10369 gint32 *vreg_to_bb;
10370 MonoBasicBlock *bb;
/* NOTE(review): element type is gint32 but the size uses sizeof (gint32*);
 * over-allocates on LP64 — presumably should be sizeof (gint32). Confirm. */
10373 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
10375 #ifdef MONO_ARCH_SIMD_INTRINSICS
10376 if (cfg->uses_simd_intrinsics)
10377 mono_simd_simplify_indirection (cfg);
10380 /* Find local vregs used in more than one bb */
10381 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10382 MonoInst *ins = bb->code;
10383 int block_num = bb->block_num;
10385 if (cfg->verbose_level > 2)
10386 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
10389 for (; ins; ins = ins->next) {
10390 const char *spec = INS_INFO (ins->opcode);
10391 int regtype = 0, regindex;
10394 if (G_UNLIKELY (cfg->verbose_level > 2))
10395 mono_print_ins (ins);
/* only lowered (machine) opcodes are expected at this point */
10397 g_assert (ins->opcode >= MONO_CEE_LAST);
/* walk dest, src1, src2, src3 of the instruction */
10399 for (regindex = 0; regindex < 4; regindex ++) {
10402 if (regindex == 0) {
10403 regtype = spec [MONO_INST_DEST];
10404 if (regtype == ' ')
10407 } else if (regindex == 1) {
10408 regtype = spec [MONO_INST_SRC1];
10409 if (regtype == ' ')
10412 } else if (regindex == 2) {
10413 regtype = spec [MONO_INST_SRC2];
10414 if (regtype == ' ')
10417 } else if (regindex == 3) {
10418 regtype = spec [MONO_INST_SRC3];
10419 if (regtype == ' ')
10424 #if SIZEOF_REGISTER == 4
10425 /* In the LLVM case, the long opcodes are not decomposed */
10426 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
10428 * Since some instructions reference the original long vreg,
10429 * and some reference the two component vregs, it is quite hard
10430 * to determine when it needs to be global. So be conservative.
10432 if (!get_vreg_to_inst (cfg, vreg)) {
10433 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10435 if (cfg->verbose_level > 2)
10436 printf ("LONG VREG R%d made global.\n", vreg);
10440 * Make the component vregs volatile since the optimizations can
10441 * get confused otherwise.
10443 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
10444 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
10448 g_assert (vreg != -1);
10450 prev_bb = vreg_to_bb [vreg];
10451 if (prev_bb == 0) {
10452 /* 0 is a valid block num */
10453 vreg_to_bb [vreg] = block_num + 1;
10454 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* hard registers don't need promotion */
10455 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
10458 if (!get_vreg_to_inst (cfg, vreg)) {
10459 if (G_UNLIKELY (cfg->verbose_level > 2))
10460 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* create the variable with a type matching regtype ('i'/'l'/'f'/vtype) */
10464 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
10467 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10470 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
10473 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
10476 g_assert_not_reached ();
10480 /* Flag as having been used in more than one bb */
10481 vreg_to_bb [vreg] = -1;
10487 /* If a variable is used in only one bblock, convert it into a local vreg */
10488 for (i = 0; i < cfg->num_varinfo; i++) {
10489 MonoInst *var = cfg->varinfo [i];
10490 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
10492 switch (var->type) {
10498 #if SIZEOF_REGISTER == 8
10501 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
10502 /* Enabling this screws up the fp stack on x86 */
10505 /* Arguments are implicitly global */
10506 /* Putting R4 vars into registers doesn't work currently */
10507 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
10509 * Make that the variable's liveness interval doesn't contain a call, since
10510 * that would cause the lvreg to be spilled, making the whole optimization
10513 /* This is too slow for JIT compilation */
10515 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
10517 int def_index, call_index, ins_index;
10518 gboolean spilled = FALSE;
10523 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
10524 const char *spec = INS_INFO (ins->opcode);
10526 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
10527 def_index = ins_index;
/* NOTE(review): both arms of this || test SRC1/sreg1 — the second arm
 * looks like a copy-paste slip and presumably should test
 * spec [MONO_INST_SRC2] / ins->sreg2. Confirm against upstream. */
10529 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
10530 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
10531 if (call_index > def_index) {
10537 if (MONO_IS_CALL (ins))
10538 call_index = ins_index;
10548 if (G_UNLIKELY (cfg->verbose_level > 2))
10549 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* demote: mark the variable dead and unlink it from its vreg */
10550 var->flags |= MONO_INST_IS_DEAD;
10551 cfg->vreg_to_inst [var->dreg] = NULL;
10558 * Compress the varinfo and vars tables so the liveness computation is faster and
10559 * takes up less space.
10562 for (i = 0; i < cfg->num_varinfo; ++i) {
10563 MonoInst *var = cfg->varinfo [i];
10564 if (pos < i && cfg->locals_start == i)
10565 cfg->locals_start = pos;
10566 if (!(var->flags & MONO_INST_IS_DEAD)) {
/* live entry: slide it down to index pos and renumber */
10568 cfg->varinfo [pos] = cfg->varinfo [i];
10569 cfg->varinfo [pos]->inst_c0 = pos;
10570 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
10571 cfg->vars [pos].idx = pos;
10572 #if SIZEOF_REGISTER == 4
10573 if (cfg->varinfo [pos]->type == STACK_I8) {
10574 /* Modify the two component vars too */
10577 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
10578 var1->inst_c0 = pos;
10579 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
10580 var1->inst_c0 = pos;
10587 cfg->num_varinfo = pos;
10588 if (cfg->locals_start > cfg->num_varinfo)
10589 cfg->locals_start = cfg->num_varinfo;
10593 * mono_spill_global_vars:
10595 * Generate spill code for variables which are not allocated to registers,
10596 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10597 * code is generated which could be optimized by the local optimization passes.
10600 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
10602 MonoBasicBlock *bb;
10604 int orig_next_vreg;
10605 guint32 *vreg_to_lvreg;
10607 guint32 i, lvregs_len;
10608 gboolean dest_has_lvreg = FALSE;
10609 guint32 stacktypes [128];
10610 MonoInst **live_range_start, **live_range_end;
10611 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
10613 *need_local_opts = FALSE;
10615 memset (spec2, 0, sizeof (spec2));
10617 /* FIXME: Move this function to mini.c */
10618 stacktypes ['i'] = STACK_PTR;
10619 stacktypes ['l'] = STACK_I8;
10620 stacktypes ['f'] = STACK_R8;
10621 #ifdef MONO_ARCH_SIMD_INTRINSICS
10622 stacktypes ['x'] = STACK_VTYPE;
10625 #if SIZEOF_REGISTER == 4
10626 /* Create MonoInsts for longs */
10627 for (i = 0; i < cfg->num_varinfo; i++) {
10628 MonoInst *ins = cfg->varinfo [i];
10630 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
10631 switch (ins->type) {
10636 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
10639 g_assert (ins->opcode == OP_REGOFFSET);
10641 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10643 tree->opcode = OP_REGOFFSET;
10644 tree->inst_basereg = ins->inst_basereg;
10645 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10647 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10649 tree->opcode = OP_REGOFFSET;
10650 tree->inst_basereg = ins->inst_basereg;
10651 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10661 /* FIXME: widening and truncation */
10664 * As an optimization, when a variable allocated to the stack is first loaded into
10665 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10666 * the variable again.
10668 orig_next_vreg = cfg->next_vreg;
10669 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
10670 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10674 * These arrays contain the first and last instructions accessing a given
10676 * Since we emit bblocks in the same order we process them here, and we
10677 * don't split live ranges, these will precisely describe the live range of
10678 * the variable, i.e. the instruction range where a valid value can be found
10679 * in the variables location.
10680 * The live range is computed using the liveness info computed by the liveness pass.
10681 * We can't use vmv->range, since that is an abstract live range, and we need
10682 * one which is instruction precise.
10683 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
10685 /* FIXME: Only do this if debugging info is requested */
10686 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
10687 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
10688 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10689 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10691 /* Add spill loads/stores */
10692 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10695 if (cfg->verbose_level > 2)
10696 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
10698 /* Clear vreg_to_lvreg array */
10699 for (i = 0; i < lvregs_len; i++)
10700 vreg_to_lvreg [lvregs [i]] = 0;
10704 MONO_BB_FOR_EACH_INS (bb, ins) {
10705 const char *spec = INS_INFO (ins->opcode);
10706 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
10707 gboolean store, no_lvreg;
10708 int sregs [MONO_MAX_SRC_REGS];
10710 if (G_UNLIKELY (cfg->verbose_level > 2))
10711 mono_print_ins (ins);
10713 if (ins->opcode == OP_NOP)
10717 * We handle LDADDR here as well, since it can only be decomposed
10718 * when variable addresses are known.
10720 if (ins->opcode == OP_LDADDR) {
10721 MonoInst *var = ins->inst_p0;
10723 if (var->opcode == OP_VTARG_ADDR) {
10724 /* Happens on SPARC/S390 where vtypes are passed by reference */
10725 MonoInst *vtaddr = var->inst_left;
10726 if (vtaddr->opcode == OP_REGVAR) {
10727 ins->opcode = OP_MOVE;
10728 ins->sreg1 = vtaddr->dreg;
10730 else if (var->inst_left->opcode == OP_REGOFFSET) {
10731 ins->opcode = OP_LOAD_MEMBASE;
10732 ins->inst_basereg = vtaddr->inst_basereg;
10733 ins->inst_offset = vtaddr->inst_offset;
10737 g_assert (var->opcode == OP_REGOFFSET);
10739 ins->opcode = OP_ADD_IMM;
10740 ins->sreg1 = var->inst_basereg;
10741 ins->inst_imm = var->inst_offset;
10744 *need_local_opts = TRUE;
10745 spec = INS_INFO (ins->opcode);
10748 if (ins->opcode < MONO_CEE_LAST) {
10749 mono_print_ins (ins);
10750 g_assert_not_reached ();
10754 * Store opcodes have destbasereg in the dreg, but in reality, it is an
10758 if (MONO_IS_STORE_MEMBASE (ins)) {
10759 tmp_reg = ins->dreg;
10760 ins->dreg = ins->sreg2;
10761 ins->sreg2 = tmp_reg;
10764 spec2 [MONO_INST_DEST] = ' ';
10765 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10766 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10767 spec2 [MONO_INST_SRC3] = ' ';
10769 } else if (MONO_IS_STORE_MEMINDEX (ins))
10770 g_assert_not_reached ();
10775 if (G_UNLIKELY (cfg->verbose_level > 2)) {
10776 printf ("\t %.3s %d", spec, ins->dreg);
10777 num_sregs = mono_inst_get_src_registers (ins, sregs);
10778 for (srcindex = 0; srcindex < 3; ++srcindex)
10779 printf (" %d", sregs [srcindex]);
10786 regtype = spec [MONO_INST_DEST];
10787 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
10790 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
10791 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
10792 MonoInst *store_ins;
10794 MonoInst *def_ins = ins;
10795 int dreg = ins->dreg; /* The original vreg */
10797 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
10799 if (var->opcode == OP_REGVAR) {
10800 ins->dreg = var->dreg;
10801 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
10803 * Instead of emitting a load+store, use a _membase opcode.
10805 g_assert (var->opcode == OP_REGOFFSET);
10806 if (ins->opcode == OP_MOVE) {
10810 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
10811 ins->inst_basereg = var->inst_basereg;
10812 ins->inst_offset = var->inst_offset;
10815 spec = INS_INFO (ins->opcode);
10819 g_assert (var->opcode == OP_REGOFFSET);
10821 prev_dreg = ins->dreg;
10823 /* Invalidate any previous lvreg for this vreg */
10824 vreg_to_lvreg [ins->dreg] = 0;
10828 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
10830 store_opcode = OP_STOREI8_MEMBASE_REG;
10833 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
10835 if (regtype == 'l') {
10836 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
10837 mono_bblock_insert_after_ins (bb, ins, store_ins);
10838 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
10839 mono_bblock_insert_after_ins (bb, ins, store_ins);
10840 def_ins = store_ins;
10843 g_assert (store_opcode != OP_STOREV_MEMBASE);
10845 /* Try to fuse the store into the instruction itself */
10846 /* FIXME: Add more instructions */
10847 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
10848 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
10849 ins->inst_imm = ins->inst_c0;
10850 ins->inst_destbasereg = var->inst_basereg;
10851 ins->inst_offset = var->inst_offset;
10852 spec = INS_INFO (ins->opcode);
10853 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
10854 ins->opcode = store_opcode;
10855 ins->inst_destbasereg = var->inst_basereg;
10856 ins->inst_offset = var->inst_offset;
10860 tmp_reg = ins->dreg;
10861 ins->dreg = ins->sreg2;
10862 ins->sreg2 = tmp_reg;
10865 spec2 [MONO_INST_DEST] = ' ';
10866 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10867 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10868 spec2 [MONO_INST_SRC3] = ' ';
10870 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
10871 // FIXME: The backends expect the base reg to be in inst_basereg
10872 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
10874 ins->inst_basereg = var->inst_basereg;
10875 ins->inst_offset = var->inst_offset;
10876 spec = INS_INFO (ins->opcode);
10878 /* printf ("INS: "); mono_print_ins (ins); */
10879 /* Create a store instruction */
10880 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
10882 /* Insert it after the instruction */
10883 mono_bblock_insert_after_ins (bb, ins, store_ins);
10885 def_ins = store_ins;
10888 * We can't assign ins->dreg to var->dreg here, since the
10889 * sregs could use it. So set a flag, and do it after
10892 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
10893 dest_has_lvreg = TRUE;
10898 if (def_ins && !live_range_start [dreg]) {
10899 live_range_start [dreg] = def_ins;
10900 live_range_start_bb [dreg] = bb;
10907 num_sregs = mono_inst_get_src_registers (ins, sregs);
10908 for (srcindex = 0; srcindex < 3; ++srcindex) {
10909 regtype = spec [MONO_INST_SRC1 + srcindex];
10910 sreg = sregs [srcindex];
10912 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
10913 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
10914 MonoInst *var = get_vreg_to_inst (cfg, sreg);
10915 MonoInst *use_ins = ins;
10916 MonoInst *load_ins;
10917 guint32 load_opcode;
10919 if (var->opcode == OP_REGVAR) {
10920 sregs [srcindex] = var->dreg;
10921 //mono_inst_set_src_registers (ins, sregs);
10922 live_range_end [sreg] = use_ins;
10923 live_range_end_bb [sreg] = bb;
10927 g_assert (var->opcode == OP_REGOFFSET);
10929 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
10931 g_assert (load_opcode != OP_LOADV_MEMBASE);
10933 if (vreg_to_lvreg [sreg]) {
10934 g_assert (vreg_to_lvreg [sreg] != -1);
10936 /* The variable is already loaded to an lvreg */
10937 if (G_UNLIKELY (cfg->verbose_level > 2))
10938 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
10939 sregs [srcindex] = vreg_to_lvreg [sreg];
10940 //mono_inst_set_src_registers (ins, sregs);
10944 /* Try to fuse the load into the instruction */
10945 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
10946 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
10947 sregs [0] = var->inst_basereg;
10948 //mono_inst_set_src_registers (ins, sregs);
10949 ins->inst_offset = var->inst_offset;
10950 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
10951 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
10952 sregs [1] = var->inst_basereg;
10953 //mono_inst_set_src_registers (ins, sregs);
10954 ins->inst_offset = var->inst_offset;
10956 if (MONO_IS_REAL_MOVE (ins)) {
10957 ins->opcode = OP_NOP;
10960 //printf ("%d ", srcindex); mono_print_ins (ins);
10962 sreg = alloc_dreg (cfg, stacktypes [regtype]);
10964 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
10965 if (var->dreg == prev_dreg) {
10967 * sreg refers to the value loaded by the load
10968 * emitted below, but we need to use ins->dreg
10969 * since it refers to the store emitted earlier.
10973 g_assert (sreg != -1);
10974 vreg_to_lvreg [var->dreg] = sreg;
10975 g_assert (lvregs_len < 1024);
10976 lvregs [lvregs_len ++] = var->dreg;
10980 sregs [srcindex] = sreg;
10981 //mono_inst_set_src_registers (ins, sregs);
10983 if (regtype == 'l') {
10984 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
10985 mono_bblock_insert_before_ins (bb, ins, load_ins);
10986 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
10987 mono_bblock_insert_before_ins (bb, ins, load_ins);
10988 use_ins = load_ins;
10991 #if SIZEOF_REGISTER == 4
10992 g_assert (load_opcode != OP_LOADI8_MEMBASE);
10994 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
10995 mono_bblock_insert_before_ins (bb, ins, load_ins);
10996 use_ins = load_ins;
11000 if (var->dreg < orig_next_vreg) {
11001 live_range_end [var->dreg] = use_ins;
11002 live_range_end_bb [var->dreg] = bb;
11006 mono_inst_set_src_registers (ins, sregs);
11008 if (dest_has_lvreg) {
11009 g_assert (ins->dreg != -1);
11010 vreg_to_lvreg [prev_dreg] = ins->dreg;
11011 g_assert (lvregs_len < 1024);
11012 lvregs [lvregs_len ++] = prev_dreg;
11013 dest_has_lvreg = FALSE;
11017 tmp_reg = ins->dreg;
11018 ins->dreg = ins->sreg2;
11019 ins->sreg2 = tmp_reg;
11022 if (MONO_IS_CALL (ins)) {
11023 /* Clear vreg_to_lvreg array */
11024 for (i = 0; i < lvregs_len; i++)
11025 vreg_to_lvreg [lvregs [i]] = 0;
11027 } else if (ins->opcode == OP_NOP) {
11029 MONO_INST_NULLIFY_SREGS (ins);
11032 if (cfg->verbose_level > 2)
11033 mono_print_ins_index (1, ins);
11036 /* Extend the live range based on the liveness info */
11037 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
11038 for (i = 0; i < cfg->num_varinfo; i ++) {
11039 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
11041 if (vreg_is_volatile (cfg, vi->vreg))
11042 /* The liveness info is incomplete */
11045 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
11046 /* Live from at least the first ins of this bb */
11047 live_range_start [vi->vreg] = bb->code;
11048 live_range_start_bb [vi->vreg] = bb;
11051 if (mono_bitset_test_fast (bb->live_out_set, i)) {
11052 /* Live at least until the last ins of this bb */
11053 live_range_end [vi->vreg] = bb->last_ins;
11054 live_range_end_bb [vi->vreg] = bb;
11060 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
11062 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
11063 * by storing the current native offset into MonoMethodVar->live_range_start/end.
11065 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
11066 for (i = 0; i < cfg->num_varinfo; ++i) {
11067 int vreg = MONO_VARINFO (cfg, i)->vreg;
11070 if (live_range_start [vreg]) {
11071 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
11073 ins->inst_c1 = vreg;
11074 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
11076 if (live_range_end [vreg]) {
11077 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
11079 ins->inst_c1 = vreg;
11080 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
11081 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
11083 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
11089 g_free (live_range_start);
11090 g_free (live_range_end);
11091 g_free (live_range_start_bb);
11092 g_free (live_range_end_bb);
11097 * - use 'iadd' instead of 'int_add'
11098 * - handling ovf opcodes: decompose in method_to_ir.
11099 * - unify iregs/fregs
11100 * -> partly done, the missing parts are:
11101 * - a more complete unification would involve unifying the hregs as well, so
11102 * code wouldn't need if (fp) all over the place. but that would mean the hregs
11103 * would no longer map to the machine hregs, so the code generators would need to
11104 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
11105 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
11106 * fp/non-fp branches speeds it up by about 15%.
11107 * - use sext/zext opcodes instead of shifts
11109 * - get rid of TEMPLOADs if possible and use vregs instead
11110 * - clean up usage of OP_P/OP_ opcodes
11111 * - cleanup usage of DUMMY_USE
11112 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
11114 * - set the stack type and allocate a dreg in the EMIT_NEW macros
11115 * - get rid of all the <foo>2 stuff when the new JIT is ready.
11116 * - make sure handle_stack_args () is called before the branch is emitted
11117 * - when the new IR is done, get rid of all unused stuff
11118 * - COMPARE/BEQ as separate instructions or unify them ?
11119 * - keeping them separate allows specialized compare instructions like
11120 * compare_imm, compare_membase
11121 * - most back ends unify fp compare+branch, fp compare+ceq
11122 * - integrate mono_save_args into inline_method
11123 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
11124 * - handle long shift opts on 32 bit platforms somehow: they require
11125 * 3 sregs (2 for arg1 and 1 for arg2)
11126 * - make byref a 'normal' type.
11127 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
11128 * variable if needed.
11129 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11130 * like inline_method.
11131 * - remove inlining restrictions
11132 * - fix LNEG and enable cfold of INEG
11133 * - generalize x86 optimizations like ldelema as a peephole optimization
11134 * - add store_mem_imm for amd64
11135 * - optimize the loading of the interruption flag in the managed->native wrappers
11136 * - avoid special handling of OP_NOP in passes
11137 * - move code inserting instructions into one function/macro.
11138 * - try a coalescing phase after liveness analysis
11139 * - add float -> vreg conversion + local optimizations on !x86
11140 * - figure out how to handle decomposed branches during optimizations, ie.
11141 * compare+branch, op_jump_table+op_br etc.
11142 * - promote RuntimeXHandles to vregs
11143 * - vtype cleanups:
11144 * - add a NEW_VARLOADA_VREG macro
11145 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11146 * accessing vtype fields.
11147 * - get rid of I8CONST on 64 bit platforms
11148 * - dealing with the increase in code size due to branches created during opcode
11150 * - use extended basic blocks
11151 * - all parts of the JIT
11152 * - handle_global_vregs () && local regalloc
11153 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11154 * - sources of increase in code size:
11157 * - isinst and castclass
11158 * - lvregs not allocated to global registers even if used multiple times
11159 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11161 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11162 * - add all micro optimizations from the old JIT
11163 * - put tree optimizations into the deadce pass
11164 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11165 * specific function.
11166 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11167 * fcompare + branchCC.
11168 * - create a helper function for allocating a stack slot, taking into account
11169 * MONO_CFG_HAS_SPILLUP.
11171 * - merge the ia64 switch changes.
11172 * - optimize mono_regstate2_alloc_int/float.
11173 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11174 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11175 * parts of the tree could be separated by other instructions, killing the tree
11176 * arguments, or stores killing loads etc. Also, should we fold loads into other
11177 * instructions if the result of the load is used multiple times ?
11178 * - make the REM_IMM optimization in mini-x86.c arch-independent.
11179 * - LAST MERGE: 108395.
11180 * - when returning vtypes in registers, generate IR and append it to the end of the
11181 * last bb instead of doing it in the epilog.
11182 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
11190 - When to decompose opcodes:
11191 - earlier: this makes some optimizations hard to implement, since the low level IR
11192 no longer contains the necessary information. But it is easier to do.
11193 - later: harder to implement, enables more optimizations.
11194 - Branches inside bblocks:
11195 - created when decomposing complex opcodes.
11196 - branches to another bblock: harmless, but not tracked by the branch
11197 optimizations, so need to branch to a label at the start of the bblock.
11198 - branches to inside the same bblock: very problematic, trips up the local
11199 reg allocator. Can be fixed by splitting the current bblock, but that is a
11200 complex operation, since some local vregs can become global vregs etc.
11201 - Local/global vregs:
11202 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11203 local register allocator.
11204 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11205 structure, created by mono_create_var (). Assigned to hregs or the stack by
11206 the global register allocator.
11207 - When to do optimizations like alu->alu_imm:
11208 - earlier -> saves work later on since the IR will be smaller/simpler
11209 - later -> can work on more instructions
11210 - Handling of valuetypes:
11211 - When a vtype is pushed on the stack, a new temporary is created, an
11212 instruction computing its address (LDADDR) is emitted and pushed on
11213 the stack. Need to optimize cases when the vtype is used immediately as in
11214 argument passing, stloc etc.
11215 - Instead of the to_end stuff in the old JIT, simply call the function handling
11216 the values on the stack before emitting the last instruction of the bb.
11219 #endif /* DISABLE_JIT */