2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
30 #include <mono/utils/memcheck.h>
32 #include <mono/metadata/assembly.h>
33 #include <mono/metadata/loader.h>
34 #include <mono/metadata/tabledefs.h>
35 #include <mono/metadata/class.h>
36 #include <mono/metadata/object.h>
37 #include <mono/metadata/exception.h>
38 #include <mono/metadata/opcodes.h>
39 #include <mono/metadata/mono-endian.h>
40 #include <mono/metadata/tokentype.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/marshal.h>
43 #include <mono/metadata/debug-helpers.h>
44 #include <mono/metadata/mono-debug.h>
45 #include <mono/metadata/gc-internal.h>
46 #include <mono/metadata/security-manager.h>
47 #include <mono/metadata/threads-types.h>
48 #include <mono/metadata/security-core-clr.h>
49 #include <mono/metadata/monitor.h>
50 #include <mono/metadata/profiler-private.h>
51 #include <mono/metadata/profiler.h>
52 #include <mono/utils/mono-compiler.h>
53 #include <mono/metadata/mono-basic-block.h>
60 #include "jit-icalls.h"
62 #include "debugger-agent.h"
/*
 * JIT tuning constants: BRANCH_COST feeds the branch-vs-cmov heuristics,
 * INLINE_LENGTH_LIMIT caps the IL size of inline candidates.
 */
64 #define BRANCH_COST 100
65 #define INLINE_LENGTH_LIMIT 20
/*
 * NOTE(review): the two macro bodies below are visibly truncated in this
 * extraction -- their action lines and the closing "} while (0)" are missing.
 * From the visible conditions: INLINE_FAILURE triggers only while compiling
 * an inlined, non-wrapper method; CHECK_CFG_EXCEPTION triggers when the cfg
 * has a pending exception recorded (presumably both bail out -- verify
 * against the upstream file).
 */
66 #define INLINE_FAILURE do {\
67 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
70 #define CHECK_CFG_EXCEPTION do {\
71 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
/*
 * METHOD_ACCESS_FAILURE: record a MONO_EXCEPTION_METHOD_ACCESS on the cfg,
 * with a message naming the inaccessible method and the caller, free the
 * temporary name strings, and jump to the exception_exit label.
 * NOTE(review): the closing "} while (0)" is missing from this extraction.
 */
74 #define METHOD_ACCESS_FAILURE do { \
75 char *method_fname = mono_method_full_name (method, TRUE); \
76 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
77 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
78 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
79 g_free (method_fname); \
80 g_free (cil_method_fname); \
81 goto exception_exit; \
/*
 * FIELD_ACCESS_FAILURE: like METHOD_ACCESS_FAILURE, but for a field access
 * violation -- records MONO_EXCEPTION_FIELD_ACCESS and jumps to
 * exception_exit.
 * NOTE(review): the closing "} while (0)" is missing from this extraction.
 */
83 #define FIELD_ACCESS_FAILURE do { \
84 char *method_fname = mono_method_full_name (method, TRUE); \
85 char *field_fname = mono_field_full_name (field); \
86 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
87 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
88 g_free (method_fname); \
89 g_free (field_fname); \
90 goto exception_exit; \
/*
 * GENERIC_SHARING_FAILURE(opcode): when compiling under a generic sharing
 * context, optionally trace the failing opcode (at verbose_level > 2),
 * record MONO_EXCEPTION_GENERIC_SHARING_FAILED and jump to exception_exit
 * so the method is recompiled without sharing.
 * NOTE(review): trailing lines of the macro are missing in this extraction.
 */
92 #define GENERIC_SHARING_FAILURE(opcode) do { \
93 if (cfg->generic_sharing_context) { \
94 if (cfg->verbose_level > 2) \
95 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
96 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
97 goto exception_exit; \
101 /* Determine whether 'ins' represents a load of the 'this' argument:
 * the method must have an implicit this, and the instruction must be a
 * plain OP_MOVE whose source register is argument 0's dreg. */
102 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations for the CIL->membase opcode translators defined
 * later in this file. */
104 static int ldind_to_load_membase (int opcode);
105 static int stind_to_store_membase (int opcode);
/* Public opcode-to-immediate-form translators (defined elsewhere in mini). */
107 int mono_op_to_op_imm (int opcode);
108 int mono_op_to_op_imm_noemul (int opcode);
/* IR emission entry points shared with other parts of the JIT. */
110 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
111 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
112 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
114 /* helper methods signature */
/* Trampoline call signatures; defined in another translation unit. */
115 extern MonoMethodSignature *helper_sig_class_init_trampoline;
116 extern MonoMethodSignature *helper_sig_domain_get;
117 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
118 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
119 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
120 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
/*
 * Instruction metadata tables, generated by expanding mini-ops.h twice with
 * different MINI_OP/MINI_OP3 definitions:
 *   1) a per-opcode dreg/sreg type-spec table (first expansion below);
 *   2) ins_sreg_counts, the number of source registers per opcode.
 * NOTE(review): several lines (#undef's, table declarations, closing
 * braces) are missing from this extraction.
 */
123 * Instruction metadata
131 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
132 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
138 #if SIZEOF_REGISTER == 8
143 /* keep in sync with the enum in mini.h */
146 #include "mini-ops.h"
/* Second expansion: count sregs per opcode (0..3). */
151 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
152 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
154 * This should contain the index of the last sreg + 1. This is not the same
155 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
157 const gint8 ins_sreg_counts[] = {
158 #include "mini-ops.h"
/*
 * MONO_INIT_VARINFO: initialize a MonoMethodVar's liveness info; body is
 * truncated here (only the first_use reset is visible).
 */
163 #define MONO_INIT_VARINFO(vi,id) do { \
164 (vi)->range.first_use.pos.bid = 0xffff; \
/*
 * mono_inst_set_src_registers: copy the three source registers from REGS
 * into INS (sreg1..sreg3).  REGS must have at least 3 entries.
 * NOTE(review): the return-type line and braces are missing in this
 * extraction.
 */
170 mono_inst_set_src_registers (MonoInst *ins, int *regs)
172 ins->sreg1 = regs [0];
173 ins->sreg2 = regs [1];
174 ins->sreg3 = regs [2];
/*
 * Thin public wrappers over the cfg-local virtual register allocators:
 * integer (ireg), float (freg), pointer (preg), and type-directed (dreg)
 * registers respectively.
 * NOTE(review): return-type lines and braces are missing in this extraction.
 */
178 mono_alloc_ireg (MonoCompile *cfg)
180 return alloc_ireg (cfg);
184 mono_alloc_freg (MonoCompile *cfg)
186 return alloc_freg (cfg);
190 mono_alloc_preg (MonoCompile *cfg)
192 return alloc_preg (cfg);
196 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
198 return alloc_dreg (cfg, stack_type);
/*
 * mono_type_to_regmove: map a MonoType to the move opcode used when copying
 * a value of that type between registers.
 * NOTE(review): many case labels, return statements and the byref/pinned
 * handling are missing from this extraction -- only the case structure is
 * visible.  Enums and generic instances are unwrapped to their underlying
 * type before dispatch.
 */
202 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
208 switch (type->type) {
211 case MONO_TYPE_BOOLEAN:
223 case MONO_TYPE_FNPTR:
225 case MONO_TYPE_CLASS:
226 case MONO_TYPE_STRING:
227 case MONO_TYPE_OBJECT:
228 case MONO_TYPE_SZARRAY:
229 case MONO_TYPE_ARRAY:
233 #if SIZEOF_REGISTER == 8
242 case MONO_TYPE_VALUETYPE:
/* enums move like their underlying integral type */
243 if (type->data.klass->enumtype) {
244 type = mono_class_enum_basetype (type->data.klass);
/* SIMD-capable value types get a special move (when SIMD is enabled) */
247 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
250 case MONO_TYPE_TYPEDBYREF:
252 case MONO_TYPE_GENERICINST:
253 type = &type->data.generic_class->container_class->byval_arg;
/* type variables are only legal here under generic sharing */
257 g_assert (cfg->generic_sharing_context);
260 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb: debugging helper -- print a basic block's number, its
 * in/out edges (with depth-first numbers), and every instruction in it.
 * NOTE(review): return-type line, local declarations and braces are missing
 * in this extraction.
 */
266 mono_print_bb (MonoBasicBlock *bb, const char *msg)
271 printf ("\n%s %d: [IN: ", msg, bb->block_num);
272 for (i = 0; i < bb->in_count; ++i)
273 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
275 for (i = 0; i < bb->out_count; ++i)
276 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
278 for (tree = bb->code; tree; tree = tree->next)
279 mono_print_ins_index (-1, tree);
283 * Can't put this at the beginning, since other files reference stuff from this
/*
 * UNVERIFIED: central bail-out for unverifiable IL -- break into the
 * debugger if requested, otherwise jump to the 'unverified' label.
 */
288 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
/*
 * GET_BBLOCK: fetch (or lazily create and register) the basic block that
 * starts at IL offset 'ip', via the cil_offset_to_bb cache.  Out-of-range
 * ip is treated as unverifiable code.
 * NOTE(review): some interior lines of this macro are missing here.
 */
290 #define GET_BBLOCK(cfg,tblock,ip) do { \
291 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
293 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
294 NEW_BBLOCK (cfg, (tblock)); \
295 (tblock)->cil_code = (ip); \
296 ADD_BBLOCK (cfg, (tblock)); \
/*
 * EMIT_NEW_X86_LEA (x86/amd64 only): emit an OP_X86_LEA computing
 * sr1 + (sr2 << shift) + imm into a fresh pointer register, and append it
 * to the current bblock.
 * NOTE(review): the closing "} while (0)" line is missing here.
 */
300 #if defined(TARGET_X86) || defined(TARGET_AMD64)
301 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
302 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
303 (dest)->dreg = alloc_preg ((cfg)); \
304 (dest)->sreg1 = (sr1); \
305 (dest)->sreg2 = (sr2); \
306 (dest)->inst_imm = (imm); \
307 (dest)->backend.shift_amount = (shift); \
308 MONO_ADD_INS ((cfg)->cbb, (dest)); \
/*
 * ADD_WIDEN_OP: on 64-bit targets, when a binop mixes a native pointer
 * (STACK_PTR) with a 32-bit int (STACK_I4), sign-extend the I4 operand to
 * pointer width and rewire sreg2 to the widened value.  No-op on 32-bit.
 * NOTE(review): part of the macro body (declaration of 'widen', closing
 * lines) is missing in this extraction.
 */
312 #if SIZEOF_REGISTER == 8
313 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
314 /* FIXME: Need to add many more cases */ \
315 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
317 int dr = alloc_preg (cfg); \
318 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
319 (ins)->sreg2 = widen->dreg; \
323 #define ADD_WIDEN_OP(ins, arg1, arg2)
/*
 * ADD_BINOP: pop the two top stack values (sp[0], sp[1]), build the binary
 * instruction 'op', infer its eval-stack result type via type_from_op (),
 * insert a widening op if the operand widths differ, allocate a dreg,
 * append to the current bblock, and push the (possibly decomposed) result.
 * NOTE(review): the sp-adjustment and closing lines of the macro are
 * missing in this extraction.
 */
326 #define ADD_BINOP(op) do { \
327 MONO_INST_NEW (cfg, ins, (op)); \
329 ins->sreg1 = sp [0]->dreg; \
330 ins->sreg2 = sp [1]->dreg; \
331 type_from_op (ins, sp [0], sp [1]); \
333 /* Have to insert a widening op */ \
334 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
335 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
336 MONO_ADD_INS ((cfg)->cbb, (ins)); \
337 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
/*
 * ADD_UNOP: unary counterpart of ADD_BINOP -- pops one value, types the
 * instruction, allocates the dreg, appends it and pushes the decomposed
 * result.
 * NOTE(review): the sp-adjustment and closing lines are missing here.
 */
340 #define ADD_UNOP(op) do { \
341 MONO_INST_NEW (cfg, ins, (op)); \
343 ins->sreg1 = sp [0]->dreg; \
344 type_from_op (ins, sp [0], NULL); \
346 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
347 MONO_ADD_INS ((cfg)->cbb, (ins)); \
348 *sp++ = mono_decompose_opcode (cfg, ins); \
/*
 * ADD_BINCOND: emit a two-operand conditional branch.  Builds an OP_COMPARE
 * of the two top stack values plus a typed branch instruction, links the
 * current bblock to the true target (at 'target') and the false target
 * (either 'next_block' or the bblock at the fall-through ip), flushes any
 * remaining stack items to temps, and appends compare+branch to the bblock.
 * NOTE(review): several macro lines (declarations of cmp/ins, the branch
 * MONO_INST_NEW, closing lines) are missing in this extraction.
 */
351 #define ADD_BINCOND(next_block) do { \
354 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
355 cmp->sreg1 = sp [0]->dreg; \
356 cmp->sreg2 = sp [1]->dreg; \
357 type_from_op (cmp, sp [0], sp [1]); \
359 type_from_op (ins, sp [0], sp [1]); \
360 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
361 GET_BBLOCK (cfg, tblock, target); \
362 link_bblock (cfg, bblock, tblock); \
363 ins->inst_true_bb = tblock; \
364 if ((next_block)) { \
365 link_bblock (cfg, bblock, (next_block)); \
366 ins->inst_false_bb = (next_block); \
367 start_new_bblock = 1; \
369 GET_BBLOCK (cfg, tblock, ip); \
370 link_bblock (cfg, bblock, tblock); \
371 ins->inst_false_bb = tblock; \
372 start_new_bblock = 2; \
374 if (sp != stack_start) { \
375 handle_stack_args (cfg, stack_start, sp - stack_start); \
376 CHECK_UNVERIFIABLE (cfg); \
378 MONO_ADD_INS (bblock, cmp); \
379 MONO_ADD_INS (bblock, ins); \
383 * link_bblock: Links two basic blocks
385 * links two basic blocks in the control flow graph, the 'from'
386 * argument is the starting block and the 'to' argument is the block
387 * the control flow ends to after 'from'.
/*
 * Adds 'to' to from->out_bb and 'from' to to->in_bb, skipping duplicates.
 * Both edge arrays are grown by reallocating from the cfg mempool (the old
 * arrays are leaked into the mempool, which is freed wholesale later).
 * NOTE(review): return type, several lines (verbosity guard, array
 * reassignments, count increments, closing braces) are missing in this
 * extraction.
 */
390 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
392 MonoBasicBlock **newa;
/* trace edges when verbose (guard line not visible here) */
396 if (from->cil_code) {
398 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
400 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
403 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
405 printf ("edge from entry to exit\n");
/* already linked? then nothing to do for the out-edge */
410 for (i = 0; i < from->out_count; ++i) {
411 if (to == from->out_bb [i]) {
417 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
418 for (i = 0; i < from->out_count; ++i) {
419 newa [i] = from->out_bb [i];
/* symmetric handling for to->in_bb */
427 for (i = 0; i < to->in_count; ++i) {
428 if (from == to->in_bb [i]) {
434 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
435 for (i = 0; i < to->in_count; ++i) {
436 newa [i] = to->in_bb [i];
/* public wrapper around the static link_bblock */
445 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
447 link_bblock (cfg, from, to);
451 * mono_find_block_region:
453 * We mark each basic block with a region ID. We use that to avoid BB
454 * optimizations when blocks are in different regions.
457 * A region token that encodes where this region is, and information
458 * about the clause owner for this block.
460 * The region encodes the try/catch/filter clause that owns this block
461 * as well as the type. -1 is a special value that represents a block
462 * that is in none of try/catch/filter.
/*
 * Encoding: ((clause_index + 1) << 8) | region_kind | clause_flags, where
 * region_kind is MONO_REGION_{FILTER,FINALLY,FAULT,CATCH} for handler
 * blocks, or just clause_flags for blocks inside the protected (try) range.
 * NOTE(review): return type, loop-variable declaration, and the trailing
 * "return -1" path are missing in this extraction.
 */
465 mono_find_block_region (MonoCompile *cfg, int offset)
467 MonoMethodHeader *header = cfg->header;
468 MonoExceptionClause *clause;
471 for (i = 0; i < header->num_clauses; ++i) {
472 clause = &header->clauses [i];
/* filter expressions live between filter_offset and handler_offset */
473 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
474 (offset < (clause->handler_offset)))
475 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
477 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
478 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
479 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
480 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
481 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
483 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
486 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
487 return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block: collect (as a GList) the exception clauses of kind
 * 'type' whose protected range contains 'ip' but not 'target' -- i.e. the
 * handlers that must run when control transfers from ip to target.
 * NOTE(review): return type, the initialization of 'res', the final return
 * and closing braces are missing in this extraction.
 */
494 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
496 MonoMethodHeader *header = cfg->header;
497 MonoExceptionClause *clause;
501 for (i = 0; i < header->num_clauses; ++i) {
502 clause = &header->clauses [i];
503 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
504 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
505 if (clause->flags == type)
506 res = g_list_append (res, clause);
/*
 * mono_create_spvar_for_region: get or lazily create the per-region
 * stack-pointer spill variable, cached in cfg->spvars keyed by region id.
 * The var is flagged MONO_INST_INDIRECT so the register allocator leaves
 * it on the stack.
 * NOTE(review): return type, the early-return on cache hit, and the final
 * return are missing in this extraction.
 */
513 mono_create_spvar_for_region (MonoCompile *cfg, int region)
517 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
521 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
522 /* prevent it from being register allocated */
523 var->flags |= MONO_INST_INDIRECT;
525 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/*
 * mono_find_exvar_for_offset: look up (without creating) the exception
 * object variable for a handler at the given IL offset.
 * mono_create_exvar_for_offset: same lookup, but lazily creates the
 * object-typed local (flagged MONO_INST_INDIRECT) and caches it in
 * cfg->exvars on a miss.
 * NOTE(review): return types, braces and early returns are missing in this
 * extraction.
 */
529 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
531 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
535 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
539 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
543 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
544 /* prevent it from being register allocated */
545 var->flags |= MONO_INST_INDIRECT;
547 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
553 * Returns the type used in the eval stack when @type is loaded.
554 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/*
 * Sets inst->type (one of the STACK_* kinds) and inst->klass from 'type'.
 * Byref types become STACK_MP; enums and generic instances are unwrapped
 * and re-dispatched.
 * NOTE(review): many case labels, break statements and the byref test are
 * missing from this extraction -- only the assignments are visible.
 */
557 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
561 inst->klass = klass = mono_class_from_mono_type (type);
/* byref -> managed pointer on the eval stack */
563 inst->type = STACK_MP;
568 switch (type->type) {
570 inst->type = STACK_INV;
574 case MONO_TYPE_BOOLEAN:
580 inst->type = STACK_I4;
585 case MONO_TYPE_FNPTR:
586 inst->type = STACK_PTR;
588 case MONO_TYPE_CLASS:
589 case MONO_TYPE_STRING:
590 case MONO_TYPE_OBJECT:
591 case MONO_TYPE_SZARRAY:
592 case MONO_TYPE_ARRAY:
593 inst->type = STACK_OBJ;
597 inst->type = STACK_I8;
601 inst->type = STACK_R8;
603 case MONO_TYPE_VALUETYPE:
604 if (type->data.klass->enumtype) {
605 type = mono_class_enum_basetype (type->data.klass);
609 inst->type = STACK_VTYPE;
612 case MONO_TYPE_TYPEDBYREF:
613 inst->klass = mono_defaults.typed_reference_class;
614 inst->type = STACK_VTYPE;
616 case MONO_TYPE_GENERICINST:
617 type = &type->data.generic_class->container_class->byval_arg;
620 case MONO_TYPE_MVAR :
621 /* FIXME: all the arguments must be references for now,
622 * later look inside cfg and see if the arg num is
625 g_assert (cfg->generic_sharing_context);
626 inst->type = STACK_OBJ;
629 g_error ("unknown type 0x%02x in eval stack type", type->type);
634 * The following tables are used to quickly validate the IL code in type_from_op ().
/*
 * Each table is indexed by the STACK_* kinds of the operand(s); STACK_INV
 * marks an illegal operand combination.  The *_op_map arrays convert a
 * generic CIL opcode into its type-specific IR opcode by adding the delta
 * (e.g. OP_IADD-CEE_ADD) selected by the result's stack type.
 * NOTE(review): declaration lines ("static const ...") and closing "};"
 * lines of several tables are missing from this extraction.
 */
637 bin_num_table [STACK_MAX] [STACK_MAX] = {
638 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
639 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
640 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
641 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
642 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
643 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
644 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
645 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* presumably the initializer of neg_table (used below in type_from_op);
 * its declaration line is missing here -- verify against upstream */
650 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
653 /* reduce the size of this table */
655 bin_int_table [STACK_MAX] [STACK_MAX] = {
656 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
657 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
658 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
659 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
660 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
661 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
662 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
663 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* comparison legality: nonzero = allowed (values 2/3/4 mark the cases the
 * ECMA spec deems non-verifiable but accepted) */
667 bin_comp_table [STACK_MAX] [STACK_MAX] = {
668 /* Inv i L p F & O vt */
670 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
671 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
672 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
673 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
674 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
675 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
676 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
679 /* reduce the size of this table */
681 shift_table [STACK_MAX] [STACK_MAX] = {
682 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
683 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
684 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
685 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
686 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
687 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
688 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
689 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
693 * Tables to map from the non-specific opcode to the matching
694 * type-specific opcode.
696 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
698 binops_op_map [STACK_MAX] = {
699 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
702 /* handles from CEE_NEG to CEE_CONV_U8 */
704 unops_op_map [STACK_MAX] = {
705 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
708 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
710 ovfops_op_map [STACK_MAX] = {
711 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
714 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
716 ovf2ops_op_map [STACK_MAX] = {
717 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
720 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
722 ovf3ops_op_map [STACK_MAX] = {
723 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
726 /* handles from CEE_BEQ to CEE_BLT_UN */
728 beqops_op_map [STACK_MAX] = {
729 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
732 /* handles from CEE_CEQ to CEE_CLT_UN */
734 ceqops_op_map [STACK_MAX] = {
735 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
739 * Sets ins->type (the type on the eval stack) according to the
740 * type of the opcode and the arguments to it.
741 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
743 * FIXME: this function sets ins->type unconditionally in some cases, but
744 * it should set it to invalid for some types (a conv.x on an object)
/*
 * The generic CIL opcode in ins->opcode is also rewritten into its
 * type-specific IR form, either by adding a delta from the *_op_map tables
 * or by assigning a concrete OP_* directly.
 * NOTE(review): the case labels for most arms of the outer switch, the
 * break statements, and several nested-switch lines are missing from this
 * extraction -- only the action lines remain.  Comments below are keyed to
 * the visible table lookups.
 */
747 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
749 switch (ins->opcode) {
/* arithmetic binops: result type from bin_num_table */
756 /* FIXME: check unverifiable args for STACK_MP */
757 ins->type = bin_num_table [src1->type] [src2->type];
758 ins->opcode += binops_op_map [ins->type];
/* bitwise binops: integer-only combinations */
765 ins->type = bin_int_table [src1->type] [src2->type];
766 ins->opcode += binops_op_map [ins->type];
/* shifts: value type from shift_table, shift count must be int/ptr */
771 ins->type = shift_table [src1->type] [src2->type];
772 ins->opcode += binops_op_map [ins->type];
/* comparisons: pick L/F/I compare based on operand width */
777 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
778 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
779 ins->opcode = OP_LCOMPARE;
780 else if (src1->type == STACK_R8)
781 ins->opcode = OP_FCOMPARE;
783 ins->opcode = OP_ICOMPARE;
785 case OP_ICOMPARE_IMM:
786 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
787 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
788 ins->opcode = OP_LCOMPARE_IMM;
/* conditional branches (beq..blt.un family) */
800 ins->opcode += beqops_op_map [src1->type];
803 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
804 ins->opcode += ceqops_op_map [src1->type];
/* unsigned compares only legal where bin_comp_table bit 0 is set */
810 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
811 ins->opcode += ceqops_op_map [src1->type];
/* neg: result type from neg_table */
815 ins->type = neg_table [src1->type];
816 ins->opcode += unops_op_map [ins->type];
/* not: only integral stack types are legal */
819 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
820 ins->type = src1->type;
822 ins->type = STACK_INV;
823 ins->opcode += unops_op_map [ins->type];
/* conv to small int: result is I4 */
829 ins->type = STACK_I4;
830 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: pick int/long variant by source type */
833 ins->type = STACK_R8;
834 switch (src1->type) {
837 ins->opcode = OP_ICONV_TO_R_UN;
840 ins->opcode = OP_LCONV_TO_R_UN;
844 case CEE_CONV_OVF_I1:
845 case CEE_CONV_OVF_U1:
846 case CEE_CONV_OVF_I2:
847 case CEE_CONV_OVF_U2:
848 case CEE_CONV_OVF_I4:
849 case CEE_CONV_OVF_U4:
850 ins->type = STACK_I4;
851 ins->opcode += ovf3ops_op_map [src1->type];
853 case CEE_CONV_OVF_I_UN:
854 case CEE_CONV_OVF_U_UN:
855 ins->type = STACK_PTR;
856 ins->opcode += ovf2ops_op_map [src1->type];
858 case CEE_CONV_OVF_I1_UN:
859 case CEE_CONV_OVF_I2_UN:
860 case CEE_CONV_OVF_I4_UN:
861 case CEE_CONV_OVF_U1_UN:
862 case CEE_CONV_OVF_U2_UN:
863 case CEE_CONV_OVF_U4_UN:
864 ins->type = STACK_I4;
865 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: native-unsigned conversion, width-dependent */
868 ins->type = STACK_PTR;
869 switch (src1->type) {
871 ins->opcode = OP_ICONV_TO_U;
875 #if SIZEOF_REGISTER == 8
876 ins->opcode = OP_LCONV_TO_U;
878 ins->opcode = OP_MOVE;
882 ins->opcode = OP_LCONV_TO_U;
885 ins->opcode = OP_FCONV_TO_U;
/* conv to 64-bit */
891 ins->type = STACK_I8;
892 ins->opcode += unops_op_map [src1->type];
894 case CEE_CONV_OVF_I8:
895 case CEE_CONV_OVF_U8:
896 ins->type = STACK_I8;
897 ins->opcode += ovf3ops_op_map [src1->type];
899 case CEE_CONV_OVF_U8_UN:
900 case CEE_CONV_OVF_I8_UN:
901 ins->type = STACK_I8;
902 ins->opcode += ovf2ops_op_map [src1->type];
/* conv to float */
906 ins->type = STACK_R8;
907 ins->opcode += unops_op_map [src1->type];
910 ins->type = STACK_R8;
/* conv to native int/ptr with overflow family */
914 ins->type = STACK_I4;
915 ins->opcode += ovfops_op_map [src1->type];
920 ins->type = STACK_PTR;
921 ins->opcode += ovfops_op_map [src1->type];
/* add/mul/sub with overflow: no float variants */
929 ins->type = bin_num_table [src1->type] [src2->type];
930 ins->opcode += ovfops_op_map [src1->type];
931 if (ins->type == STACK_R8)
932 ins->type = STACK_INV;
934 case OP_LOAD_MEMBASE:
935 ins->type = STACK_PTR;
937 case OP_LOADI1_MEMBASE:
938 case OP_LOADU1_MEMBASE:
939 case OP_LOADI2_MEMBASE:
940 case OP_LOADU2_MEMBASE:
941 case OP_LOADI4_MEMBASE:
942 case OP_LOADU4_MEMBASE:
943 ins->type = STACK_PTR;
945 case OP_LOADI8_MEMBASE:
946 ins->type = STACK_I8;
948 case OP_LOADR4_MEMBASE:
949 case OP_LOADR8_MEMBASE:
950 ins->type = STACK_R8;
953 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* managed pointers carry object_class as a placeholder klass */
957 if (ins->type == STACK_MP)
958 ins->klass = mono_defaults.object_class;
/* presumably the row of ldind_type (maps CEE_LDIND_* to stack kinds);
 * its declaration line is missing here -- verify against upstream */
963 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
/* param_table: body missing in this extraction (only the opening line
 * survives); referenced by the commented-out check in
 * check_values_to_signature below */
969 param_table [STACK_MAX] [STACK_MAX] = {
/*
 * check_values_to_signature: debug/verification helper that checks a
 * vector of evaluated arguments against a method signature's parameter
 * types (byref-ness, reference kinds, float width).  Visible code suggests
 * it returns 0 on mismatch -- the return statements themselves are missing
 * from this extraction, as are the 'this' handling and closing braces.
 */
974 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
978 switch (args->type) {
988 for (i = 0; i < sig->param_count; ++i) {
989 switch (args [i].type) {
/* managed pointers require a byref parameter */
993 if (!sig->params [i]->byref)
997 if (sig->params [i]->byref)
999 switch (sig->params [i]->type) {
1000 case MONO_TYPE_CLASS:
1001 case MONO_TYPE_STRING:
1002 case MONO_TYPE_OBJECT:
1003 case MONO_TYPE_SZARRAY:
1004 case MONO_TYPE_ARRAY:
/* floats must match an R4/R8 non-byref parameter */
1011 if (sig->params [i]->byref)
1013 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1022 /*if (!param_table [args [i].type] [sig->params [i]->type])
1030 * When we need a pointer to the current domain many times in a method, we
1031 * call mono_domain_get() once and we store the result in a local variable.
1032 * This function returns the variable that represents the MonoDomain*.
/* Lazily creates the int-sized OP_LOCAL on first use and caches it on cfg. */
1034 inline static MonoInst *
1035 mono_get_domainvar (MonoCompile *cfg)
1037 if (!cfg->domainvar)
1038 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1039 return cfg->domainvar;
1043 * The got_var contains the address of the Global Offset Table when AOT
/*
 * mono_get_got_var: on architectures that need an explicit GOT variable
 * (MONO_ARCH_NEED_GOT_VAR) and when compiling AOT, lazily create and return
 * the int-sized local holding the GOT address.
 * NOTE(review): the early-return for !compile_aot, the non-GOT-arch branch
 * and closing braces are missing in this extraction.
 */
1047 mono_get_got_var (MonoCompile *cfg)
1049 #ifdef MONO_ARCH_NEED_GOT_VAR
1050 if (!cfg->compile_aot)
1052 if (!cfg->got_var) {
1053 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1055 return cfg->got_var;
/*
 * mono_get_vtable_var: return the runtime-generic-context (rgctx) variable,
 * lazily creating it as a stack-pinned (MONO_INST_INDIRECT) int local.
 * Only valid under a generic sharing context (asserted).
 * NOTE(review): return type line and braces are missing in this extraction.
 */
1062 mono_get_vtable_var (MonoCompile *cfg)
1064 g_assert (cfg->generic_sharing_context);
1066 if (!cfg->rgctx_var) {
1067 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1068 /* force the var to be stack allocated */
1069 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1072 return cfg->rgctx_var;
/*
 * type_from_stack_type: inverse of type_to_eval_stack_type -- map an
 * instruction's eval-stack kind back to a canonical MonoType*.
 * NOTE(review): the return-type line, the STACK_MP case label and the
 * default label are missing in this extraction.
 */
1076 type_from_stack_type (MonoInst *ins) {
1077 switch (ins->type) {
1078 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1079 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1080 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1081 case STACK_R8: return &mono_defaults.double_class->byval_arg;
/* managed pointer: use the klass's this_arg (byref) type */
1083 return &ins->klass->this_arg;
1084 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1085 case STACK_VTYPE: return &ins->klass->byval_arg;
1087 g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * type_to_stack_type: map a MonoType (after unwrapping enums via
 * mono_type_get_underlying_type) to its STACK_* kind.
 * NOTE(review): most case labels and all return statements are missing in
 * this extraction -- only the case structure survives.
 */
1092 static G_GNUC_UNUSED int
1093 type_to_stack_type (MonoType *t)
1095 t = mono_type_get_underlying_type (t);
1099 case MONO_TYPE_BOOLEAN:
1102 case MONO_TYPE_CHAR:
1109 case MONO_TYPE_FNPTR:
1111 case MONO_TYPE_CLASS:
1112 case MONO_TYPE_STRING:
1113 case MONO_TYPE_OBJECT:
1114 case MONO_TYPE_SZARRAY:
1115 case MONO_TYPE_ARRAY:
1123 case MONO_TYPE_VALUETYPE:
1124 case MONO_TYPE_TYPEDBYREF:
1126 case MONO_TYPE_GENERICINST:
/* generic insts over value types are STACK_VTYPE, otherwise objects */
1127 if (mono_type_generic_inst_is_valuetype (t))
1133 g_assert_not_reached ();
/*
 * array_access_to_klass: map a CEE_LDELEM_*/CEE_STELEM_* opcode to the
 * element MonoClass it accesses.
 * NOTE(review): the return-type line and most case labels are missing in
 * this extraction; the visible returns pair with the (absent) I1/U1/I2/U2/
 * I4/U4/I8/R4/R8/I labels in opcode order.
 */
1140 array_access_to_klass (int opcode)
1144 return mono_defaults.byte_class;
1146 return mono_defaults.uint16_class;
1149 return mono_defaults.int_class;
1152 return mono_defaults.sbyte_class;
1155 return mono_defaults.int16_class;
1158 return mono_defaults.int32_class;
1160 return mono_defaults.uint32_class;
1163 return mono_defaults.int64_class;
1166 return mono_defaults.single_class;
1169 return mono_defaults.double_class;
1170 case CEE_LDELEM_REF:
1171 case CEE_STELEM_REF:
1172 return mono_defaults.object_class;
1174 g_assert_not_reached ();
1180 * We try to share variables when possible
/*
 * mono_compile_get_interface_var: return a local variable to hold a stack
 * slot value across a basic-block boundary.  Scalar stack kinds are cached
 * per (slot, stack-type) in cfg->intvars so the same temp is reused;
 * other kinds (and slots beyond max_stack, which inlining can create)
 * always get a fresh variable.
 * NOTE(review): return type, case labels of the switch and closing lines
 * are missing in this extraction.
 */
1183 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1188 /* inlining can result in deeper stacks */
1189 if (slot >= cfg->header->max_stack)
1190 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* cache index: stack kind (1-based) within the slot's stride */
1192 pos = ins->type - 1 + slot * STACK_MAX;
1194 switch (ins->type) {
1201 if ((vnum = cfg->intvars [pos]))
1202 return cfg->varinfo [vnum];
1203 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1204 cfg->intvars [pos] = res->inst_c0;
1207 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info: when compiling AOT, remember the (image, token)
 * pair used to load 'key' in cfg->token_info_hash, so the AOT compiler can
 * re-resolve it later.  Skipped when a generic_context is active (the
 * token alone would be ambiguous) or for wrapper references (table == 0).
 * NOTE(review): return type line and braces are missing in this extraction.
 */
1213 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1216 * Don't use this if a generic_context is set, since that means AOT can't
1217 * look up the method using just the image+token.
1218 * table == 0 means this is a reference made from a wrapper.
1220 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1221 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1222 jump_info_token->image = image;
1223 jump_info_token->token = token;
1224 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1229 * This function is called to handle items that are left on the evaluation stack
1230 * at basic block boundaries. What happens is that we save the values to local variables
1231 * and we reload them later when first entering the target basic block (with the
1232 * handle_loaded_temps () function).
1233 * A single joint point will use the same variables (stored in the array bb->out_stack or
1234 * bb->in_stack, if the basic block is before or after the joint point).
1236 * This function needs to be called _before_ emitting the last instruction of
1237 * the bb (i.e. before emitting a branch).
1238 * If the stack merge fails at a join point, cfg->unverifiable is set.
/*
 * NOTE(review): numerous interior lines (return type, loop-variable and
 * 'found'/'bindex' declarations, several continue/break statements, brace
 * closings) are missing from this extraction; comments below annotate the
 * visible phases only.
 */
1241 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1244 MonoBasicBlock *bb = cfg->cbb;
1245 MonoBasicBlock *outb;
1246 MonoInst *inst, **locals;
1251 if (cfg->verbose_level > 3)
1252 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* Phase 1: establish bb->out_stack, preferring to adopt a successor's
 * existing in_stack so the join uses a single set of temps. */
1253 if (!bb->out_scount) {
1254 bb->out_scount = count;
1255 //printf ("bblock %d has out:", bb->block_num);
1257 for (i = 0; i < bb->out_count; ++i) {
1258 outb = bb->out_bb [i];
1259 /* exception handlers are linked, but they should not be considered for stack args */
1260 if (outb->flags & BB_EXCEPTION_HANDLER)
1262 //printf (" %d", outb->block_num);
1263 if (outb->in_stack) {
1265 bb->out_stack = outb->in_stack;
/* no successor had an in_stack: allocate fresh temps */
1271 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1272 for (i = 0; i < count; ++i) {
1274 * try to reuse temps already allocated for this purpouse, if they occupy the same
1275 * stack slot and if they are of the same type.
1276 * This won't cause conflicts since if 'local' is used to
1277 * store one of the values in the in_stack of a bblock, then
1278 * the same variable will be used for the same outgoing stack
1280 * This doesn't work when inlining methods, since the bblocks
1281 * in the inlined methods do not inherit their in_stack from
1282 * the bblock they are inlined to. See bug #58863 for an
1285 if (cfg->inlined_method)
1286 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1288 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Phase 2: propagate out_stack into successors' in_stack; a scount
 * mismatch at a join point marks the method unverifiable. */
1293 for (i = 0; i < bb->out_count; ++i) {
1294 outb = bb->out_bb [i];
1295 /* exception handlers are linked, but they should not be considered for stack args */
1296 if (outb->flags & BB_EXCEPTION_HANDLER)
1298 if (outb->in_scount) {
1299 if (outb->in_scount != bb->out_scount) {
1300 cfg->unverifiable = TRUE;
1303 continue; /* check they are the same locals */
1305 outb->in_scount = count;
1306 outb->in_stack = bb->out_stack;
/* Phase 3: emit stores of the live stack values into the shared temps
 * and replace the stack entries with the temps. */
1309 locals = bb->out_stack;
1311 for (i = 0; i < count; ++i) {
1312 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1313 inst->cil_code = sp [i]->cil_code;
1314 sp [i] = locals [i];
1315 if (cfg->verbose_level > 3)
1316 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1320 * It is possible that the out bblocks already have in_stack assigned, and
1321 * the in_stacks differ. In this case, we will store to all the different
1328 /* Find a bblock which has a different in_stack */
1330 while (bindex < bb->out_count) {
1331 outb = bb->out_bb [bindex];
1332 /* exception handlers are linked, but they should not be considered for stack args */
1333 if (outb->flags & BB_EXCEPTION_HANDLER) {
1337 if (outb->in_stack != locals) {
1338 for (i = 0; i < count; ++i) {
1339 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1340 inst->cil_code = sp [i]->cil_code;
1341 sp [i] = locals [i];
1342 if (cfg->verbose_level > 3)
1343 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1345 locals = outb->in_stack;
1354 /* Emit code which loads interface_offsets [klass->interface_id]
1355 * The array is stored in memory before vtable.
/*
 * In AOT mode the (adjusted) interface id is patched in at load time and
 * added to the vtable pointer; otherwise the negative offset is a
 * compile-time constant.
 * NOTE(review): return type, braces and the else keyword are missing in
 * this extraction.
 */
1358 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1360 if (cfg->compile_aot) {
1361 int ioffset_reg = alloc_preg (cfg);
1362 int iid_reg = alloc_preg (cfg);
1364 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1365 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1366 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
1369 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * Emit code which leaves a nonzero value in "intf_bit_reg" iff the interface
 * bitmap found at base_reg+offset has the bit for "klass"'s interface id set.
 * Three strategies: compressed bitmap (icall), AOT (id loaded via patch),
 * and JIT (id is a compile-time constant).
 */
1374 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1376 int ibitmap_reg = alloc_preg (cfg);
1377 #ifdef COMPRESSED_INTERFACE_BITMAP
1379 MonoInst *res, *ins;
1380 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1381 MONO_ADD_INS (cfg->cbb, ins);
/* Compressed bitmaps cannot be indexed inline; call the runtime helper. */
1383 if (cfg->compile_aot)
1384 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1386 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1387 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1388 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1390 int ibitmap_byte_reg = alloc_preg (cfg);
1392 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1394 if (cfg->compile_aot) {
/* AOT: iid is only known at load time, so compute byte index (iid >> 3)
 * and bit mask (1 << (iid & 7)) with emitted arithmetic. */
1395 int iid_reg = alloc_preg (cfg);
1396 int shifted_iid_reg = alloc_preg (cfg);
1397 int ibitmap_byte_address_reg = alloc_preg (cfg);
1398 int masked_iid_reg = alloc_preg (cfg);
1399 int iid_one_bit_reg = alloc_preg (cfg);
1400 int iid_bit_reg = alloc_preg (cfg);
1401 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1402 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1403 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1404 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1405 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1406 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1407 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1408 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: both the byte index and the bit mask fold to constants. */
1410 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1411 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1417 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1418 * stored in "klass_reg" implements the interface "klass".
/* Thin wrapper: checks the class' own interface bitmap. */
1421 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1423 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1427 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1428 * stored in "vtable_reg" implements the interface "klass".
/* Thin wrapper: checks the vtable's interface bitmap. */
1431 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1433 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1437 * Emit code which checks whenever the interface id of @klass is smaller than
1438 * than the value given by max_iid_reg.
/*
 * On failure, branch to "false_target" when one is given; otherwise throw
 * InvalidCastException (NOTE(review): the throwing path is on a line not
 * visible here — the branch/throw choice appears to depend on false_target).
 */
1441 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1442 MonoBasicBlock *false_target)
1444 if (cfg->compile_aot) {
/* AOT: the interface id must be loaded through a patch. */
1445 int iid_reg = alloc_preg (cfg);
1446 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1447 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1450 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1452 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1454 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1457 /* Same as above, but obtains max_iid from a vtable */
1459 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1460 MonoBasicBlock *false_target)
1462 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is a 16-bit field, hence the unsigned 2-byte load. */
1464 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1465 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1468 /* Same as above, but obtains max_iid from a klass */
1470 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1471 MonoBasicBlock *false_target)
1473 int max_iid_reg = alloc_preg (cfg);
/* Same check as the vtable variant, reading MonoClass.max_interface_id. */
1475 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1476 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * Emit an "is instance" test of the class in "klass_reg" against "klass",
 * using the supertypes table: check the inheritance depth, then compare
 * supertypes[klass->idepth - 1] against the target class.  Branches to
 * "true_target" on a match, to "false_target" when the depth check fails.
 * "klass_ins" optionally supplies the target class as a runtime value
 * (used for generic sharing); otherwise a constant comparison is used.
 */
1480 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1482 int idepth_reg = alloc_preg (cfg);
1483 int stypes_reg = alloc_preg (cfg);
1484 int stype = alloc_preg (cfg);
/* Only classes deeper than the default supertable size need an explicit
 * depth check before indexing the supertypes array. */
1486 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1487 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1488 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1489 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1491 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1492 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
/* Compare against a runtime value, an AOT class constant, or an
 * immediate class pointer, in that order of preference. */
1494 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1495 } else if (cfg->compile_aot) {
1496 int const_reg = alloc_preg (cfg);
1497 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1498 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1500 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1502 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst test with no runtime class instruction. */
1506 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1508 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * Emit an interface cast check against the vtable in "vtable_reg":
 * first verify klass->interface_id is within the vtable's max iid, then
 * test the interface bitmap bit.  Branches to "true_target" when the bit
 * is set; otherwise throws InvalidCastException (the branch/throw choice
 * appears to depend on the targets — the selecting line is not visible here).
 */
1512 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1514 int intf_reg = alloc_preg (cfg);
1516 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1517 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1518 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1520 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1522 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1526 * Variant of the above that takes a register to the class, not the vtable.
/* Same structure as mini_emit_iface_cast, but reads the interface bitmap
 * and max iid from a MonoClass instead of a MonoVTable. */
1529 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1531 int intf_bit_reg = alloc_preg (cfg);
1533 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1534 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1535 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1537 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1539 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * Emit an exact class-equality check: compare the class in "klass_reg"
 * against "klass" (or against the runtime value in "klass_inst" when given)
 * and throw InvalidCastException on mismatch.
 */
1543 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1546 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1547 } else if (cfg->compile_aot) {
/* AOT: the class pointer is not known, load it via a class constant patch. */
1548 int const_reg = alloc_preg (cfg);
1549 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1550 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1552 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1554 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact class check with no runtime class instruction. */
1558 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1560 return mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * Compare the class in "klass_reg" against "klass" and branch to "target"
 * using the given branch opcode (e.g. OP_PBEQ / OP_PBNE_UN).
 */
1564 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1566 if (cfg->compile_aot) {
1567 int const_reg = alloc_preg (cfg);
1568 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1569 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1571 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1573 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1577 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * Emit a castclass check of the class in "klass_reg" against "klass",
 * throwing InvalidCastException on failure.  Handles array types (rank and
 * element-class checks, including the enum/underlying-type equivalences)
 * and non-array types (supertypes-table check).  "obj_reg" holds the object
 * for the vector-vs-array bounds check; pass -1 to skip it.  "klass_inst"
 * optionally supplies the target class as a runtime value (generic sharing).
 */
1580 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1583 int rank_reg = alloc_preg (cfg);
1584 int eclass_reg = alloc_preg (cfg);
1586 g_assert (!klass_inst);
/* Arrays must match in rank first. */
1587 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1588 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1589 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1590 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1591 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Element-class checks: the enum_class / enum_class->parent cases cover
 * the CLI rule that enums and their underlying types are cast-compatible. */
1592 if (klass->cast_class == mono_defaults.object_class) {
1593 int parent_reg = alloc_preg (cfg);
1594 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1595 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1596 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1597 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1598 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1599 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1600 } else if (klass->cast_class == mono_defaults.enum_class) {
1601 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1602 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1603 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1605 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1606 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
/* SZARRAY (vector) targets additionally require the object to have no
 * bounds descriptor, i.e. to be a vector rather than a general array. */
1609 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1610 /* Check that the object is a vector too */
1611 int bounds_reg = alloc_preg (cfg);
1612 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1613 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1614 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: supertypes-table check, same scheme as
 * mini_emit_isninst_cast_inst but throwing instead of branching. */
1617 int idepth_reg = alloc_preg (cfg);
1618 int stypes_reg = alloc_preg (cfg);
1619 int stype = alloc_preg (cfg);
1621 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1622 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1623 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1624 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1626 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1627 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1628 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass check with no runtime class instruction. */
1633 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1635 return mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * Emit inline code to set "size" bytes at destreg+offset to "val"
 * (only val == 0 is supported, see the assert).  Small aligned sizes use a
 * single immediate store; larger sizes store from a register in descending
 * chunk sizes (8/4/2/1 bytes), with wide stores only when unaligned access
 * is allowed.
 */
1639 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1643 g_assert (val == 0);
/* Fast path: the whole block fits in one naturally-aligned store. */
1648 if ((size <= 4) && (size <= align)) {
1651 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1654 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1657 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1659 #if SIZEOF_REGISTER == 8
1661 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General path: materialize the value in a register once, then store it
 * repeatedly in progressively smaller chunks. */
1667 val_reg = alloc_preg (cfg);
1669 if (SIZEOF_REGISTER == 8)
1670 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1672 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Unaligned destination: fall back to byte stores. */
1675 /* This could be optimized further if neccesary */
1677 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1684 #if !NO_UNALIGNED_ACCESS
1685 if (SIZEOF_REGISTER == 8) {
/* 64-bit: clear a leading 4-byte remainder, then 8 bytes at a time. */
1687 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1692 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1700 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1705 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1710 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * Emit inline code to copy "size" bytes from srcreg+soffset to
 * destreg+doffset.  Mirrors mini_emit_memset: unaligned copies go byte by
 * byte; otherwise the copy is unrolled in 8/4/2/1-byte load/store pairs
 * (8-byte pairs only on 64-bit with unaligned access allowed).
 */
1717 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1724 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1725 g_assert (size < 10000);
/* Unaligned: copy one byte at a time. */
1728 /* This could be optimized further if neccesary */
1730 cur_reg = alloc_preg (cfg);
1731 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1732 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1739 #if !NO_UNALIGNED_ACCESS
1740 if (SIZEOF_REGISTER == 8) {
/* 64-bit: 8-byte load/store pairs while size permits. */
1742 cur_reg = alloc_preg (cfg);
1743 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1744 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Remaining tail: 4-, then 2-, then 1-byte copies. */
1753 cur_reg = alloc_preg (cfg);
1754 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1755 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1761 cur_reg = alloc_preg (cfg);
1762 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1763 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1769 cur_reg = alloc_preg (cfg);
1770 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1771 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * Map a method return type to the matching call opcode family
 * (plain / _REG for calli / VIRT for virtual calls), choosing between the
 * integer, long (LCALL), float (FCALL), vtype (VCALL) and void variants.
 * Enum and generic-instance types loop back after being reduced to their
 * underlying/container type.
 */
1779 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
1782 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
/* Under generic sharing, reduce type vars to their basic representation. */
1785 type = mini_get_basic_type_from_generic (gsctx, type);
1786 switch (type->type) {
1787 case MONO_TYPE_VOID:
1788 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1791 case MONO_TYPE_BOOLEAN:
1794 case MONO_TYPE_CHAR:
1797 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1801 case MONO_TYPE_FNPTR:
1802 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1803 case MONO_TYPE_CLASS:
1804 case MONO_TYPE_STRING:
1805 case MONO_TYPE_OBJECT:
1806 case MONO_TYPE_SZARRAY:
1807 case MONO_TYPE_ARRAY:
1808 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1811 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1814 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1815 case MONO_TYPE_VALUETYPE:
/* Enums behave like their underlying integral type. */
1816 if (type->data.klass->enumtype) {
1817 type = mono_class_enum_basetype (type->data.klass);
1820 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1821 case MONO_TYPE_TYPEDBYREF:
1822 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1823 case MONO_TYPE_GENERICINST:
/* Re-dispatch on the generic container's open type. */
1824 type = &type->data.generic_class->container_class->byval_arg;
1827 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1833 * target_type_is_incompatible:
1834 * @cfg: MonoCompile context
1836 * Check that the item @arg on the evaluation stack can be stored
1837 * in the target type (can be a local, or field, etc).
1838 * The cfg arg can be used to check if we need verification or just
1841 * Returns: non-0 value if arg can't be stored on a target.
1844 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1846 MonoType *simple_type;
/* Byref targets require a managed pointer (with matching pointee class)
 * or an unmanaged pointer on the stack. */
1849 if (target->byref) {
1850 /* FIXME: check that the pointed to types match */
1851 if (arg->type == STACK_MP)
1852 return arg->klass != mono_class_from_mono_type (target);
1853 if (arg->type == STACK_PTR)
/* Strip enum wrappers etc. before dispatching on the element type. */
1858 simple_type = mono_type_get_underlying_type (target);
1859 switch (simple_type->type) {
1860 case MONO_TYPE_VOID:
1864 case MONO_TYPE_BOOLEAN:
1867 case MONO_TYPE_CHAR:
1870 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1874 /* STACK_MP is needed when setting pinned locals */
1875 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1880 case MONO_TYPE_FNPTR:
1881 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1884 case MONO_TYPE_CLASS:
1885 case MONO_TYPE_STRING:
1886 case MONO_TYPE_OBJECT:
1887 case MONO_TYPE_SZARRAY:
1888 case MONO_TYPE_ARRAY:
1889 if (arg->type != STACK_OBJ)
1891 /* FIXME: check type compatibility */
1895 if (arg->type != STACK_I8)
1900 if (arg->type != STACK_R8)
/* Value types must match the exact class, not just the stack kind. */
1903 case MONO_TYPE_VALUETYPE:
1904 if (arg->type != STACK_VTYPE)
1906 klass = mono_class_from_mono_type (simple_type);
1907 if (klass != arg->klass)
1910 case MONO_TYPE_TYPEDBYREF:
1911 if (arg->type != STACK_VTYPE)
1913 klass = mono_class_from_mono_type (simple_type);
1914 if (klass != arg->klass)
1917 case MONO_TYPE_GENERICINST:
1918 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1919 if (arg->type != STACK_VTYPE)
1921 klass = mono_class_from_mono_type (simple_type);
1922 if (klass != arg->klass)
1926 if (arg->type != STACK_OBJ)
1928 /* FIXME: check type compatibility */
1932 case MONO_TYPE_MVAR:
1933 /* FIXME: all the arguments must be references for now,
1934 * later look inside cfg and see if the arg num is
1935 * really a reference
1937 g_assert (cfg->generic_sharing_context);
1938 if (arg->type != STACK_OBJ)
1942 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1948 * Prepare arguments for passing to a function call.
1949 * Return a non-zero value if the arguments can't be passed to the given
1951 * The type checks are not yet complete and some conversions may need
1952 * casts on 32 or 64 bit architectures.
1954 * FIXME: implement this using target_type_is_incompatible ()
1957 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1959 MonoType *simple_type;
/* The "this" argument (args [0]) must be an object or a pointer. */
1963 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
/* Check every declared parameter against the stack kind of its argument. */
1967 for (i = 0; i < sig->param_count; ++i) {
1968 if (sig->params [i]->byref) {
1969 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1973 simple_type = sig->params [i];
1974 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1976 switch (simple_type->type) {
1977 case MONO_TYPE_VOID:
1982 case MONO_TYPE_BOOLEAN:
1985 case MONO_TYPE_CHAR:
1988 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
1994 case MONO_TYPE_FNPTR:
1995 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
1998 case MONO_TYPE_CLASS:
1999 case MONO_TYPE_STRING:
2000 case MONO_TYPE_OBJECT:
2001 case MONO_TYPE_SZARRAY:
2002 case MONO_TYPE_ARRAY:
2003 if (args [i]->type != STACK_OBJ)
2008 if (args [i]->type != STACK_I8)
2013 if (args [i]->type != STACK_R8)
2016 case MONO_TYPE_VALUETYPE:
/* Enums re-dispatch on their underlying integral type. */
2017 if (simple_type->data.klass->enumtype) {
2018 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2021 if (args [i]->type != STACK_VTYPE)
2024 case MONO_TYPE_TYPEDBYREF:
2025 if (args [i]->type != STACK_VTYPE)
2028 case MONO_TYPE_GENERICINST:
2029 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2033 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * Map a CALLVIRT-family opcode to its direct-call counterpart
 * (used when a virtual call can be statically dispatched).
 */
2041 callvirt_to_call (int opcode)
2046 case OP_VOIDCALLVIRT:
2055 g_assert_not_reached ();
/*
 * Map a CALLVIRT-family opcode to the *_MEMBASE variant, which calls
 * through an address loaded from memory (vtable/IMT slot dispatch).
 */
2062 callvirt_to_call_membase (int opcode)
2066 return OP_CALL_MEMBASE;
2067 case OP_VOIDCALLVIRT:
2068 return OP_VOIDCALL_MEMBASE;
2070 return OP_FCALL_MEMBASE;
2072 return OP_LCALL_MEMBASE;
2074 return OP_VCALL_MEMBASE;
2076 g_assert_not_reached ();
2082 #ifdef MONO_ARCH_HAVE_IMT
/*
 * Pass the IMT "method" argument for an interface call: either the supplied
 * runtime value "imt_arg", or the call's method as an (AOT-patched or
 * immediate) constant.  On architectures with a dedicated IMT register the
 * value is attached as an out-arg; otherwise the arch back end handles it.
 */
2084 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2086 #ifdef MONO_ARCH_IMT_REG
2087 int method_reg = alloc_preg (cfg);
2090 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2091 } else if (cfg->compile_aot) {
2092 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2095 MONO_INST_NEW (cfg, ins, OP_PCONST);
2096 ins->inst_p0 = call->method;
2097 ins->dreg = method_reg;
2098 MONO_ADD_INS (cfg->cbb, ins);
2101 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2103 mono_arch_emit_imt_argument (cfg, call, imt_arg);
2108 static MonoJumpInfo *
/*
 * Allocate a MonoJumpInfo patch descriptor from the given mempool and
 * fill in its target.  Memory is owned by the mempool (no explicit free).
 */
2109 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2111 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2115 ji->data.target = target;
2120 inline static MonoCallInst *
/*
 * Create and populate a MonoCallInst for a call with signature "sig" and
 * arguments "args".  "calli"/"virtual"/"tail" select the opcode family.
 * Handles vtype returns (via vret_addr or an OP_OUTARG_VTRETADDR temp),
 * soft-float r4 argument conversion, and delegates per-arch argument
 * marshalling to mono_arch_emit_call / mono_llvm_emit_call.
 */
2121 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2122 MonoInst **args, int calli, int virtual, int tail)
2125 #ifdef MONO_ARCH_SOFT_FLOAT
2130 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2132 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2135 call->signature = sig;
2137 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
/* Struct return: either reuse the method's vret_addr ... */
2140 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2141 call->vret_var = cfg->vret_addr;
2142 //g_assert_not_reached ();
/* ... or allocate a temp and reference it through OP_OUTARG_VTRETADDR. */
2144 } else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2145 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2148 temp->backend.is_pinvoke = sig->pinvoke;
2151 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2152 * address of return value to increase optimization opportunities.
2153 * Before vtype decomposition, the dreg of the call ins itself represents the
2154 * fact the call modifies the return value. After decomposition, the call will
2155 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2156 * will be transformed into an LDADDR.
2158 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2159 loada->dreg = alloc_preg (cfg);
2160 loada->inst_p0 = temp;
2161 /* We reference the call too since call->dreg could change during optimization */
2162 loada->inst_p1 = call;
2163 MONO_ADD_INS (cfg->cbb, loada);
2165 call->inst.dreg = temp->dreg;
2167 call->vret_var = loada;
2168 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2169 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2171 #ifdef MONO_ARCH_SOFT_FLOAT
2172 if (COMPILE_SOFT_FLOAT (cfg)) {
2174 * If the call has a float argument, we would need to do an r8->r4 conversion using
2175 * an icall, but that cannot be done during the call sequence since it would clobber
2176 * the call registers + the stack. So we do it before emitting the call.
2178 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2180 MonoInst *in = call->args [i];
2182 if (i >= sig->hasthis)
2183 t = sig->params [i - sig->hasthis];
2185 t = &mono_defaults.int_class->byval_arg;
2186 t = mono_type_get_underlying_type (t);
2188 if (!t->byref && t->type == MONO_TYPE_R4) {
2189 MonoInst *iargs [1];
2193 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2195 /* The result will be in an int vreg */
2196 call->args [i] = conv;
/* Back-end argument marshalling: LLVM path when enabled, else arch JIT. */
2203 if (COMPILE_LLVM (cfg))
2204 mono_llvm_emit_call (cfg, call);
2206 mono_arch_emit_call (cfg, call);
2208 mono_arch_emit_call (cfg, call);
/* Track the largest outgoing-parameter area needed by any call. */
2211 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2212 cfg->flags |= MONO_CFG_HAS_CALLS;
2217 inline static MonoInst*
/*
 * Emit an indirect call (CIL calli) through the address in "addr".
 * Builds the call with the _REG opcode family and appends it to the
 * current basic block.
 */
2218 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2220 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE);
2222 call->inst.sreg1 = addr->dreg;
2224 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2226 return (MonoInst*)call;
2229 inline static MonoInst*
/*
 * Emit an indirect call that also passes a runtime-generic-context argument
 * in the dedicated RGCTX register.  Only available on architectures that
 * define MONO_ARCH_RGCTX_REG; otherwise unreachable.
 */
2230 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2232 #ifdef MONO_ARCH_RGCTX_REG
2237 rgctx_reg = mono_alloc_preg (cfg);
2238 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2240 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2242 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2243 cfg->uses_rgctx_reg = TRUE;
2244 call->rgctx_reg = TRUE;
2246 return (MonoInst*)call;
2248 g_assert_not_reached ();
2254 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
2256 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, int rgctx_type);
/*
 * Emit a managed method call.  Chooses among: remoting-wrapper calls for
 * possibly-remote objects, direct calls for non-virtual or sealed methods,
 * delegate Invoke fast path, and vtable/IMT-slot indirect dispatch for
 * true virtual and interface calls.  "this" being non-NULL marks the call
 * as virtual; "imt_arg" optionally carries a runtime IMT method argument.
 */
2259 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2260 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2262 gboolean might_be_remote;
2263 gboolean virtual = this != NULL;
2264 gboolean enable_for_aot = TRUE;
/* String ctors are declared void but actually return the string. */
2268 if (method->string_ctor) {
2269 /* Create the real signature */
2270 /* FIXME: Cache these */
2271 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2272 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
/* MarshalByRef (or object) receivers may be transparent proxies. */
2277 might_be_remote = this && sig->hasthis &&
2278 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2279 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this);
2281 context_used = mono_method_check_context_used (method);
/* Shared-generic + possibly-remote: go through an rgctx-resolved
 * remoting-invoke-with-check stub via an indirect call. */
2282 if (might_be_remote && context_used) {
2285 g_assert (cfg->generic_sharing_context);
2287 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2289 return mono_emit_calli (cfg, sig, args, addr);
2292 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE);
2294 if (might_be_remote)
2295 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2297 call->method = method;
2298 call->inst.flags |= MONO_INST_HAS_METHOD;
2299 call->inst.inst_left = this;
2302 int vtable_reg, slot_reg, this_reg;
2304 this_reg = this->dreg;
2306 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
/* Delegate Invoke fast path: call through delegate->invoke_impl. */
2307 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2308 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2310 /* Make a call to delegate->invoke_impl */
2311 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2312 call->inst.inst_basereg = this_reg;
2313 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2314 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2316 return (MonoInst*)call;
/* Static dispatch when the target can't change: non-virtual method, or
 * final method not going through a remoting wrapper. */
2320 if ((!cfg->compile_aot || enable_for_aot) &&
2321 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2322 (MONO_METHOD_IS_FINAL (method) &&
2323 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2324 !(method->klass->marshalbyref && context_used)) {
2326 * the method is not virtual, we just need to ensure this is not null
2327 * and then we can call the method directly.
2329 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2331 * The check above ensures method is not gshared, this is needed since
2332 * gshared methods can't have wrappers.
2334 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2337 if (!method->string_ctor)
2338 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2340 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2342 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2344 return (MonoInst*)call;
2347 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2349 * the method is virtual, but we can statically dispatch since either
2350 * it's class or the method itself are sealed.
2351 * But first we need to ensure it's not a null reference.
2353 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2355 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2356 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2358 return (MonoInst*)call;
/* True virtual dispatch: load the vtable (with implicit null check via
 * fault), then call through an IMT slot (interfaces) or vtable slot. */
2361 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2363 vtable_reg = alloc_preg (cfg);
2364 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2365 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2367 #ifdef MONO_ARCH_HAVE_IMT
/* IMT slots sit at negative offsets before the vtable. */
2369 guint32 imt_slot = mono_method_get_imt_slot (method);
2370 emit_imt_argument (cfg, call, imt_arg);
2371 slot_reg = vtable_reg;
2372 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2375 if (slot_reg == -1) {
/* No IMT: resolve the interface offset, then index by vtable slot. */
2376 slot_reg = alloc_preg (cfg);
2377 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2378 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2381 slot_reg = vtable_reg;
2382 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2383 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2384 #ifdef MONO_ARCH_HAVE_IMT
/* Generic virtual methods also go through the IMT mechanism. */
2386 g_assert (mono_method_signature (method)->generic_param_count);
2387 emit_imt_argument (cfg, call, imt_arg);
2392 call->inst.sreg1 = slot_reg;
2393 call->virtual = TRUE;
2396 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2398 return (MonoInst*)call;
/*
 * Like mono_emit_method_call_full, but additionally passes "vtable_arg"
 * in the RGCTX register (runtime generic context for shared code).
 * Requires MONO_ARCH_RGCTX_REG.
 */
2402 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2403 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
2405 #ifdef MONO_ARCH_RGCTX_REG
2412 #ifdef MONO_ARCH_RGCTX_REG
/* Move the context value into a fresh vreg before emitting the call so it
 * can be attached as a fixed-register out-arg. */
2413 rgctx_reg = mono_alloc_preg (cfg);
2414 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2419 ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2421 call = (MonoCallInst*)ins;
2423 #ifdef MONO_ARCH_RGCTX_REG
2424 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2425 cfg->uses_rgctx_reg = TRUE;
2426 call->rgctx_reg = TRUE;
/* Convenience wrapper: method call using the method's own signature,
 * with no IMT argument. */
2436 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2438 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
/*
 * Emit a direct call to a native function address "func" with the given
 * signature and arguments.
 */
2442 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2449 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE);
2452 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2454 return (MonoInst*)call;
/*
 * Emit a call to a registered JIT icall identified by its function address.
 * The icall's wrapper and signature are looked up from the icall registry.
 */
2458 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2460 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2464 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2468 * mono_emit_abs_call:
2470 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2472 inline static MonoInst*
2473 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2474 MonoMethodSignature *sig, MonoInst **args)
2476 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2480 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Record the patch so the ABS address resolver can map ji back to the
 * real target at patch time. */
2483 if (cfg->abs_patches == NULL)
2484 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2485 g_hash_table_insert (cfg->abs_patches, ji, ji);
2486 ins = mono_emit_native_call (cfg, ji, sig, args);
2487 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * Widen a call's small-integer result to register width when needed.
 * P/Invoke (and LLVM) callees may leave the upper bits of sub-register
 * integer returns uninitialized, so emit the matching sign/zero extension.
 */
2492 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2494 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2495 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2499 * Native code might return non register sized integers
2500 * without initializing the upper bits.
/* Pick the widening conversion matching the return's load width;
 * signed loads get sign-extension, unsigned get zero-extension. */
2502 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2503 case OP_LOADI1_MEMBASE:
2504 widen_op = OP_ICONV_TO_I1;
2506 case OP_LOADU1_MEMBASE:
2507 widen_op = OP_ICONV_TO_U1;
2509 case OP_LOADI2_MEMBASE:
2510 widen_op = OP_ICONV_TO_I2;
2512 case OP_LOADU2_MEMBASE:
2513 widen_op = OP_ICONV_TO_U2;
2519 if (widen_op != -1) {
2520 int dreg = alloc_preg (cfg);
2523 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2524 widen->type = ins->type;
/*
 * Lazily look up and cache String.memcpy(void*,void*,int) from corlib.
 * Aborts via g_error if the method is missing (too-old corlib).
 * NOTE(review): the lazy init is unsynchronized — presumably benign since
 * the lookup is idempotent; confirm against the JIT's locking rules.
 */
2534 get_memcpy_method (void)
2536 static MonoMethod *memcpy_method = NULL;
2537 if (!memcpy_method) {
2538 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2540 g_error ("Old corlib found. Install a new one");
2542 return memcpy_method;
2546  * Emit code to copy a valuetype of type @klass whose address is stored in
2547  * @src->dreg to memory whose address is stored at @dest->dreg.
2550 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2552 MonoInst *iargs [3];
2555 MonoMethod *memcpy_method;
2559 * This check breaks with spilled vars... need to handle it during verification anyway.
2560 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* NATIVE selects the marshalled layout size instead of the managed one. */
2564 n = mono_class_native_size (klass, &align);
2566 n = mono_class_value_size (klass, &align);
2568 #if HAVE_WRITE_BARRIERS
2569 /* if native is true there should be no references in the struct */
2570 if (klass->has_references && !native) {
2571 /* Avoid barriers when storing to the stack */
2572 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2573 (dest->opcode == OP_LDADDR))) {
2574 int context_used = 0;
2579 if (cfg->generic_sharing_context)
2580 context_used = mono_class_check_context_used (klass);
/* Shared generic code: fetch the klass from the rgctx at runtime. */
2582 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2584 if (cfg->compile_aot) {
2585 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2587 EMIT_NEW_PCONST (cfg, iargs [2], klass);
2588 mono_class_compute_gc_descriptor (klass);
2592 /* FIXME: this does the memcpy as well (or
2593 should), so we don't need the memcpy
2595 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* Small copies: inline the memcpy instead of calling the helper. */
2600 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2601 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2602 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2606 EMIT_NEW_ICONST (cfg, iargs [2], n);
2608 memcpy_method = get_memcpy_method ();
2609 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * Lazily look up and cache String.memset(void*,int,int) from corlib.
 * Aborts via g_error if the method is missing (too-old corlib).
 */
2614 get_memset_method (void)
2616 static MonoMethod *memset_method = NULL;
2617 if (!memset_method) {
2618 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2620 g_error ("Old corlib found. Install a new one");
2622 return memset_method;
/*
 * Emit code to zero-initialize a valuetype of type @klass at the address in
 * @dest->dreg (CIL initobj). Small sizes are inlined; larger ones call the
 * corlib memset helper.
 */
2626 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2628 MonoInst *iargs [3];
2631 MonoMethod *memset_method;
2633 /* FIXME: Optimize this for the case when dest is an LDADDR */
2635 mono_class_init (klass);
2636 n = mono_class_value_size (klass, &align);
2638 if (n <= sizeof (gpointer) * 5) {
2639 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2642 memset_method = get_memset_method ();
2644 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2645 EMIT_NEW_ICONST (cfg, iargs [2], n);
2646 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * Emit IR to load the runtime generic context for METHOD. The source of the
 * rgctx depends on how the method is shared:
 *   - method-inflated (MRGCTX): load the saved mrgctx variable;
 *   - static/valuetype methods: load the saved vtable variable (and for
 *     inflated methods, the vtable is loaded out of the mrgctx);
 *   - otherwise: load the vtable from `this`.
 */
2651 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2653 MonoInst *this = NULL;
2655 g_assert (cfg->generic_sharing_context);
2657 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2658 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2659 !method->klass->valuetype)
2660 EMIT_NEW_ARGLOAD (cfg, this, 0);
2662 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2663 MonoInst *mrgctx_loc, *mrgctx_var;
2666 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2668 mrgctx_loc = mono_get_vtable_var (cfg);
2669 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2672 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2673 MonoInst *vtable_loc, *vtable_var;
2677 vtable_loc = mono_get_vtable_var (cfg);
2678 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2680 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
/* The saved variable actually holds an mrgctx; extract the vtable from it. */
2681 MonoInst *mrgctx_var = vtable_var;
2684 vtable_reg = alloc_preg (cfg);
2685 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2686 vtable_var->type = STACK_PTR;
/* Instance method on a reference type: read the vtable from `this`. */
2692 int vtable_reg, res_reg;
2694 vtable_reg = alloc_preg (cfg);
2695 res_reg = alloc_preg (cfg);
2696 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * Allocate (from MP) a jump-info entry describing an rgctx slot fetch:
 * which method's context, whether it is an mrgctx, the patch describing the
 * data to look up, and the kind of info requested (INFO_TYPE).
 */
2701 static MonoJumpInfoRgctxEntry *
2702 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2704 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2705 res->method = method;
2706 res->in_mrgctx = in_mrgctx;
2707 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2708 res->data->type = patch_type;
2709 res->data->data.target = patch_data;
2710 res->info_type = info_type;
/*
 * Emit a call to the lazy rgctx-fetch trampoline which resolves ENTRY from
 * the given rgctx at runtime, caching the result in the context.
 */
2715 static inline MonoInst*
2716 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2718 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * Emit IR to load the property RGCTX_TYPE (e.g. KLASS, VTABLE) of KLASS
 * from the runtime generic context of the current method.
 */
2722 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2723 MonoClass *klass, int rgctx_type)
2725 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2726 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2728 return emit_rgctx_fetch (cfg, rgctx, entry);
2732  * emit_get_rgctx_method:
2734  * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2735  * normal constants, else emit a load from the rgctx.
2738 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2739 MonoMethod *cmethod, int rgctx_type)
2741 if (!context_used) {
/* No sharing: the method is fully known at compile time, emit a constant. */
2744 switch (rgctx_type) {
2745 case MONO_RGCTX_INFO_METHOD:
2746 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
2748 case MONO_RGCTX_INFO_METHOD_RGCTX:
2749 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
2752 g_assert_not_reached ();
2755 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2756 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2758 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * Emit IR to load the property RGCTX_TYPE of FIELD from the runtime generic
 * context of the current method.
 */
2763 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2764 MonoClassField *field, int rgctx_type)
2766 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2767 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2769 return emit_rgctx_fetch (cfg, rgctx, entry);
2773  * On return the caller must check @klass for load errors.
/*
 * Emit a call to the generic class-init trampoline for KLASS. The vtable is
 * passed in a fixed register (MONO_ARCH_VTABLE_REG) when the backend defines
 * one; under generic sharing the vtable is fetched from the rgctx instead of
 * being a constant.
 */
2776 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
2778 MonoInst *vtable_arg;
2780 int context_used = 0;
2782 if (cfg->generic_sharing_context)
2783 context_used = mono_class_check_context_used (klass);
2786 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
2787 klass, MONO_RGCTX_INFO_VTABLE);
2789 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2793 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
2796 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
2797 #ifdef MONO_ARCH_VTABLE_REG
2798 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
2799 cfg->uses_vtable_reg = TRUE;
2806  * On return the caller must check @array_class for load errors
/*
 * Emit a runtime check that OBJ's vtable matches ARRAY_CLASS, throwing
 * ArrayTypeMismatchException otherwise (used for stelem covariance checks).
 * The comparison strategy depends on shared/AOT mode: compare the klass
 * (MONO_OPT_SHARED), an rgctx-fetched vtable (shared generics), or a
 * constant vtable/klass otherwise.
 */
2809 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
2811 int vtable_reg = alloc_preg (cfg);
2812 int context_used = 0;
2814 if (cfg->generic_sharing_context)
2815 context_used = mono_class_check_context_used (array_class);
2817 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
2819 if (cfg->opt & MONO_OPT_SHARED) {
2820 int class_reg = alloc_preg (cfg);
2821 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2822 if (cfg->compile_aot) {
2823 int klass_reg = alloc_preg (cfg);
2824 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
2825 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
2827 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
2829 } else if (context_used) {
2830 MonoInst *vtable_ins;
2832 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
2833 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
2835 if (cfg->compile_aot) {
/* Vtable lookup can fail on class load errors; caller must check array_class. */
2839 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
2841 vt_reg = alloc_preg (cfg);
2842 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
2843 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
2846 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
2848 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
2852 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
/*
 * When --debug=casts is enabled, record the source and target classes of an
 * impending cast into the JIT TLS area so a failed cast can produce a
 * detailed error message. No-op unless better_cast_details is on.
 * NOTE(review): the fprintf message ends with "\n." — the trailing period
 * after the newline looks like a typo; confirm and drop the stray dot.
 */
2856 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
2858 if (mini_get_debug_options ()->better_cast_details) {
2859 int to_klass_reg = alloc_preg (cfg);
2860 int vtable_reg = alloc_preg (cfg);
2861 int klass_reg = alloc_preg (cfg);
2862 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2865 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
2869 MONO_ADD_INS (cfg->cbb, tls_get);
/* klass_reg := obj->vtable->klass, i.e. the runtime class being cast from. */
2870 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2871 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2873 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
2874 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
2875 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * Clear the cast-details slots recorded by save_cast_details() once the
 * cast has succeeded. No-op unless better_cast_details is on.
 */
2880 reset_cast_details (MonoCompile *cfg)
2882 /* Reset the variables holding the cast details */
2883 if (mini_get_debug_options ()->better_cast_details) {
2884 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2886 MONO_ADD_INS (cfg->cbb, tls_get);
2887 /* It is enough to reset the from field */
2888 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
2893  * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
2894  * generic code is generated.
2897 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
2899 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
2902 MonoInst *rgctx, *addr;
2904 /* FIXME: What if the class is shared? We might not
2905 have to get the address of the method from the
/* Shared generics: resolve Nullable<T>.Unbox's code address via the rgctx
 * and call indirectly, passing the rgctx along. */
2907 addr = emit_get_rgctx_method (cfg, context_used, method,
2908 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2910 rgctx = emit_get_rgctx (cfg, method, context_used);
2912 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
2914 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * Emit the IR for CIL `unbox`: check that sp[0] is a boxed instance of
 * KLASS (rank 0 only; the rank and element-class checks fault with
 * InvalidCastException), then compute the address of the value by adding
 * sizeof(MonoObject) to the object pointer.
 */
2919 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
2923 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
2924 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
2925 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
2926 int rank_reg = alloc_dreg (cfg ,STACK_I4);
2928 obj_reg = sp [0]->dreg;
/* Faulting load: also acts as the null check on the object. */
2929 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2930 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2932 /* FIXME: generics */
2933 g_assert (klass->rank == 0);
/* The boxed object must not be an array. */
2936 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
2937 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2939 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2940 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
2943 MonoInst *element_class;
2945 /* This assertion is from the unboxcast insn */
2946 g_assert (klass->rank == 0);
2948 element_class = emit_get_rgctx_klass (cfg, context_used,
2949 klass->element_class, MONO_RGCTX_INFO_KLASS);
2951 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
2952 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2954 save_cast_details (cfg, klass->element_class, obj_reg);
2955 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
2956 reset_cast_details (cfg);
/* Result: pointer to the value payload, just past the MonoObject header. */
2959 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
2960 MONO_ADD_INS (cfg->cbb, add);
2961 add->type = STACK_MP;
2968  * Returns NULL and set the cfg exception on error.
/*
 * Emit IR that allocates a new object of KLASS (FOR_BOX selects the boxing
 * allocator). Picks the cheapest applicable allocation path: shared-domain
 * helper, rgctx-resolved vtable, AOT mscorlib helper, GC managed allocator,
 * or the generic allocation function.
 */
2971 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
2973 MonoInst *iargs [2];
2979 MonoInst *iargs [2];
2982 FIXME: we cannot get managed_alloc here because we can't get
2983 the class's vtable (because it's not a closed class)
2985 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2986 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
/* Shared generics: resolve klass (shared domain) or vtable via the rgctx. */
2989 if (cfg->opt & MONO_OPT_SHARED)
2990 rgctx_info = MONO_RGCTX_INFO_KLASS;
2992 rgctx_info = MONO_RGCTX_INFO_VTABLE;
2993 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
2995 if (cfg->opt & MONO_OPT_SHARED) {
2996 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2998 alloc_ftn = mono_object_new;
3001 alloc_ftn = mono_object_new_specific;
3004 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3007 if (cfg->opt & MONO_OPT_SHARED) {
3008 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3009 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3011 alloc_ftn = mono_object_new;
3012 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3013 /* This happens often in argument checking code, eg. throw new FooException... */
3014 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3015 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3016 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3018 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3019 MonoMethod *managed_alloc = NULL;
/* Class failed to load: record the exception in the cfg and bail out. */
3023 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3024 cfg->exception_ptr = klass;
3028 #ifndef MONO_CROSS_COMPILE
3029 managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3032 if (managed_alloc) {
3033 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3034 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3036 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocators take the instance size in pointer-words as the first arg. */
3038 guint32 lw = vtable->klass->instance_size;
3039 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3040 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3041 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3044 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3048 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3052  * Returns NULL and set the cfg exception on error.
/*
 * Emit IR for CIL `box`: Nullable<T> goes through Nullable.Box (possibly via
 * an rgctx-resolved indirect call under shared generics); everything else
 * allocates the object and stores VAL into its payload.
 */
3055 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
3057 MonoInst *alloc, *ins;
3059 if (mono_class_is_nullable (klass)) {
3060 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3063 /* FIXME: What if the class is shared? We might not
3064 have to get the method address from the RGCTX. */
3065 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3066 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3067 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3069 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3071 return mono_emit_method_call (cfg, method, &val, NULL);
3075 alloc = handle_alloc (cfg, klass, TRUE, context_used);
/* Store the value just past the MonoObject header of the new box. */
3079 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3084 // FIXME: This doesn't work yet (class libs tests fail?)
/* Predicate: classes whose isinst/castclass cannot be done with a simple
 * inline vtable/klass comparison (interfaces, arrays, nullables, remoting
 * proxies, variant generics, type variables). The leading TRUE currently
 * forces EVERY class down the icall slow path — see the FIXME above. */
3085 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || mono_class_has_variant_generic_params (klass) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
3088  * Returns NULL and set the cfg exception on error.
/*
 * Emit IR for CIL `castclass`: null passes through; otherwise the object's
 * class is checked against KLASS, throwing InvalidCastException on mismatch.
 * Shared-generic casts pass an rgctx-resolved klass; complex classes (per
 * is_complex_isinst) go through the mono_object_castclass icall.
 */
3091 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3093 MonoBasicBlock *is_null_bb;
3094 int obj_reg = src->dreg;
3095 int vtable_reg = alloc_preg (cfg);
3096 MonoInst *klass_inst = NULL;
3101 klass_inst = emit_get_rgctx_klass (cfg, context_used,
3102 klass, MONO_RGCTX_INFO_KLASS);
3104 if (is_complex_isinst (klass)) {
3105 /* Complex case, handle by an icall */
3111 args [1] = klass_inst;
3113 return mono_emit_jit_icall (cfg, mono_object_castclass, args);
3115 /* Simple case, handled by the code below */
3119 NEW_BBLOCK (cfg, is_null_bb);
/* Null references always pass a castclass. */
3121 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3122 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3124 save_cast_details (cfg, klass, obj_reg);
3126 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3127 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3128 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3130 int klass_reg = alloc_preg (cfg);
3132 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array classes can be checked with a single klass comparison. */
3134 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3135 /* the remoting code is broken, access the class for now */
3136 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3137 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3139 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3140 cfg->exception_ptr = klass;
3143 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3145 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3146 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3148 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3150 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3151 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3155 MONO_START_BB (cfg, is_null_bb);
3157 reset_cast_details (cfg);
3163  * Returns NULL and set the cfg exception on error.
/*
 * Emit IR for CIL `isinst`: result is SRC if the object is an instance of
 * KLASS, NULL otherwise (null input yields null). Complex classes go
 * through the mono_object_isinst icall; the inline path dispatches on
 * interface / array / nullable / sealed / general class shapes.
 * Control-flow shape: is_null_bb keeps the object in res_reg (success),
 * false_bb stores NULL, both fall into end_bb.
 */
3166 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3169 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3170 int obj_reg = src->dreg;
3171 int vtable_reg = alloc_preg (cfg);
3172 int res_reg = alloc_preg (cfg);
3173 MonoInst *klass_inst = NULL;
3176 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3178 if (is_complex_isinst (klass)) {
3181 /* Complex case, handle by an icall */
3187 args [1] = klass_inst;
3189 return mono_emit_jit_icall (cfg, mono_object_isinst, args);
3191 /* Simple case, the code below can handle it */
3195 NEW_BBLOCK (cfg, is_null_bb);
3196 NEW_BBLOCK (cfg, false_bb);
3197 NEW_BBLOCK (cfg, end_bb);
3199 /* Do the assignment at the beginning, so the other assignment can be if converted */
3200 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3201 ins->type = STACK_OBJ;
3204 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3205 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3207 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3209 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3210 g_assert (!context_used);
3211 /* the is_null_bb target simply copies the input register to the output */
3212 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3214 int klass_reg = alloc_preg (cfg);
/* Array case: check rank, then the element (cast) class. */
3217 int rank_reg = alloc_preg (cfg);
3218 int eclass_reg = alloc_preg (cfg);
3220 g_assert (!context_used);
3221 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3222 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3223 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3224 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3225 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Special-case element classes with non-trivial assignability (object,
 * Enum's parent, Enum, interface elements) before the generic check. */
3226 if (klass->cast_class == mono_defaults.object_class) {
3227 int parent_reg = alloc_preg (cfg);
3228 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3229 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3230 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3231 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3232 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3233 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3234 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3235 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3236 } else if (klass->cast_class == mono_defaults.enum_class) {
3237 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3238 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3239 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3240 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3242 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3243 /* Check that the object is a vector too */
3244 int bounds_reg = alloc_preg (cfg);
3245 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3246 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3247 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3250 /* the is_null_bb target simply copies the input register to the output */
3251 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3253 } else if (mono_class_is_nullable (klass)) {
3254 g_assert (!context_used);
3255 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3256 /* the is_null_bb target simply copies the input register to the output */
3257 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
3259 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3260 g_assert (!context_used);
3261 /* the remoting code is broken, access the class for now */
3262 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3263 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3265 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3266 cfg->exception_ptr = klass;
3269 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3271 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3272 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3274 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3275 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3277 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3278 /* the is_null_bb target simply copies the input register to the output */
3279 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* Failure path: result register holds NULL. */
3284 MONO_START_BB (cfg, false_bb);
3286 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3287 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3289 MONO_START_BB (cfg, is_null_bb);
3291 MONO_START_BB (cfg, end_bb);
/*
 * Emit IR for the internal CEE_MONO_CISINST opcode (remoting-aware isinst);
 * result encoding is described in the comment below. The proxy branches
 * inspect MonoTransparentProxy fields to decide whether the type can be
 * determined locally.
 */
3297 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3299 /* This opcode takes as input an object reference and a class, and returns:
3300 0) if the object is an instance of the class,
3301 1) if the object is not instance of the class,
3302 2) if the object is a proxy whose type cannot be determined */
3305 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3306 int obj_reg = src->dreg;
3307 int dreg = alloc_ireg (cfg);
3309 int klass_reg = alloc_preg (cfg);
3311 NEW_BBLOCK (cfg, true_bb);
3312 NEW_BBLOCK (cfg, false_bb);
3313 NEW_BBLOCK (cfg, false2_bb);
3314 NEW_BBLOCK (cfg, end_bb);
3315 NEW_BBLOCK (cfg, no_proxy_bb);
/* Null is "not an instance" (result 1). */
3317 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3318 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3320 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3321 NEW_BBLOCK (cfg, interface_fail_bb);
3323 tmp_reg = alloc_preg (cfg);
3324 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3325 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
3326 MONO_START_BB (cfg, interface_fail_bb);
3327 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Interface miss on a proxy without custom type info -> undetermined (2). */
3329 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3331 tmp_reg = alloc_preg (cfg);
3332 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3333 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3334 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
3336 tmp_reg = alloc_preg (cfg);
3337 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3338 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Non-interface: a transparent proxy is checked against its remote proxy_class. */
3340 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3341 tmp_reg = alloc_preg (cfg);
3342 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3343 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3345 tmp_reg = alloc_preg (cfg);
3346 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3347 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3348 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3350 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3351 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3353 MONO_START_BB (cfg, no_proxy_bb);
3355 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Materialize the three result codes and join at end_bb. */
3358 MONO_START_BB (cfg, false_bb);
3360 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3361 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3363 MONO_START_BB (cfg, false2_bb);
3365 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3366 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3368 MONO_START_BB (cfg, true_bb);
3370 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3372 MONO_START_BB (cfg, end_bb);
3375 MONO_INST_NEW (cfg, ins, OP_ICONST);
3377 ins->type = STACK_I4;
/*
 * Emit IR for the internal CEE_MONO_CCASTCLASS opcode (remoting-aware
 * castclass); result encoding is described in the comment below. Throws
 * InvalidCastException on a definite mismatch; proxies whose type cannot
 * be determined locally yield 1 so the caller can do a remote check.
 */
3383 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3385 /* This opcode takes as input an object reference and a class, and returns:
3386 0) if the object is an instance of the class,
3387 1) if the object is a proxy whose type cannot be determined
3388 an InvalidCastException exception is thrown otherwhise*/
3391 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3392 int obj_reg = src->dreg;
3393 int dreg = alloc_ireg (cfg);
3394 int tmp_reg = alloc_preg (cfg);
3395 int klass_reg = alloc_preg (cfg);
3397 NEW_BBLOCK (cfg, end_bb);
3398 NEW_BBLOCK (cfg, ok_result_bb);
/* Null always passes the cast (result 0). */
3400 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3401 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
3403 save_cast_details (cfg, klass, obj_reg);
3405 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3406 NEW_BBLOCK (cfg, interface_fail_bb);
3408 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3409 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3410 MONO_START_BB (cfg, interface_fail_bb);
3411 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Interface miss: only a transparent proxy may escape the exception. */
3413 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3415 tmp_reg = alloc_preg (cfg);
3416 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3417 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3418 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
3420 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3421 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3424 NEW_BBLOCK (cfg, no_proxy_bb);
3426 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3427 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3428 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Transparent proxy: check against the remote class's proxy_class. */
3430 tmp_reg = alloc_preg (cfg);
3431 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3432 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3434 tmp_reg = alloc_preg (cfg);
3435 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3436 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3437 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3439 NEW_BBLOCK (cfg, fail_1_bb);
3441 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3443 MONO_START_BB (cfg, fail_1_bb);
3445 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3446 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3448 MONO_START_BB (cfg, no_proxy_bb);
3450 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3453 MONO_START_BB (cfg, ok_result_bb);
3455 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3457 MONO_START_BB (cfg, end_bb);
3460 MONO_INST_NEW (cfg, ins, OP_ICONST);
3462 ins->type = STACK_I4;
3468  * Returns NULL and set the cfg exception on error.
/*
 * Inline the body of mono_delegate_ctor: allocate the delegate object and
 * fill its target, method, method_code and invoke_impl fields. Returns the
 * new delegate instruction.
 */
3470 static G_GNUC_UNUSED MonoInst*
3471 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
3473 gpointer *trampoline;
3474 MonoInst *obj, *method_ins, *tramp_ins;
3478 obj = handle_alloc (cfg, klass, FALSE, 0);
3482 /* Inline the contents of mono_delegate_ctor */
3484 /* Set target field */
3485 /* Optimize away setting of NULL target */
3486 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
3487 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3489 /* Set method field */
3490 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
3491 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3494 * To avoid looking up the compiled code belonging to the target method
3495 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3496 * store it, and we fill it after the method has been compiled.
3498 if (!cfg->compile_aot && !method->dynamic) {
3499 MonoInst *code_slot_ins;
3502 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* The code slot is cached per (domain, method) under the domain lock. */
3504 domain = mono_domain_get ();
3505 mono_domain_lock (domain);
3506 if (!domain_jit_info (domain)->method_code_hash)
3507 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3508 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3510 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3511 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3513 mono_domain_unlock (domain);
3515 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3517 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3520 /* Set invoke_impl field */
3521 if (cfg->compile_aot) {
3522 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3524 trampoline = mono_create_delegate_trampoline (klass);
3525 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3527 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3529 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a native call to the mono_array_new_va () icall wrapper which
 * allocates a multi-dimensional array of rank RANK; the call arguments are
 * taken from SP. Marks the cfg as containing varargs and disables LLVM,
 * since the icall uses a vararg calling convention.
 */
3535 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3537 MonoJitICallInfo *info;
3539 /* Need to register the icall so it gets an icall wrapper */
3540 info = mono_get_array_new_va_icall (rank);
3542 cfg->flags |= MONO_CFG_HAS_VARARGS;
3544 /* mono_array_new_va () needs a vararg calling convention */
3545 cfg->disable_llvm = TRUE;
3547 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3548 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Emit the OP_LOAD_GOTADDR instruction which initializes cfg->got_var, and
 * prepend it to the entry basic block so the GOT address is available before
 * any instruction that needs it. A dummy use is appended to the exit block to
 * keep the variable live for the whole method. No-op when there is no
 * got_var or it was already allocated.
 */
3552 mono_emit_load_got_addr (MonoCompile *cfg)
3554 MonoInst *getaddr, *dummy_use;
3556 if (!cfg->got_var || cfg->got_var_allocated)
3559 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3560 getaddr->dreg = cfg->got_var->dreg;
3562 /* Add it to the start of the first bblock */
/* Manually prepend when the entry block already has code, since MONO_ADD_INS appends. */
3563 if (cfg->bb_entry->code) {
3564 getaddr->next = cfg->bb_entry->code;
3565 cfg->bb_entry->code = getaddr;
3568 MONO_ADD_INS (cfg->bb_entry, getaddr);
3570 cfg->got_var_allocated = TRUE;
3573 * Add a dummy use to keep the got_var alive, since real uses might
3574 * only be generated by the back ends.
3575 * Add it to end_bblock, so the variable's lifetime covers the whole
3577 * It would be better to make the usage of the got var explicit in all
3578 * cases when the backend needs it (i.e. calls, throw etc.), so this
3579 * wouldn't be needed.
3581 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3582 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* IL-size limit for inlining; initialized lazily from the MONO_INLINELIMIT
 * environment variable, falling back to INLINE_LENGTH_LIMIT.
 * NOTE(review): the lazy init below is not obviously thread-safe — presumably
 * callers hold the JIT lock; confirm before relying on it. */
3585 static int inline_limit;
3586 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD is eligible for inlining into the method being
 * compiled in CFG. Checks (in order): generic sharing, inline depth,
 * LMF-op shortcuts, header summary availability, method/class attributes
 * (noinlining, synchronized, marshalbyref), IL size against inline_limit,
 * class cctor/initialization constraints, declarative security, and (with
 * soft-float) R4 parameters/returns which the inliner cannot handle.
 */
3589 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3591 MonoMethodHeaderSummary header;
3593 #ifdef MONO_ARCH_SOFT_FLOAT
3594 MonoMethodSignature *sig = mono_method_signature (method);
3598 if (cfg->generic_sharing_context)
3601 if (cfg->inline_depth > 10)
3604 #ifdef MONO_ARCH_HAVE_LMF_OPS
3605 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3606 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3607 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3612 if (!mono_method_get_header_summary (method, &header))
3615 /*runtime, icall and pinvoke are checked by summary call*/
3616 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3617 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3618 (method->klass->marshalbyref) ||
3622 /* also consider num_locals? */
3623 /* Do the size check early to avoid creating vtables */
3624 if (!inline_limit_inited) {
3625 if (getenv ("MONO_INLINELIMIT"))
3626 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3628 inline_limit = INLINE_LENGTH_LIMIT;
3629 inline_limit_inited = TRUE;
3631 if (header.code_size >= inline_limit)
3635 * if we can initialize the class of the method right away, we do,
3636 * otherwise we don't allow inlining if the class needs initialization,
3637 * since it would mean inserting a call to mono_runtime_class_init()
3638 * inside the inlined code
3640 if (!(cfg->opt & MONO_OPT_SHARED)) {
3641 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3642 if (cfg->run_cctors && method->klass->has_cctor) {
3643 /*FIXME it would be easier and lazier to just use mono_class_try_get_vtable */
3644 if (!method->klass->runtime_info)
3645 /* No vtable created yet */
3647 vtable = mono_class_vtable (cfg->domain, method->klass);
3650 /* This makes so that inline cannot trigger */
3651 /* .cctors: too many apps depend on them */
3652 /* running with a specific order... */
3653 if (! vtable->initialized)
3655 mono_runtime_class_init (vtable);
3657 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3658 if (!method->klass->runtime_info)
3659 /* No vtable created yet */
3661 vtable = mono_class_vtable (cfg->domain, method->klass);
3664 if (!vtable->initialized)
3669 * If we're compiling for shared code
3670 * the cctor will need to be run at aot method load time, for example,
3671 * or at the end of the compilation of the inlining method.
3673 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3678 * CAS - do not inline methods with declarative security
3679 * Note: this has to be before any possible return TRUE;
3681 if (mono_method_has_declsec (method))
3684 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float: reject any R4 return or non-byref R4 parameter. */
3686 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3688 for (i = 0; i < sig->param_count; ++i)
3689 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static field access from METHOD requires emitting a
 * class-initialization check for VTABLE's class. Already-initialized classes
 * (outside AOT), beforefieldinit classes, classes with no cctor to run, and
 * instance methods of the class itself (the cctor ran before the instance
 * existed) do not need one.
 */
3697 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
3699 if (vtable->initialized && !cfg->compile_aot)
3702 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3705 if (!mono_class_needs_cctor_run (vtable->klass, method))
3708 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3709 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR whose elements are of class KLASS, with an optional bounds check
 * (BCHECK). On x86/amd64 with power-of-two element sizes a single LEA is
 * used; otherwise the address is computed with an explicit multiply and add.
 * Returns the MonoInst holding the element address (STACK_PTR).
 */
3716 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
3720 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3722 mono_class_init (klass);
3723 size = mono_class_array_element_size (klass);
3725 mult_reg = alloc_preg (cfg);
3726 array_reg = arr->dreg;
3727 index_reg = index->dreg;
3729 #if SIZEOF_REGISTER == 8
3730 /* The array reg is 64 bits but the index reg is only 32 */
3731 if (COMPILE_LLVM (cfg)) {
/* LLVM handles the widening itself, so the index is used as-is. */
3733 index2_reg = index_reg;
3735 index2_reg = alloc_preg (cfg);
3736 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
3739 if (index->type == STACK_I8) {
3740 index2_reg = alloc_preg (cfg);
3741 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
3743 index2_reg = index_reg;
3748 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3750 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Fast path: fold the scale and vector offset into one LEA. */
3751 if (size == 1 || size == 2 || size == 4 || size == 8) {
3752 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3754 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3755 ins->type = STACK_PTR;
3761 add_reg = alloc_preg (cfg);
/* Generic path: addr = arr + index * size + offsetof (MonoArray, vector). */
3763 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3764 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3765 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3766 ins->type = STACK_PTR;
3767 MONO_ADD_INS (cfg->cbb, ins);
3772 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of element [INDEX_INS1, INDEX_INS2] of a
 * rank-2 array ARR of elements of class KLASS. Both indexes are range-checked
 * against the per-dimension MonoArrayBounds (lower_bound/length), raising
 * IndexOutOfRangeException on failure, then the flat offset is computed as
 * (realidx1 * length2 + realidx2) * element_size. Only compiled when the
 * architecture supports native multiply (no MUL/DIV emulation).
 */
3774 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
3776 int bounds_reg = alloc_preg (cfg);
3777 int add_reg = alloc_preg (cfg);
3778 int mult_reg = alloc_preg (cfg);
3779 int mult2_reg = alloc_preg (cfg);
3780 int low1_reg = alloc_preg (cfg);
3781 int low2_reg = alloc_preg (cfg);
3782 int high1_reg = alloc_preg (cfg);
3783 int high2_reg = alloc_preg (cfg);
3784 int realidx1_reg = alloc_preg (cfg);
3785 int realidx2_reg = alloc_preg (cfg);
3786 int sum_reg = alloc_preg (cfg);
3791 mono_class_init (klass);
3792 size = mono_class_array_element_size (klass);
3794 index1 = index_ins1->dreg;
3795 index2 = index_ins2->dreg;
3797 /* range checking */
3798 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
3799 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* First dimension: realidx1 = index1 - lower_bound1; require realidx1 < length1 (unsigned). */
3801 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
3802 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3803 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
3804 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
3805 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
3806 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
3807 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Second dimension: bounds [1] lives sizeof (MonoArrayBounds) past bounds [0]. */
3809 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
3810 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3811 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
3812 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
3813 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
3814 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
3815 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + (realidx1 * length2 + realidx2) * size + offsetof (MonoArray, vector) */
3817 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
3818 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
3819 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
3820 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
3821 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3823 ins->type = STACK_MP;
3825 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit the element-address computation for the array accessor CMETHOD
 * (Get/Set/Address). Rank-1 and (when intrinsics are enabled and MUL is
 * native) rank-2 arrays get inline address code; everything else falls back
 * to a call to the marshalled array-address wrapper. IS_SET indicates a
 * setter, whose trailing value argument is not an index.
 */
3832 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
3836 MonoMethod *addr_method;
3839 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3842 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
3844 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3845 /* emit_ldelema_2 depends on OP_LMUL */
3846 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3847 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
3851 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3852 addr_method = mono_marshal_get_array_address (rank, element_size);
3853 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint/Debugger.Break () request. */
3858 static MonoBreakPolicy
3859 always_insert_breakpoint (MonoMethod *method)
3861 return MONO_BREAK_POLICY_ALWAYS;
/* Current policy callback; replaced via mono_set_break_policy (). */
3864 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
3867 * mono_set_break_policy:
3868 * policy_callback: the new callback function
3870 * Allow embedders to decide whether to actually obey breakpoint instructions
3871 * (both break IL instructions and Debugger.Break () method calls), for example
3872 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
3873 * untrusted or semi-trusted code.
3875 * @policy_callback will be called every time a break point instruction needs to
3876 * be inserted with the method argument being the method that calls Debugger.Break()
3877 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
3878 * if it wants the breakpoint to not be effective in the given method.
3879 * #MONO_BREAK_POLICY_ALWAYS is the default.
/*
 * mono_set_break_policy: install POLICY_CALLBACK as the break policy; a NULL
 * argument restores the default (always insert breakpoints).
 */
3882 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
3884 if (policy_callback)
3885 break_policy_func = policy_callback;
3887 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint: (sic — name typo predates this review; it is
 * called elsewhere in the file, so it is kept for compatibility)
 *
 *   Consult the installed break policy to decide whether a breakpoint
 * instruction should actually be emitted for METHOD. Unknown policy values
 * are warned about.
 */
3891 should_insert_brekpoint (MonoMethod *method) {
3892 switch (break_policy_func (method)) {
3893 case MONO_BREAK_POLICY_ALWAYS:
3895 case MONO_BREAK_POLICY_NEVER:
3897 case MONO_BREAK_POLICY_ON_DBG:
3898 return mono_debug_using_mono_debugger ();
3900 g_warning ("Incorrect value returned from break policy callback");
3905 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Emit an inline element load (IS_SET == 0) or store (IS_SET != 0) for the
 * Array.Get/SetGenericValueImpl icalls. args [0] is the array, args [1] the
 * index, args [2] the value location; the element class comes from the third
 * signature parameter. Bounds checking is the callers' responsibility.
 */
3907 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
3909 MonoInst *addr, *store, *load;
3910 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
3912 /* the bounds check is already done by the callers */
3913 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
3915 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
3916 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
3918 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
3919 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/*
 * mini_emit_inst_for_method:
 *
 *   Intrinsic dispatcher: if CMETHOD is one of the well-known corlib methods
 * handled here (String.get_Chars/get_Length, Object.GetType, Array accessors,
 * RuntimeHelpers.get_OffsetToStringData, Thread memory barriers, Monitor
 * Enter/Exit fast paths, Interlocked operations, Debugger.Break, etc.),
 * emit specialized IR and return the resulting instruction; otherwise fall
 * through to SIMD intrinsics and finally the arch-specific handler.
 * NOTE(review): this chunk has interior lines elided (several returns/braces
 * are not visible); comments only, code untouched.
 */
3925 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
3927 MonoInst *ins = NULL;
/* Lazily cached lookup of System.Runtime.CompilerServices.RuntimeHelpers. */
3929 static MonoClass *runtime_helpers_class = NULL;
3930 if (! runtime_helpers_class)
3931 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
3932 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* --- System.String intrinsics --- */
3934 if (cmethod->klass == mono_defaults.string_class) {
3935 if (strcmp (cmethod->name, "get_Chars") == 0) {
3936 int dreg = alloc_ireg (cfg);
3937 int index_reg = alloc_preg (cfg);
3938 int mult_reg = alloc_preg (cfg);
3939 int add_reg = alloc_preg (cfg);
3941 #if SIZEOF_REGISTER == 8
3942 /* The array reg is 64 bits but the index reg is only 32 */
3943 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
3945 index_reg = args [1]->dreg;
3947 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
3949 #if defined(TARGET_X86) || defined(TARGET_AMD64)
3950 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
3951 add_reg = ins->dreg;
3952 /* Avoid a warning */
3954 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3957 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
3958 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3959 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3960 add_reg, G_STRUCT_OFFSET (MonoString, chars));
3962 type_from_op (ins, NULL, NULL);
3964 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3965 int dreg = alloc_ireg (cfg);
3966 /* Decompose later to allow more optimizations */
3967 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
3968 ins->type = STACK_I4;
3969 cfg->cbb->has_array_access = TRUE;
3970 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
3973 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
3974 int mult_reg = alloc_preg (cfg);
3975 int add_reg = alloc_preg (cfg);
3977 /* The corlib functions check for oob already. */
3978 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
3979 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3980 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
3981 return cfg->cbb->last_ins;
/* --- System.Object intrinsics --- */
3984 } else if (cmethod->klass == mono_defaults.object_class) {
3986 if (strcmp (cmethod->name, "GetType") == 0) {
3987 int dreg = alloc_preg (cfg);
3988 int vt_reg = alloc_preg (cfg);
3989 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3990 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
3991 type_from_op (ins, NULL, NULL);
3994 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
3995 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
3996 int dreg = alloc_ireg (cfg);
3997 int t1 = alloc_ireg (cfg);
/* Hash the object address: (addr >> 3) * 2654435761 (Knuth multiplicative hash);
 * only valid with a non-moving GC. */
3999 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
4000 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
4001 ins->type = STACK_I4;
4005 } else if (strcmp (cmethod->name, ".ctor") == 0) {
4006 MONO_INST_NEW (cfg, ins, OP_NOP);
4007 MONO_ADD_INS (cfg->cbb, ins);
/* --- System.Array intrinsics --- */
4011 } else if (cmethod->klass == mono_defaults.array_class) {
4012 if (strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
4013 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
4014 if (cmethod->name [0] != 'g')
4017 if (strcmp (cmethod->name, "get_Rank") == 0) {
4018 int dreg = alloc_ireg (cfg);
4019 int vtable_reg = alloc_preg (cfg);
4020 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
4021 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4022 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
4023 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4024 type_from_op (ins, NULL, NULL);
4027 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4028 int dreg = alloc_ireg (cfg);
4030 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4031 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4032 type_from_op (ins, NULL, NULL);
/* --- RuntimeHelpers --- */
4037 } else if (cmethod->klass == runtime_helpers_class) {
4039 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
4040 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
/* --- System.Threading.Thread --- */
4044 } else if (cmethod->klass == mono_defaults.thread_class) {
4045 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
4046 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
4047 MONO_ADD_INS (cfg->cbb, ins);
4049 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
4050 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4051 MONO_ADD_INS (cfg->cbb, ins);
/* --- System.Threading.Monitor fast paths --- */
4054 } else if (cmethod->klass == mono_defaults.monitor_class) {
4055 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
4056 if (strcmp (cmethod->name, "Enter") == 0) {
4059 if (COMPILE_LLVM (cfg)) {
4061 * Pass the argument normally, the LLVM backend will handle the
4062 * calling convention problems.
4064 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4066 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
4067 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4068 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4069 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4072 return (MonoInst*)call;
4073 } else if (strcmp (cmethod->name, "Exit") == 0) {
4076 if (COMPILE_LLVM (cfg)) {
4077 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4079 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
4080 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4081 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4082 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4085 return (MonoInst*)call;
4087 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4088 MonoMethod *fast_method = NULL;
4090 /* Avoid infinite recursion */
4091 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
4092 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
4093 strcmp (cfg->method->name, "FastMonitorExit") == 0))
4096 if (strcmp (cmethod->name, "Enter") == 0 ||
4097 strcmp (cmethod->name, "Exit") == 0)
4098 fast_method = mono_monitor_get_fast_path (cmethod);
4102 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
/* --- System.Threading.Interlocked --- */
4104 } else if (cmethod->klass->image == mono_defaults.corlib &&
4105 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
4106 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
4109 #if SIZEOF_REGISTER == 8
4110 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
4111 /* 64 bit reads are already atomic */
4112 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
4113 ins->dreg = mono_alloc_preg (cfg);
4114 ins->inst_basereg = args [0]->dreg;
4115 ins->inst_offset = 0;
4116 MONO_ADD_INS (cfg->cbb, ins);
4120 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
/* Increment/Decrement/Add are all emitted as an atomic add of +1/-1/arg. */
4121 if (strcmp (cmethod->name, "Increment") == 0) {
4122 MonoInst *ins_iconst;
4125 if (fsig->params [0]->type == MONO_TYPE_I4)
4126 opcode = OP_ATOMIC_ADD_NEW_I4;
4127 #if SIZEOF_REGISTER == 8
4128 else if (fsig->params [0]->type == MONO_TYPE_I8)
4129 opcode = OP_ATOMIC_ADD_NEW_I8;
4132 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4133 ins_iconst->inst_c0 = 1;
4134 ins_iconst->dreg = mono_alloc_ireg (cfg);
4135 MONO_ADD_INS (cfg->cbb, ins_iconst);
4137 MONO_INST_NEW (cfg, ins, opcode);
4138 ins->dreg = mono_alloc_ireg (cfg);
4139 ins->inst_basereg = args [0]->dreg;
4140 ins->inst_offset = 0;
4141 ins->sreg2 = ins_iconst->dreg;
4142 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4143 MONO_ADD_INS (cfg->cbb, ins);
4145 } else if (strcmp (cmethod->name, "Decrement") == 0) {
4146 MonoInst *ins_iconst;
4149 if (fsig->params [0]->type == MONO_TYPE_I4)
4150 opcode = OP_ATOMIC_ADD_NEW_I4;
4151 #if SIZEOF_REGISTER == 8
4152 else if (fsig->params [0]->type == MONO_TYPE_I8)
4153 opcode = OP_ATOMIC_ADD_NEW_I8;
4156 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4157 ins_iconst->inst_c0 = -1;
4158 ins_iconst->dreg = mono_alloc_ireg (cfg);
4159 MONO_ADD_INS (cfg->cbb, ins_iconst);
4161 MONO_INST_NEW (cfg, ins, opcode);
4162 ins->dreg = mono_alloc_ireg (cfg);
4163 ins->inst_basereg = args [0]->dreg;
4164 ins->inst_offset = 0;
4165 ins->sreg2 = ins_iconst->dreg;
4166 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4167 MONO_ADD_INS (cfg->cbb, ins);
4169 } else if (strcmp (cmethod->name, "Add") == 0) {
4172 if (fsig->params [0]->type == MONO_TYPE_I4)
4173 opcode = OP_ATOMIC_ADD_NEW_I4;
4174 #if SIZEOF_REGISTER == 8
4175 else if (fsig->params [0]->type == MONO_TYPE_I8)
4176 opcode = OP_ATOMIC_ADD_NEW_I8;
4180 MONO_INST_NEW (cfg, ins, opcode);
4181 ins->dreg = mono_alloc_ireg (cfg);
4182 ins->inst_basereg = args [0]->dreg;
4183 ins->inst_offset = 0;
4184 ins->sreg2 = args [1]->dreg;
4185 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4186 MONO_ADD_INS (cfg->cbb, ins);
4189 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
4191 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
4192 if (strcmp (cmethod->name, "Exchange") == 0) {
4194 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
4196 if (fsig->params [0]->type == MONO_TYPE_I4)
4197 opcode = OP_ATOMIC_EXCHANGE_I4;
4198 #if SIZEOF_REGISTER == 8
4199 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
4200 (fsig->params [0]->type == MONO_TYPE_I))
4201 opcode = OP_ATOMIC_EXCHANGE_I8;
4203 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
4204 opcode = OP_ATOMIC_EXCHANGE_I4;
4209 MONO_INST_NEW (cfg, ins, opcode);
4210 ins->dreg = mono_alloc_ireg (cfg);
4211 ins->inst_basereg = args [0]->dreg;
4212 ins->inst_offset = 0;
4213 ins->sreg2 = args [1]->dreg;
4214 MONO_ADD_INS (cfg->cbb, ins);
4216 switch (fsig->params [0]->type) {
4218 ins->type = STACK_I4;
4222 ins->type = STACK_I8;
4224 case MONO_TYPE_OBJECT:
4225 ins->type = STACK_OBJ;
4228 g_assert_not_reached ();
4231 #if HAVE_WRITE_BARRIERS
/* Exchanging a reference into the heap needs a GC write barrier. */
4233 MonoInst *dummy_use;
4234 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
4235 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
4236 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [1]);
4240 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4242 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
4243 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
4245 gboolean is_ref = MONO_TYPE_IS_REFERENCE (fsig->params [1]);
4246 if (fsig->params [1]->type == MONO_TYPE_I4)
4248 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
4249 size = sizeof (gpointer);
4250 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
4253 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
4254 ins->dreg = alloc_ireg (cfg);
4255 ins->sreg1 = args [0]->dreg;
4256 ins->sreg2 = args [1]->dreg;
4257 ins->sreg3 = args [2]->dreg;
4258 ins->type = STACK_I4;
4259 MONO_ADD_INS (cfg->cbb, ins);
4260 } else if (size == 8) {
4261 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
4262 ins->dreg = alloc_ireg (cfg);
4263 ins->sreg1 = args [0]->dreg;
4264 ins->sreg2 = args [1]->dreg;
4265 ins->sreg3 = args [2]->dreg;
4266 ins->type = STACK_I8;
4267 MONO_ADD_INS (cfg->cbb, ins);
4269 /* g_assert_not_reached (); */
4271 #if HAVE_WRITE_BARRIERS
4273 MonoInst *dummy_use;
4274 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
4275 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
4276 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [1]);
4280 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
/* --- misc corlib methods --- */
4284 } else if (cmethod->klass->image == mono_defaults.corlib) {
4285 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
4286 && strcmp (cmethod->klass->name, "Debugger") == 0) {
4287 if (should_insert_brekpoint (cfg->method))
4288 MONO_INST_NEW (cfg, ins, OP_BREAK);
4290 MONO_INST_NEW (cfg, ins, OP_NOP);
4291 MONO_ADD_INS (cfg->cbb, ins);
4294 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
4295 && strcmp (cmethod->klass->name, "Environment") == 0) {
4297 EMIT_NEW_ICONST (cfg, ins, 1);
4299 EMIT_NEW_ICONST (cfg, ins, 0);
4303 } else if (cmethod->klass == mono_defaults.math_class) {
4305 * There is general branches code for Min/Max, but it does not work for
4307 * http://everything2.com/?node_id=1051618
4311 #ifdef MONO_ARCH_SIMD_INTRINSICS
4312 if (cfg->opt & MONO_OPT_SIMD) {
4313 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/* Last resort: let the architecture backend try. */
4319 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
4323 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirect a call to METHOD to a different implementation when one exists;
 * currently only String.InternalAllocateStr is redirected, to the GC's
 * managed allocator (skipped when allocation profiling is active, so the
 * profiler still sees allocations).
 */
4326 inline static MonoInst*
4327 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4328 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4330 if (method->klass == mono_defaults.string_class) {
4331 /* managed string allocation support */
4332 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS)) {
4333 MonoInst *iargs [2];
4334 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4335 MonoMethod *managed_alloc = NULL;
4337 g_assert (vtable); /*Should not fail since it is System.String*/
4338 #ifndef MONO_CROSS_COMPILE
4339 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
4343 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4344 iargs [1] = args [0];
4345 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   When inlining, create an OP_LOCAL variable for each incoming argument
 * (including the implicit this) and emit a store of the caller's stack value
 * SP [i] into it, so the inlined body accesses arguments through cfg->args
 * like a normal method would.
 */
4352 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4354 MonoInst *store, *temp;
4357 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* The this argument's type comes from the stack value, not the signature. */
4358 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4361 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4362 * would be different than the MonoInst's used to represent arguments, and
4363 * the ldelema implementation can't deal with that.
4364 * Solution: When ldelema is used on an inline argument, create a var for
4365 * it, emit ldelema on that var, and emit the saving code below in
4366 * inline_method () if needed.
4368 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4369 cfg->args [i] = temp;
4370 /* This uses cfg->args [i] which is set by the preceding line */
4371 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4372 store->cil_code = sp [0]->cil_code;
4377 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4378 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4380 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debugging aid: only allow inlining of methods whose full name begins with
 * the prefix given in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT environment
 * variable. The env value is cached in a static on first use (empty string
 * means "no limit" and everything passes).
 */
4382 check_inline_called_method_name_limit (MonoMethod *called_method)
4385 static char *limit = NULL;
4387 if (limit == NULL) {
4388 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4390 if (limit_string != NULL)
4391 limit = limit_string;
4393 limit = (char *) "";
4396 if (limit [0] != '\0') {
4397 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* Prefix match: compare only the first strlen (limit) characters. */
4399 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4400 g_free (called_method_name);
4402 //return (strncmp_result <= 0);
4403 return (strncmp_result == 0);
4410 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debugging aid, mirror of check_inline_called_method_name_limit (): only
 * allow inlining when the CALLER's full name begins with the prefix in the
 * MONO_INLINE_CALLER_METHOD_NAME_LIMIT environment variable (cached in a
 * static; empty string means "no limit").
 */
4412 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4415 static char *limit = NULL;
4417 if (limit == NULL) {
4418 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4419 if (limit_string != NULL) {
4420 limit = limit_string;
4422 limit = (char *) "";
4426 if (limit [0] != '\0') {
4427 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
/* Prefix match: compare only the first strlen (limit) characters. */
4429 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4430 g_free (caller_method_name);
4432 //return (strncmp_result <= 0);
4433 return (strncmp_result == 0);
4441 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4442 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
4444 MonoInst *ins, *rvar = NULL;
4445 MonoMethodHeader *cheader;
4446 MonoBasicBlock *ebblock, *sbblock;
4448 MonoMethod *prev_inlined_method;
4449 MonoInst **prev_locals, **prev_args;
4450 MonoType **prev_arg_types;
4451 guint prev_real_offset;
4452 GHashTable *prev_cbb_hash;
4453 MonoBasicBlock **prev_cil_offset_to_bb;
4454 MonoBasicBlock *prev_cbb;
4455 unsigned char* prev_cil_start;
4456 guint32 prev_cil_offset_to_bb_len;
4457 MonoMethod *prev_current_method;
4458 MonoGenericContext *prev_generic_context;
4459 gboolean ret_var_set, prev_ret_var_set;
4461 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
4463 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4464 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
4467 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4468 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
4472 if (cfg->verbose_level > 2)
4473 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4475 if (!cmethod->inline_info) {
4476 mono_jit_stats.inlineable_methods++;
4477 cmethod->inline_info = 1;
4480 /* allocate local variables */
4481 cheader = mono_method_get_header (cmethod);
4483 if (cheader == NULL || mono_loader_get_last_error ()) {
4485 mono_metadata_free_mh (cheader);
4486 mono_loader_clear_error ();
4490 /* allocate space to store the return value */
4491 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4492 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
4496 prev_locals = cfg->locals;
4497 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4498 for (i = 0; i < cheader->num_locals; ++i)
4499 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4501 /* allocate start and end blocks */
4502 /* This is needed so if the inline is aborted, we can clean up */
4503 NEW_BBLOCK (cfg, sbblock);
4504 sbblock->real_offset = real_offset;
4506 NEW_BBLOCK (cfg, ebblock);
4507 ebblock->block_num = cfg->num_bblocks++;
4508 ebblock->real_offset = real_offset;
4510 prev_args = cfg->args;
4511 prev_arg_types = cfg->arg_types;
4512 prev_inlined_method = cfg->inlined_method;
4513 cfg->inlined_method = cmethod;
4514 cfg->ret_var_set = FALSE;
4515 cfg->inline_depth ++;
4516 prev_real_offset = cfg->real_offset;
4517 prev_cbb_hash = cfg->cbb_hash;
4518 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4519 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4520 prev_cil_start = cfg->cil_start;
4521 prev_cbb = cfg->cbb;
4522 prev_current_method = cfg->current_method;
4523 prev_generic_context = cfg->generic_context;
4524 prev_ret_var_set = cfg->ret_var_set;
4526 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4528 ret_var_set = cfg->ret_var_set;
4530 cfg->inlined_method = prev_inlined_method;
4531 cfg->real_offset = prev_real_offset;
4532 cfg->cbb_hash = prev_cbb_hash;
4533 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4534 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4535 cfg->cil_start = prev_cil_start;
4536 cfg->locals = prev_locals;
4537 cfg->args = prev_args;
4538 cfg->arg_types = prev_arg_types;
4539 cfg->current_method = prev_current_method;
4540 cfg->generic_context = prev_generic_context;
4541 cfg->ret_var_set = prev_ret_var_set;
4542 cfg->inline_depth --;
4544 if ((costs >= 0 && costs < 60) || inline_allways) {
4545 if (cfg->verbose_level > 2)
4546 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4548 mono_jit_stats.inlined_methods++;
4550 /* always add some code to avoid block split failures */
4551 MONO_INST_NEW (cfg, ins, OP_NOP);
4552 MONO_ADD_INS (prev_cbb, ins);
4554 prev_cbb->next_bb = sbblock;
4555 link_bblock (cfg, prev_cbb, sbblock);
4558 * Get rid of the begin and end bblocks if possible to aid local
4561 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4563 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4564 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4566 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4567 MonoBasicBlock *prev = ebblock->in_bb [0];
4568 mono_merge_basic_blocks (cfg, prev, ebblock);
4570 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4571 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4572 cfg->cbb = prev_cbb;
4580 * If the inlined method contains only a throw, then the ret var is not
4581 * set, so set it to a dummy value.
4584 static double r8_0 = 0.0;
4586 switch (rvar->type) {
4588 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4591 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4596 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4599 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4600 ins->type = STACK_R8;
4601 ins->inst_p0 = (void*)&r8_0;
4602 ins->dreg = rvar->dreg;
4603 MONO_ADD_INS (cfg->cbb, ins);
4606 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4609 g_assert_not_reached ();
4613 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
4616 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
4619 if (cfg->verbose_level > 2)
4620 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4621 cfg->exception_type = MONO_EXCEPTION_NONE;
4622 mono_loader_clear_error ();
4624 /* This gets rid of the newly added bblocks */
4625 cfg->cbb = prev_cbb;
4627 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
4632 * Some of these comments may well be out-of-date.
4633 * Design decisions: we do a single pass over the IL code (and we do bblock
4634 * splitting/merging in the few cases when it's required: a back jump to an IL
4635 * address that was not already seen as bblock starting point).
4636 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4637 * Complex operations are decomposed in simpler ones right away. We need to let the
4638 * arch-specific code peek and poke inside this process somehow (except when the
4639 * optimizations can take advantage of the full semantic info of coarse opcodes).
4640 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4641 * MonoInst->opcode initially is the IL opcode or some simplification of that
4642 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4643 * opcode with value bigger than OP_LAST.
4644 * At this point the IR can be handed over to an interpreter, a dumb code generator
4645 * or to the optimizing code generator that will translate it to SSA form.
4647 * Profiling directed optimizations.
4648 * We may compile by default with few or no optimizations and instrument the code
4649 * or the user may indicate what methods to optimize the most either in a config file
4650 * or through repeated runs where the compiler applies offline the optimizations to
4651 * each method and then decides if it was worth it.
/*
 * Sanity-check helpers used while converting IL to IR: each macro
 * aborts compilation of the current method through the UNVERIFIED
 * handler when its condition fails.
 */
4654 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
/* at least NUM values must currently be on the evaluation stack */
4655 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
/* pushing NUM more values must not exceed the method's declared max_stack */
4656 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
/* the unsigned compare also rejects negative indexes in a single test */
4657 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4658 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
/* the instruction's immediate operand must not run past the end of the IL */
4659 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4660 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
4661 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
4663 /* distance in opcode values from a short-form branch (br.s and friends) to the corresponding long form (br and friends) */
4664 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return TRUE if the IL address IP can be treated as belonging to
 * basic block BB, i.e. the offset either has no bblock assigned yet in
 * cfg->cil_offset_to_bb or is assigned to BB itself.
 */
4667 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4669 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4671 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Do a single forward pass over the IL stream [START, END) and, via
 * GET_BBLOCK, allocate a MonoBasicBlock for every branch/switch target
 * and for the instruction following each branch, so the main conversion
 * loop can split code at those boundaries.  Bblocks that contain a
 * throw are flagged out_of_line.
 *   NOTE(review): several lines (error handling, break statements,
 * operand-size advances) are elided in this excerpt.
 */
4675 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4677 unsigned char *ip = start;
4678 unsigned char *target;
4681 MonoBasicBlock *bblock;
4682 const MonoOpcode *opcode;
/* cli_addr is the offset of the current instruction; ip is advanced past
 * the opcode bytes by mono_opcode_value () */
4685 cli_addr = ip - start;
4686 i = mono_opcode_value ((const guint8 **)&ip, end);
4689 opcode = &mono_opcodes [i];
/* dispatch on the operand encoding to find the instruction length and
 * any branch targets */
4690 switch (opcode->argument) {
4691 case MonoInlineNone:
4694 case MonoInlineString:
4695 case MonoInlineType:
4696 case MonoInlineField:
4697 case MonoInlineMethod:
4700 case MonoShortInlineR:
4707 case MonoShortInlineVar:
4708 case MonoShortInlineI:
4711 case MonoShortInlineBrTarget:
/* 1-byte signed displacement, relative to the end of the 2-byte instruction */
4712 target = start + cli_addr + 2 + (signed char)ip [1];
4713 GET_BBLOCK (cfg, bblock, target);
/* the fall-through successor also starts a bblock */
4716 GET_BBLOCK (cfg, bblock, ip);
4718 case MonoInlineBrTarget:
/* 4-byte signed displacement, relative to the end of the 5-byte instruction */
4719 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4720 GET_BBLOCK (cfg, bblock, target);
4723 GET_BBLOCK (cfg, bblock, ip);
4725 case MonoInlineSwitch: {
4726 guint32 n = read32 (ip + 1);
/* advance cli_addr past the whole switch (opcode + count + n targets);
 * targets are relative to the instruction end */
4729 cli_addr += 5 + 4 * n;
4730 target = start + cli_addr;
4731 GET_BBLOCK (cfg, bblock, target);
4733 for (j = 0; j < n; ++j) {
4734 target = start + cli_addr + (gint32)read32 (ip);
4735 GET_BBLOCK (cfg, bblock, target);
4745 g_assert_not_reached ();
4748 if (i == CEE_THROW) {
4749 unsigned char *bb_start = ip - 1;
4751 /* Find the start of the bblock containing the throw */
4753 while ((bb_start >= start) && !bblock) {
4754 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* mark throw-only blocks so they can be moved out of the hot path */
4758 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN to a MonoMethod in the context of method M.  For
 * wrappers the token indexes the wrapper's own data table; otherwise it
 * is looked up in M's image with the given KLASS/CONTEXT.  The result
 * may still be an open constructed method — no check is done here
 * (see mini_get_method () for the checked variant).
 */
4767 static inline MonoMethod *
4768 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4772 if (m->wrapper_type != MONO_WRAPPER_NONE)
4773 return mono_method_get_wrapper_data (m, token);
4775 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when not compiling with a
 * generic sharing context it additionally rejects methods whose
 * declaring class is an open constructed type (the rejection path is
 * elided in this excerpt).
 */
4780 static inline MonoMethod *
4781 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4783 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
4785 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass.  Wrappers store the class directly in
 * their wrapper data; otherwise the token is looked up in the method's
 * image with the given generic CONTEXT.  mono_class_init () is called
 * on the result before it is returned.
 */
4791 static inline MonoClass*
4792 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4796 if (method->wrapper_type != MONO_WRAPPER_NONE)
4797 klass = mono_method_get_wrapper_data (method, token);
4799 klass = mono_class_get_full (method->klass->image, token, context);
4801 mono_class_init (klass);
4806  * Returns TRUE if the JIT should abort inlining because "callee"
4807  * is influenced by security attributes.
4810 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
/* NOTE(review): only evaluated when CALLER is not the root method being
 * compiled — presumably the root method's demands are handled elsewhere;
 * confirm against the call sites */
4814 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
4818 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
4819 if (result == MONO_JIT_SECURITY_OK)
4822 if (result == MONO_JIT_LINKDEMAND_ECMA) {
4823 /* Generate code to throw a SecurityException before the actual call/link */
4824 MonoSecurityManager *secman = mono_security_manager_get_methods ();
/* the constant 4 presumably selects the linkdemand variant understood by
 * the managed helper — TODO confirm against its signature */
4827 NEW_ICONST (cfg, args [0], 4);
4828 NEW_METHODCONST (cfg, args [1], caller);
4829 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
4830 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
4831 /* don't hide previous results */
/* record a pending SecurityException on the compile so the failure is
 * reported when compilation finishes */
4832 cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
4833 cfg->exception_data = result;
/*
 * throw_exception:
 *
 *   Return the managed SecurityManager.ThrowException method (one
 * parameter), lazily looked up once and cached in a function-local
 * static.  NOTE(review): the cache write is unsynchronized — benign
 * only if concurrent lookups are idempotent; confirm.
 */
4841 throw_exception (void)
4843 static MonoMethod *method = NULL;
4846 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4847 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit IR that calls SecurityManager.ThrowException (EX) at run time,
 * i.e. make the generated code throw the given preallocated managed
 * exception when executed.
 */
4854 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
4856 MonoMethod *thrower = throw_exception ();
4859 EMIT_NEW_PCONST (cfg, args [0], ex);
4860 mono_emit_method_call (cfg, thrower, args, NULL);
4864  * Return the original method if a wrapper is specified. We can only access
4865  * the custom attributes from the original method.
4868 get_original_method (MonoMethod *method)
/* not a wrapper: the method already carries its own attributes */
4870 if (method->wrapper_type == MONO_WRAPPER_NONE)
4873 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
4874 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
4877 /* in other cases we need to find the original method */
4878 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: ask the security layer whether CALLER may
 * access FIELD and, if not, emit code that throws the returned security
 * exception at run time.
 */
4882 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
4883 MonoBasicBlock *bblock, unsigned char *ip)
4885 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4886 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
4888 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: ask the security layer whether CALLER may
 * call CALLEE and, if not, emit code that throws the returned security
 * exception at run time.
 */
4892 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4893 MonoBasicBlock *bblock, unsigned char *ip)
4895 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4896 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
4898 emit_throw_exception (cfg, ex);
4902  * Check that the IL instructions at ip are the array initialization
4903  * sequence and return the pointer to the data and the size.
4906 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
4909  * newarr[System.Int32]
4911  * ldtoken field valuetype ...
4912  * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* dup; ldtoken <field>; call InitializeArray — ip [5] is the high byte of
 * the ldtoken operand, i.e. it requires a Field (0x04) metadata token */
4914 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
4915 guint32 token = read32 (ip + 7);
4916 guint32 field_token = read32 (ip + 2);
/* strip the table byte, keep the row index */
4917 guint32 field_index = field_token & 0xffffff;
4919 const char *data_ptr;
4921 MonoMethod *cmethod;
4922 MonoClass *dummy_class;
4923 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
4929 *out_field_token = field_token;
4931 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* the callee must really be corlib's RuntimeHelpers.InitializeArray */
4934 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
/* only element types with a raw in-image representation are handled;
 * multi-byte types are restricted to little-endian targets below */
4936 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
4937 case MONO_TYPE_BOOLEAN:
4941 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
4942 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
4943 case MONO_TYPE_CHAR:
4953 return NULL; /* stupid ARM FP swapped format */
/* the initializer blob must be at least as large as the array data */
4963 if (size > mono_type_size (field->type, &dummy_align))
4966 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
/* non-dynamic images: the data lives at the field's RVA inside the image */
4967 if (!method->klass->image->dynamic) {
4968 field_index = read32 (ip + 2) & 0xffffff;
4969 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
4970 data_ptr = mono_image_rva_map (method->klass->image, rva);
4971 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
4972 /* for aot code we do the lookup on load */
4973 if (aot && data_ptr)
4974 return GUINT_TO_POINTER (rva);
4976 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* dynamic (SRE) images: take the data directly from the field */
4978 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG, with a message naming the
 * offending method and the disassembly of the IL instruction at IP
 * (or a placeholder when the method body is empty).  The method header
 * is queued on cfg->headers_to_free rather than freed here.
 */
4986 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
4988 char *method_fname = mono_method_full_name (method, TRUE);
4990 MonoMethodHeader *header = mono_method_get_header (method);
4992 if (header->code_size == 0)
4993 method_code = g_strdup ("method body is empty.");
4995 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
4996 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
4997 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
4998 g_free (method_fname);
4999 g_free (method_code);
5000 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *
 *   Make CFG fail with the given preallocated managed exception object.
 * cfg->exception_ptr is registered as a GC root before the object is
 * stored in it, so the collector will not reclaim the exception.
 */
5004 set_exception_object (MonoCompile *cfg, MonoException *exception)
5006 cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
5007 MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
5008 cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 *
 *   Return whether KLASS is a reference type.  Under generic sharing
 * the type is first mapped to its shared/basic representation so that
 * type variables are classified the way the shared code sees them.
 */
5012 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5016 if (cfg->generic_sharing_context)
5017 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
5019 type = &klass->byval_arg;
5020 return MONO_TYPE_IS_REFERENCE (type);
/*
 * emit_stloc_ir:
 *
 *   Store the value on top of the stack (*sp) into local N.  When the
 * store would be a plain reg-reg move and the top of stack is an
 * ICONST/I8CONST that is still the last emitted instruction, just
 * retarget that instruction's dreg instead of emitting a separate
 * store.
 */
5024 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5027 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5028 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5029 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5030 /* Optimize reg-reg moves away */
5032 * Can't optimize other opcodes, since sp[0] might point to
5033 * the last ins of a decomposed opcode.
5035 sp [0]->dreg = (cfg)->locals [n]->dreg;
5037 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5042  * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *
 *   Try to fold a "ldloca <n>; initobj <type>" IL pair into a direct
 * zero/NULL store on the local, so the address of the local is never
 * taken.  Returns the new ip past the folded sequence, or presumably
 * NULL when no folding applies (return paths elided in this excerpt).
 * SIZE selects the short/long ldloca encoding.
 */
5045 static inline unsigned char *
5046 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
5055 local = read16 (ip + 2);
/* the following instruction must be initobj and must live in the same bblock */
5059 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5060 gboolean skip = FALSE;
5062 /* From the INITOBJ case */
5063 token = read32 (ip + 2);
5064 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5065 CHECK_TYPELOAD (klass);
/* initobj on a reference type stores NULL; on a value type it zeroes */
5066 if (generic_class_is_reference_type (cfg, klass)) {
5067 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5068 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5069 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5070 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5071 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/*
 * is_exception_class:
 *
 *   TRUE if CLASS is System.Exception or derives from it — walks the
 * parent chain (the surrounding loop/returns are elided in this
 * excerpt).
 */
5084 is_exception_class (MonoClass *class)
5087 if (class == mono_defaults.exception_class)
5089 class = class->parent;
5095 * mono_method_to_ir:
5097 * Translate the .net IL into linear IR.
5100 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5101 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5102 guint inline_offset, gboolean is_virtual_call)
5105 MonoInst *ins, **sp, **stack_start;
5106 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5107 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
5108 MonoMethod *cmethod, *method_definition;
5109 MonoInst **arg_array;
5110 MonoMethodHeader *header;
5112 guint32 token, ins_flag;
5114 MonoClass *constrained_call = NULL;
5115 unsigned char *ip, *end, *target, *err_pos;
5116 static double r8_0 = 0.0;
5117 MonoMethodSignature *sig;
5118 MonoGenericContext *generic_context = NULL;
5119 MonoGenericContainer *generic_container = NULL;
5120 MonoType **param_types;
5121 int i, n, start_new_bblock, dreg;
5122 int num_calls = 0, inline_costs = 0;
5123 int breakpoint_id = 0;
5125 MonoBoolean security, pinvoke;
5126 MonoSecurityManager* secman = NULL;
5127 MonoDeclSecurityActions actions;
5128 GSList *class_inits = NULL;
5129 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5131 gboolean init_locals, seq_points, skip_dead_blocks;
5133 /* serialization and xdomain stuff may need access to private fields and methods */
5134 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5135 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5136 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5137 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5138 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5139 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5141 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5143 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5144 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5145 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5146 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5148 image = method->klass->image;
5149 header = mono_method_get_header (method);
5150 generic_container = mono_method_get_generic_container (method);
5151 sig = mono_method_signature (method);
5152 num_args = sig->hasthis + sig->param_count;
5153 ip = (unsigned char*)header->code;
5154 cfg->cil_start = ip;
5155 end = ip + header->code_size;
5156 mono_jit_stats.cil_code_size += header->code_size;
5157 init_locals = header->init_locals;
5159 seq_points = cfg->gen_seq_points && cfg->method == method;
5162 * Methods without init_locals set could cause asserts in various passes
5167 method_definition = method;
5168 while (method_definition->is_inflated) {
5169 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5170 method_definition = imethod->declaring;
5173 /* SkipVerification is not allowed if core-clr is enabled */
5174 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5176 dont_verify_stloc = TRUE;
5179 if (!dont_verify && mini_method_verify (cfg, method_definition))
5180 goto exception_exit;
5182 if (mono_debug_using_mono_debugger ())
5183 cfg->keep_cil_nops = TRUE;
5185 if (sig->is_inflated)
5186 generic_context = mono_method_get_context (method);
5187 else if (generic_container)
5188 generic_context = &generic_container->context;
5189 cfg->generic_context = generic_context;
5191 if (!cfg->generic_sharing_context)
5192 g_assert (!sig->has_type_parameters);
5194 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5195 g_assert (method->is_inflated);
5196 g_assert (mono_method_get_context (method)->method_inst);
5198 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5199 g_assert (sig->generic_param_count);
5201 if (cfg->method == method) {
5202 cfg->real_offset = 0;
5204 cfg->real_offset = inline_offset;
5207 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5208 cfg->cil_offset_to_bb_len = header->code_size;
5210 cfg->current_method = method;
5212 if (cfg->verbose_level > 2)
5213 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5215 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5217 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5218 for (n = 0; n < sig->param_count; ++n)
5219 param_types [n + sig->hasthis] = sig->params [n];
5220 cfg->arg_types = param_types;
5222 dont_inline = g_list_prepend (dont_inline, method);
5223 if (cfg->method == method) {
5225 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5226 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5229 NEW_BBLOCK (cfg, start_bblock);
5230 cfg->bb_entry = start_bblock;
5231 start_bblock->cil_code = NULL;
5232 start_bblock->cil_length = 0;
5235 NEW_BBLOCK (cfg, end_bblock);
5236 cfg->bb_exit = end_bblock;
5237 end_bblock->cil_code = NULL;
5238 end_bblock->cil_length = 0;
5239 g_assert (cfg->num_bblocks == 2);
5241 arg_array = cfg->args;
5243 if (header->num_clauses) {
5244 cfg->spvars = g_hash_table_new (NULL, NULL);
5245 cfg->exvars = g_hash_table_new (NULL, NULL);
5247 /* handle exception clauses */
5248 for (i = 0; i < header->num_clauses; ++i) {
5249 MonoBasicBlock *try_bb;
5250 MonoExceptionClause *clause = &header->clauses [i];
5251 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5252 try_bb->real_offset = clause->try_offset;
5253 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5254 tblock->real_offset = clause->handler_offset;
5255 tblock->flags |= BB_EXCEPTION_HANDLER;
5257 link_bblock (cfg, try_bb, tblock);
5259 if (*(ip + clause->handler_offset) == CEE_POP)
5260 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5262 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5263 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5264 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5265 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5266 MONO_ADD_INS (tblock, ins);
5268 /* todo: is a fault block unsafe to optimize? */
5269 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5270 tblock->flags |= BB_EXCEPTION_UNSAFE;
5274 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5276 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5278 /* catch and filter blocks get the exception object on the stack */
5279 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5280 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5281 MonoInst *dummy_use;
5283 /* mostly like handle_stack_args (), but just sets the input args */
5284 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5285 tblock->in_scount = 1;
5286 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5287 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5290 * Add a dummy use for the exvar so its liveness info will be
5294 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5296 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5297 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5298 tblock->flags |= BB_EXCEPTION_HANDLER;
5299 tblock->real_offset = clause->data.filter_offset;
5300 tblock->in_scount = 1;
5301 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5302 /* The filter block shares the exvar with the handler block */
5303 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5304 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5305 MONO_ADD_INS (tblock, ins);
5309 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5310 clause->data.catch_class &&
5311 cfg->generic_sharing_context &&
5312 mono_class_check_context_used (clause->data.catch_class)) {
5314 * In shared generic code with catch
5315 * clauses containing type variables
5316 * the exception handling code has to
5317 * be able to get to the rgctx.
5318 * Therefore we have to make sure that
5319 * the vtable/mrgctx argument (for
5320 * static or generic methods) or the
5321 * "this" argument (for non-static
5322 * methods) are live.
5324 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5325 mini_method_get_context (method)->method_inst ||
5326 method->klass->valuetype) {
5327 mono_get_vtable_var (cfg);
5329 MonoInst *dummy_use;
5331 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5336 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5337 cfg->cbb = start_bblock;
5338 cfg->args = arg_array;
5339 mono_save_args (cfg, sig, inline_args);
5342 /* FIRST CODE BLOCK */
5343 NEW_BBLOCK (cfg, bblock);
5344 bblock->cil_code = ip;
5348 ADD_BBLOCK (cfg, bblock);
5350 if (cfg->method == method) {
5351 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5352 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5353 MONO_INST_NEW (cfg, ins, OP_BREAK);
5354 MONO_ADD_INS (bblock, ins);
5358 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5359 secman = mono_security_manager_get_methods ();
5361 security = (secman && mono_method_has_declsec (method));
5362 /* at this point having security doesn't mean we have any code to generate */
5363 if (security && (cfg->method == method)) {
5364 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5365 * And we do not want to enter the next section (with allocation) if we
5366 * have nothing to generate */
5367 security = mono_declsec_get_demands (method, &actions);
5370 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5371 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5373 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5374 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5375 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5377 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5378 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5382 mono_custom_attrs_free (custom);
5385 custom = mono_custom_attrs_from_class (wrapped->klass);
5386 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5390 mono_custom_attrs_free (custom);
5393 /* not a P/Invoke after all */
5398 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5399 /* we use a separate basic block for the initialization code */
5400 NEW_BBLOCK (cfg, init_localsbb);
5401 cfg->bb_init = init_localsbb;
5402 init_localsbb->real_offset = cfg->real_offset;
5403 start_bblock->next_bb = init_localsbb;
5404 init_localsbb->next_bb = bblock;
5405 link_bblock (cfg, start_bblock, init_localsbb);
5406 link_bblock (cfg, init_localsbb, bblock);
5408 cfg->cbb = init_localsbb;
5410 start_bblock->next_bb = bblock;
5411 link_bblock (cfg, start_bblock, bblock);
5414 /* at this point we know, if security is TRUE, that some code needs to be generated */
5415 if (security && (cfg->method == method)) {
5418 mono_jit_stats.cas_demand_generation++;
5420 if (actions.demand.blob) {
5421 /* Add code for SecurityAction.Demand */
5422 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5423 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5424 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5425 mono_emit_method_call (cfg, secman->demand, args, NULL);
5427 if (actions.noncasdemand.blob) {
5428 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5429 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5430 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5431 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5432 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5433 mono_emit_method_call (cfg, secman->demand, args, NULL);
5435 if (actions.demandchoice.blob) {
5436 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5437 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5438 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5439 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5440 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5444 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5446 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5449 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5450 /* check if this is native code, e.g. an icall or a p/invoke */
5451 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5452 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5454 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
5455 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
5457 /* if this ia a native call then it can only be JITted from platform code */
5458 if ((icall || pinvk) && method->klass && method->klass->image) {
5459 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
5460 MonoException *ex = icall ? mono_get_exception_security () :
5461 mono_get_exception_method_access ();
5462 emit_throw_exception (cfg, ex);
5469 if (header->code_size == 0)
5472 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5477 if (cfg->method == method)
5478 mono_debug_init_method (cfg, bblock, breakpoint_id);
5480 for (n = 0; n < header->num_locals; ++n) {
5481 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5486 /* We force the vtable variable here for all shared methods
5487 for the possibility that they might show up in a stack
5488 trace where their exact instantiation is needed. */
5489 if (cfg->generic_sharing_context && method == cfg->method) {
5490 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5491 mini_method_get_context (method)->method_inst ||
5492 method->klass->valuetype) {
5493 mono_get_vtable_var (cfg);
5495 /* FIXME: Is there a better way to do this?
5496 We need the variable live for the duration
5497 of the whole method. */
5498 cfg->args [0]->flags |= MONO_INST_INDIRECT;
5502 /* add a check for this != NULL to inlined methods */
5503 if (is_virtual_call) {
5506 NEW_ARGLOAD (cfg, arg_ins, 0);
5507 MONO_ADD_INS (cfg->cbb, arg_ins);
5508 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
5511 skip_dead_blocks = !dont_verify;
5512 if (skip_dead_blocks) {
5513 original_bb = bb = mono_basic_block_split (method, &error);
5514 if (!mono_error_ok (&error)) {
5515 mono_error_cleanup (&error);
5521 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5522 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5525 start_new_bblock = 0;
5528 if (cfg->method == method)
5529 cfg->real_offset = ip - header->code;
5531 cfg->real_offset = inline_offset;
5536 if (start_new_bblock) {
5537 bblock->cil_length = ip - bblock->cil_code;
5538 if (start_new_bblock == 2) {
5539 g_assert (ip == tblock->cil_code);
5541 GET_BBLOCK (cfg, tblock, ip);
5543 bblock->next_bb = tblock;
5546 start_new_bblock = 0;
5547 for (i = 0; i < bblock->in_scount; ++i) {
5548 if (cfg->verbose_level > 3)
5549 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5550 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5554 g_slist_free (class_inits);
5557 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5558 link_bblock (cfg, bblock, tblock);
5559 if (sp != stack_start) {
5560 handle_stack_args (cfg, stack_start, sp - stack_start);
5562 CHECK_UNVERIFIABLE (cfg);
5564 bblock->next_bb = tblock;
5567 for (i = 0; i < bblock->in_scount; ++i) {
5568 if (cfg->verbose_level > 3)
5569 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5570 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5573 g_slist_free (class_inits);
5578 if (skip_dead_blocks) {
5579 int ip_offset = ip - header->code;
5581 if (ip_offset == bb->end)
5585 int op_size = mono_opcode_size (ip, end);
5586 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
5588 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
5590 if (ip_offset + op_size == bb->end) {
5591 MONO_INST_NEW (cfg, ins, OP_NOP);
5592 MONO_ADD_INS (bblock, ins);
5593 start_new_bblock = 1;
5601 * Sequence points are points where the debugger can place a breakpoint.
5602 * Currently, we generate these automatically at points where the IL
5605 if (seq_points && sp == stack_start) {
5606 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
5607 MONO_ADD_INS (cfg->cbb, ins);
5610 bblock->real_offset = cfg->real_offset;
5612 if ((cfg->method == method) && cfg->coverage_info) {
5613 guint32 cil_offset = ip - header->code;
5614 cfg->coverage_info->data [cil_offset].cil_code = ip;
5616 /* TODO: Use an increment here */
5617 #if defined(TARGET_X86)
5618 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5619 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5621 MONO_ADD_INS (cfg->cbb, ins);
5623 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5624 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5628 if (cfg->verbose_level > 3)
5629 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5633 if (cfg->keep_cil_nops)
5634 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
5636 MONO_INST_NEW (cfg, ins, OP_NOP);
5638 MONO_ADD_INS (bblock, ins);
5641 if (should_insert_brekpoint (cfg->method))
5642 MONO_INST_NEW (cfg, ins, OP_BREAK);
5644 MONO_INST_NEW (cfg, ins, OP_NOP);
5646 MONO_ADD_INS (bblock, ins);
5652 CHECK_STACK_OVF (1);
5653 n = (*ip)-CEE_LDARG_0;
5655 EMIT_NEW_ARGLOAD (cfg, ins, n);
5663 CHECK_STACK_OVF (1);
5664 n = (*ip)-CEE_LDLOC_0;
5666 EMIT_NEW_LOCLOAD (cfg, ins, n);
5675 n = (*ip)-CEE_STLOC_0;
5678 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5680 emit_stloc_ir (cfg, sp, header, n);
5687 CHECK_STACK_OVF (1);
5690 EMIT_NEW_ARGLOAD (cfg, ins, n);
5696 CHECK_STACK_OVF (1);
5699 NEW_ARGLOADA (cfg, ins, n);
5700 MONO_ADD_INS (cfg->cbb, ins);
5710 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5712 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5717 CHECK_STACK_OVF (1);
5720 EMIT_NEW_LOCLOAD (cfg, ins, n);
5724 case CEE_LDLOCA_S: {
5725 unsigned char *tmp_ip;
5727 CHECK_STACK_OVF (1);
5728 CHECK_LOCAL (ip [1]);
5730 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
5736 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5745 CHECK_LOCAL (ip [1]);
5746 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5748 emit_stloc_ir (cfg, sp, header, ip [1]);
5753 CHECK_STACK_OVF (1);
5754 EMIT_NEW_PCONST (cfg, ins, NULL);
5755 ins->type = STACK_OBJ;
5760 CHECK_STACK_OVF (1);
5761 EMIT_NEW_ICONST (cfg, ins, -1);
5774 CHECK_STACK_OVF (1);
5775 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
5781 CHECK_STACK_OVF (1);
5783 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
5789 CHECK_STACK_OVF (1);
5790 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
5796 CHECK_STACK_OVF (1);
5797 MONO_INST_NEW (cfg, ins, OP_I8CONST);
5798 ins->type = STACK_I8;
5799 ins->dreg = alloc_dreg (cfg, STACK_I8);
5801 ins->inst_l = (gint64)read64 (ip);
5802 MONO_ADD_INS (bblock, ins);
5808 gboolean use_aotconst = FALSE;
5810 #ifdef TARGET_POWERPC
5811 /* FIXME: Clean this up */
5812 if (cfg->compile_aot)
5813 use_aotconst = TRUE;
5816 /* FIXME: we should really allocate this only late in the compilation process */
5817 f = mono_domain_alloc (cfg->domain, sizeof (float));
5819 CHECK_STACK_OVF (1);
5825 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
5827 dreg = alloc_freg (cfg);
5828 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
5829 ins->type = STACK_R8;
5831 MONO_INST_NEW (cfg, ins, OP_R4CONST);
5832 ins->type = STACK_R8;
5833 ins->dreg = alloc_dreg (cfg, STACK_R8);
5835 MONO_ADD_INS (bblock, ins);
5845 gboolean use_aotconst = FALSE;
5847 #ifdef TARGET_POWERPC
5848 /* FIXME: Clean this up */
5849 if (cfg->compile_aot)
5850 use_aotconst = TRUE;
5853 /* FIXME: we should really allocate this only late in the compilation process */
5854 d = mono_domain_alloc (cfg->domain, sizeof (double));
5856 CHECK_STACK_OVF (1);
5862 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
5864 dreg = alloc_freg (cfg);
5865 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
5866 ins->type = STACK_R8;
5868 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5869 ins->type = STACK_R8;
5870 ins->dreg = alloc_dreg (cfg, STACK_R8);
5872 MONO_ADD_INS (bblock, ins);
5881 MonoInst *temp, *store;
5883 CHECK_STACK_OVF (1);
5887 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
5888 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
5890 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5893 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5906 if (sp [0]->type == STACK_R8)
5907 /* we need to pop the value from the x86 FP stack */
5908 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
5917 if (stack_start != sp)
5919 token = read32 (ip + 1);
5920 /* FIXME: check the signature matches */
5921 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5926 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
5927 GENERIC_SHARING_FAILURE (CEE_JMP);
5929 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5930 CHECK_CFG_EXCEPTION;
5932 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
5934 MonoMethodSignature *fsig = mono_method_signature (cmethod);
5937 /* Handle tail calls similarly to calls */
5938 n = fsig->param_count + fsig->hasthis;
5940 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
5941 call->method = cmethod;
5942 call->tail_call = TRUE;
5943 call->signature = mono_method_signature (cmethod);
5944 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
5945 call->inst.inst_p0 = cmethod;
5946 for (i = 0; i < n; ++i)
5947 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
5949 mono_arch_emit_call (cfg, call);
5950 MONO_ADD_INS (bblock, (MonoInst*)call);
5953 for (i = 0; i < num_args; ++i)
5954 /* Prevent arguments from being optimized away */
5955 arg_array [i]->flags |= MONO_INST_VOLATILE;
5957 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
5958 ins = (MonoInst*)call;
5959 ins->inst_p0 = cmethod;
5960 MONO_ADD_INS (bblock, ins);
5964 start_new_bblock = 1;
5969 case CEE_CALLVIRT: {
5970 MonoInst *addr = NULL;
5971 MonoMethodSignature *fsig = NULL;
5973 int virtual = *ip == CEE_CALLVIRT;
5974 int calli = *ip == CEE_CALLI;
5975 gboolean pass_imt_from_rgctx = FALSE;
5976 MonoInst *imt_arg = NULL;
5977 gboolean pass_vtable = FALSE;
5978 gboolean pass_mrgctx = FALSE;
5979 MonoInst *vtable_arg = NULL;
5980 gboolean check_this = FALSE;
5981 gboolean supported_tail_call = FALSE;
5984 token = read32 (ip + 1);
5991 if (method->wrapper_type != MONO_WRAPPER_NONE)
5992 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
5994 fsig = mono_metadata_parse_signature (image, token);
5996 n = fsig->param_count + fsig->hasthis;
5998 if (method->dynamic && fsig->pinvoke) {
6002 * This is a call through a function pointer using a pinvoke
6003 * signature. Have to create a wrapper and call that instead.
6004 * FIXME: This is very slow, need to create a wrapper at JIT time
6005 * instead based on the signature.
6007 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
6008 EMIT_NEW_PCONST (cfg, args [1], fsig);
6010 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6013 MonoMethod *cil_method;
6015 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6016 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6017 cil_method = cmethod;
6018 } else if (constrained_call) {
6019 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6021 * This is needed since get_method_constrained can't find
6022 * the method in klass representing a type var.
6023 * The type var is guaranteed to be a reference type in this
6026 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6027 cil_method = cmethod;
6028 g_assert (!cmethod->klass->valuetype);
6030 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6033 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6034 cil_method = cmethod;
6039 if (!dont_verify && !cfg->skip_visibility) {
6040 MonoMethod *target_method = cil_method;
6041 if (method->is_inflated) {
6042 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6044 if (!mono_method_can_access_method (method_definition, target_method) &&
6045 !mono_method_can_access_method (method, cil_method))
6046 METHOD_ACCESS_FAILURE;
6049 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6050 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6052 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6053 /* MS.NET seems to silently convert this to a callvirt */
6058 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
6059 * converts to a callvirt.
6061 * tests/bug-515884.il is an example of this behavior
6063 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
6064 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
6065 if (!virtual && cmethod->klass->marshalbyref && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
6069 if (!cmethod->klass->inited)
6070 if (!mono_class_init (cmethod->klass))
6073 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6074 mini_class_is_system_array (cmethod->klass)) {
6075 array_rank = cmethod->klass->rank;
6076 fsig = mono_method_signature (cmethod);
6078 fsig = mono_method_signature (cmethod);
6083 if (fsig->pinvoke) {
6084 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6085 check_for_pending_exc, FALSE);
6086 fsig = mono_method_signature (wrapper);
6087 } else if (constrained_call) {
6088 fsig = mono_method_signature (cmethod);
6090 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6094 mono_save_token_info (cfg, image, token, cil_method);
6096 n = fsig->param_count + fsig->hasthis;
6098 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6099 if (check_linkdemand (cfg, method, cmethod))
6101 CHECK_CFG_EXCEPTION;
6104 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6105 g_assert_not_reached ();
6108 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6111 if (!cfg->generic_sharing_context && cmethod)
6112 g_assert (!mono_method_check_context_used (cmethod));
6116 //g_assert (!virtual || fsig->hasthis);
6120 if (constrained_call) {
6122 * We have the `constrained.' prefix opcode.
6124 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6126 * The type parameter is instantiated as a valuetype,
6127 * but that type doesn't override the method we're
6128 * calling, so we need to box `this'.
6130 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6131 ins->klass = constrained_call;
6132 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
6133 CHECK_CFG_EXCEPTION;
6134 } else if (!constrained_call->valuetype) {
6135 int dreg = alloc_preg (cfg);
6138 * The type parameter is instantiated as a reference
6139 * type. We have a managed pointer on the stack, so
6140 * we need to dereference it here.
6142 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6143 ins->type = STACK_OBJ;
6145 } else if (cmethod->klass->valuetype)
6147 constrained_call = NULL;
6150 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6154 * If the callee is a shared method, then its static cctor
6155 * might not get called after the call was patched.
6157 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6158 emit_generic_class_init (cfg, cmethod->klass);
6159 CHECK_TYPELOAD (cmethod->klass);
6162 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6163 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6164 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6165 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6166 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6169 * Pass vtable iff target method might
6170 * be shared, which means that sharing
6171 * is enabled for its class and its
6172 * context is sharable (and it's not a
6175 if (sharing_enabled && context_sharable &&
6176 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6180 if (cmethod && mini_method_get_context (cmethod) &&
6181 mini_method_get_context (cmethod)->method_inst) {
6182 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6183 MonoGenericContext *context = mini_method_get_context (cmethod);
6184 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6186 g_assert (!pass_vtable);
6188 if (sharing_enabled && context_sharable)
6192 if (cfg->generic_sharing_context && cmethod) {
6193 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6195 context_used = mono_method_check_context_used (cmethod);
6197 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6198 /* Generic method interface
6199 calls are resolved via a
6200 helper function and don't
6202 if (!cmethod_context || !cmethod_context->method_inst)
6203 pass_imt_from_rgctx = TRUE;
6207 * If a shared method calls another
6208 * shared method then the caller must
6209 * have a generic sharing context
6210 * because the magic trampoline
6211 * requires it. FIXME: We shouldn't
6212 * have to force the vtable/mrgctx
6213 * variable here. Instead there
6214 * should be a flag in the cfg to
6215 * request a generic sharing context.
6218 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6219 mono_get_vtable_var (cfg);
6224 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6226 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6228 CHECK_TYPELOAD (cmethod->klass);
6229 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6234 g_assert (!vtable_arg);
6236 if (!cfg->compile_aot) {
6238 * emit_get_rgctx_method () calls mono_class_vtable () so check
6239 * for type load errors before.
6241 mono_class_setup_vtable (cmethod->klass);
6242 CHECK_TYPELOAD (cmethod->klass);
6245 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6247 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6248 MONO_METHOD_IS_FINAL (cmethod)) {
6255 if (pass_imt_from_rgctx) {
6256 g_assert (!pass_vtable);
6259 imt_arg = emit_get_rgctx_method (cfg, context_used,
6260 cmethod, MONO_RGCTX_INFO_METHOD);
6264 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
6266 /* Calling virtual generic methods */
6267 if (cmethod && virtual &&
6268 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6269 !(MONO_METHOD_IS_FINAL (cmethod) &&
6270 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6271 mono_method_signature (cmethod)->generic_param_count) {
6272 MonoInst *this_temp, *this_arg_temp, *store;
6273 MonoInst *iargs [4];
6275 g_assert (mono_method_signature (cmethod)->is_inflated);
6277 /* Prevent inlining of methods that contain indirect calls */
6280 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
6281 /* The llvm vcall trampolines don't support generic virtual calls yet */
6282 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt && !mono_use_llvm) {
6283 g_assert (!imt_arg);
6285 g_assert (cmethod->is_inflated);
6286 imt_arg = emit_get_rgctx_method (cfg, context_used,
6287 cmethod, MONO_RGCTX_INFO_METHOD);
6288 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6292 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6293 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6294 MONO_ADD_INS (bblock, store);
6296 /* FIXME: This should be a managed pointer */
6297 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6299 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6300 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6301 cmethod, MONO_RGCTX_INFO_METHOD);
6302 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6303 addr = mono_emit_jit_icall (cfg,
6304 mono_helper_compile_generic_method, iargs);
6306 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6308 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6311 if (!MONO_TYPE_IS_VOID (fsig->ret))
6312 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6314 CHECK_CFG_EXCEPTION;
6321 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6322 supported_tail_call = cmethod && MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
6324 supported_tail_call = cmethod && mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6328 /* FIXME: runtime generic context pointer for jumps? */
6329 /* FIXME: handle this for generic sharing eventually */
6330 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) && supported_tail_call) {
6333 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6336 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6337 /* Handle tail calls similarly to calls */
6338 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE);
6340 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6341 call->tail_call = TRUE;
6342 call->method = cmethod;
6343 call->signature = mono_method_signature (cmethod);
6346 * We implement tail calls by storing the actual arguments into the
6347 * argument variables, then emitting a CEE_JMP.
6349 for (i = 0; i < n; ++i) {
6350 /* Prevent argument from being register allocated */
6351 arg_array [i]->flags |= MONO_INST_VOLATILE;
6352 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6356 ins = (MonoInst*)call;
6357 ins->inst_p0 = cmethod;
6358 ins->inst_p1 = arg_array [0];
6359 MONO_ADD_INS (bblock, ins);
6360 link_bblock (cfg, bblock, end_bblock);
6361 start_new_bblock = 1;
6363 CHECK_CFG_EXCEPTION;
6365 /* skip CEE_RET as well */
6371 /* Conversion to a JIT intrinsic */
6372 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6373 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6374 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6379 CHECK_CFG_EXCEPTION;
6387 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6388 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6389 mono_method_check_inlining (cfg, cmethod) &&
6390 !g_list_find (dont_inline, cmethod)) {
6392 gboolean allways = FALSE;
6394 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6395 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6396 /* Prevent inlining of methods that call wrappers */
6398 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6402 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6404 cfg->real_offset += 5;
6407 if (!MONO_TYPE_IS_VOID (fsig->ret))
6408 /* *sp is already set by inline_method */
6411 inline_costs += costs;
6417 inline_costs += 10 * num_calls++;
6419 /* Tail recursion elimination */
6420 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6421 gboolean has_vtargs = FALSE;
6424 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6427 /* keep it simple */
6428 for (i = fsig->param_count - 1; i >= 0; i--) {
6429 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6434 for (i = 0; i < n; ++i)
6435 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6436 MONO_INST_NEW (cfg, ins, OP_BR);
6437 MONO_ADD_INS (bblock, ins);
6438 tblock = start_bblock->out_bb [0];
6439 link_bblock (cfg, bblock, tblock);
6440 ins->inst_target_bb = tblock;
6441 start_new_bblock = 1;
6443 /* skip the CEE_RET, too */
6444 if (ip_in_bb (cfg, bblock, ip + 5))
6454 /* Generic sharing */
6455 /* FIXME: only do this for generic methods if
6456 they are not shared! */
6457 if (context_used && !imt_arg && !array_rank &&
6458 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6459 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6460 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6461 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6464 g_assert (cfg->generic_sharing_context && cmethod);
6468 * We are compiling a call to a
6469 * generic method from shared code,
6470 * which means that we have to look up
6471 * the method in the rgctx and do an
6474 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6477 /* Indirect calls */
6479 g_assert (!imt_arg);
6481 if (*ip == CEE_CALL)
6482 g_assert (context_used);
6483 else if (*ip == CEE_CALLI)
6484 g_assert (!vtable_arg);
6486 /* FIXME: what the hell is this??? */
6487 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6488 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6490 /* Prevent inlining of methods with indirect calls */
6494 #ifdef MONO_ARCH_RGCTX_REG
6496 int rgctx_reg = mono_alloc_preg (cfg);
6498 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6499 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6500 call = (MonoCallInst*)ins;
6501 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6502 cfg->uses_rgctx_reg = TRUE;
6503 call->rgctx_reg = TRUE;
6508 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6510 * Instead of emitting an indirect call, emit a direct call
6511 * with the contents of the aotconst as the patch info.
6513 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6515 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6516 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
6519 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6522 if (!MONO_TYPE_IS_VOID (fsig->ret))
6523 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6525 CHECK_CFG_EXCEPTION;
6536 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6537 if (sp [fsig->param_count]->type == STACK_OBJ) {
6538 MonoInst *iargs [2];
6541 iargs [1] = sp [fsig->param_count];
6543 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6546 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6547 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6548 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6549 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6551 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6554 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6555 if (!cmethod->klass->element_class->valuetype && !readonly)
6556 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6557 CHECK_TYPELOAD (cmethod->klass);
6560 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6563 g_assert_not_reached ();
6566 CHECK_CFG_EXCEPTION;
6573 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6575 if (!MONO_TYPE_IS_VOID (fsig->ret))
6576 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6578 CHECK_CFG_EXCEPTION;
6588 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6590 } else if (imt_arg) {
6591 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6593 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6596 if (!MONO_TYPE_IS_VOID (fsig->ret))
6597 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6599 CHECK_CFG_EXCEPTION;
6606 if (cfg->method != method) {
6607 /* return from inlined method */
6609 * If in_count == 0, that means the ret is unreachable due to
5610 * being preceded by a throw. In that case, inline_method () will
6611 * handle setting the return value
6612 * (test case: test_0_inline_throw ()).
6614 if (return_var && cfg->cbb->in_count) {
6618 //g_assert (returnvar != -1);
6619 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6620 cfg->ret_var_set = TRUE;
6624 MonoType *ret_type = mono_method_signature (method)->ret;
6628 * Place a seq point here too even though the IL stack is not
6629 * empty, so a step over on
6632 * will work correctly.
6634 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
6635 MONO_ADD_INS (cfg->cbb, ins);
6638 g_assert (!return_var);
6641 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6644 if (!cfg->vret_addr) {
6647 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6649 EMIT_NEW_RETLOADA (cfg, ret_addr);
6651 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6652 ins->klass = mono_class_from_mono_type (ret_type);
6655 #ifdef MONO_ARCH_SOFT_FLOAT
6656 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6657 MonoInst *iargs [1];
6661 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6662 mono_arch_emit_setret (cfg, method, conv);
6664 mono_arch_emit_setret (cfg, method, *sp);
6667 mono_arch_emit_setret (cfg, method, *sp);
6672 if (sp != stack_start)
6674 MONO_INST_NEW (cfg, ins, OP_BR);
6676 ins->inst_target_bb = end_bblock;
6677 MONO_ADD_INS (bblock, ins);
6678 link_bblock (cfg, bblock, end_bblock);
6679 start_new_bblock = 1;
6683 MONO_INST_NEW (cfg, ins, OP_BR);
6685 target = ip + 1 + (signed char)(*ip);
6687 GET_BBLOCK (cfg, tblock, target);
6688 link_bblock (cfg, bblock, tblock);
6689 ins->inst_target_bb = tblock;
6690 if (sp != stack_start) {
6691 handle_stack_args (cfg, stack_start, sp - stack_start);
6693 CHECK_UNVERIFIABLE (cfg);
6695 MONO_ADD_INS (bblock, ins);
6696 start_new_bblock = 1;
6697 inline_costs += BRANCH_COST;
6711 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6713 target = ip + 1 + *(signed char*)ip;
6719 inline_costs += BRANCH_COST;
6723 MONO_INST_NEW (cfg, ins, OP_BR);
6726 target = ip + 4 + (gint32)read32(ip);
6728 GET_BBLOCK (cfg, tblock, target);
6729 link_bblock (cfg, bblock, tblock);
6730 ins->inst_target_bb = tblock;
6731 if (sp != stack_start) {
6732 handle_stack_args (cfg, stack_start, sp - stack_start);
6734 CHECK_UNVERIFIABLE (cfg);
6737 MONO_ADD_INS (bblock, ins);
6739 start_new_bblock = 1;
6740 inline_costs += BRANCH_COST;
6747 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6748 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6749 guint32 opsize = is_short ? 1 : 4;
6751 CHECK_OPSIZE (opsize);
6753 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6756 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6761 GET_BBLOCK (cfg, tblock, target);
6762 link_bblock (cfg, bblock, tblock);
6763 GET_BBLOCK (cfg, tblock, ip);
6764 link_bblock (cfg, bblock, tblock);
6766 if (sp != stack_start) {
6767 handle_stack_args (cfg, stack_start, sp - stack_start);
6768 CHECK_UNVERIFIABLE (cfg);
6771 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6772 cmp->sreg1 = sp [0]->dreg;
6773 type_from_op (cmp, sp [0], NULL);
6776 #if SIZEOF_REGISTER == 4
6777 if (cmp->opcode == OP_LCOMPARE_IMM) {
6778 /* Convert it to OP_LCOMPARE */
6779 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6780 ins->type = STACK_I8;
6781 ins->dreg = alloc_dreg (cfg, STACK_I8);
6783 MONO_ADD_INS (bblock, ins);
6784 cmp->opcode = OP_LCOMPARE;
6785 cmp->sreg2 = ins->dreg;
6788 MONO_ADD_INS (bblock, cmp);
6790 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6791 type_from_op (ins, sp [0], NULL);
6792 MONO_ADD_INS (bblock, ins);
6793 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
6794 GET_BBLOCK (cfg, tblock, target);
6795 ins->inst_true_bb = tblock;
6796 GET_BBLOCK (cfg, tblock, ip);
6797 ins->inst_false_bb = tblock;
6798 start_new_bblock = 2;
6801 inline_costs += BRANCH_COST;
6816 MONO_INST_NEW (cfg, ins, *ip);
6818 target = ip + 4 + (gint32)read32(ip);
6824 inline_costs += BRANCH_COST;
6828 MonoBasicBlock **targets;
6829 MonoBasicBlock *default_bblock;
6830 MonoJumpInfoBBTable *table;
6831 int offset_reg = alloc_preg (cfg);
6832 int target_reg = alloc_preg (cfg);
6833 int table_reg = alloc_preg (cfg);
6834 int sum_reg = alloc_preg (cfg);
6835 gboolean use_op_switch;
6839 n = read32 (ip + 1);
6842 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
6846 CHECK_OPSIZE (n * sizeof (guint32));
6847 target = ip + n * sizeof (guint32);
6849 GET_BBLOCK (cfg, default_bblock, target);
6851 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
6852 for (i = 0; i < n; ++i) {
6853 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
6854 targets [i] = tblock;
6858 if (sp != stack_start) {
6860 * Link the current bb with the targets as well, so handle_stack_args
6861 * will set their in_stack correctly.
6863 link_bblock (cfg, bblock, default_bblock);
6864 for (i = 0; i < n; ++i)
6865 link_bblock (cfg, bblock, targets [i]);
6867 handle_stack_args (cfg, stack_start, sp - stack_start);
6869 CHECK_UNVERIFIABLE (cfg);
6872 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
6873 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
6876 for (i = 0; i < n; ++i)
6877 link_bblock (cfg, bblock, targets [i]);
6879 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
6880 table->table = targets;
6881 table->table_size = n;
6883 use_op_switch = FALSE;
6885 /* ARM implements SWITCH statements differently */
6886 /* FIXME: Make it use the generic implementation */
6887 if (!cfg->compile_aot)
6888 use_op_switch = TRUE;
6891 if (COMPILE_LLVM (cfg))
6892 use_op_switch = TRUE;
6894 cfg->cbb->has_jump_table = 1;
6896 if (use_op_switch) {
6897 MONO_INST_NEW (cfg, ins, OP_SWITCH);
6898 ins->sreg1 = src1->dreg;
6899 ins->inst_p0 = table;
6900 ins->inst_many_bb = targets;
6901 ins->klass = GUINT_TO_POINTER (n);
6902 MONO_ADD_INS (cfg->cbb, ins);
6904 if (sizeof (gpointer) == 8)
6905 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
6907 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
6909 #if SIZEOF_REGISTER == 8
6910 /* The upper word might not be zero, and we add it to a 64 bit address later */
6911 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
6914 if (cfg->compile_aot) {
6915 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
6917 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
6918 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
6919 ins->inst_p0 = table;
6920 ins->dreg = table_reg;
6921 MONO_ADD_INS (cfg->cbb, ins);
6924 /* FIXME: Use load_memindex */
6925 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
6926 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
6927 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
6929 start_new_bblock = 1;
6930 inline_costs += (BRANCH_COST * 2);
6950 dreg = alloc_freg (cfg);
6953 dreg = alloc_lreg (cfg);
6956 dreg = alloc_preg (cfg);
6959 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
6960 ins->type = ldind_type [*ip - CEE_LDIND_I1];
6961 ins->flags |= ins_flag;
6963 MONO_ADD_INS (bblock, ins);
6978 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
6979 ins->flags |= ins_flag;
6981 MONO_ADD_INS (bblock, ins);
6983 #if HAVE_WRITE_BARRIERS
6984 if (*ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0))) {
6985 MonoInst *dummy_use;
6986 /* insert call to write barrier */
6987 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
6988 mono_emit_method_call (cfg, write_barrier, sp, NULL);
6989 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
7000 MONO_INST_NEW (cfg, ins, (*ip));
7002 ins->sreg1 = sp [0]->dreg;
7003 ins->sreg2 = sp [1]->dreg;
7004 type_from_op (ins, sp [0], sp [1]);
7006 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7008 /* Use the immediate opcodes if possible */
7009 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
7010 int imm_opcode = mono_op_to_op_imm (ins->opcode);
7011 if (imm_opcode != -1) {
7012 ins->opcode = imm_opcode;
7013 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7016 sp [1]->opcode = OP_NOP;
7020 MONO_ADD_INS ((cfg)->cbb, (ins));
7022 *sp++ = mono_decompose_opcode (cfg, ins);
7039 MONO_INST_NEW (cfg, ins, (*ip));
7041 ins->sreg1 = sp [0]->dreg;
7042 ins->sreg2 = sp [1]->dreg;
7043 type_from_op (ins, sp [0], sp [1]);
7045 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7046 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7048 /* FIXME: Pass opcode to is_inst_imm */
7050 /* Use the immediate opcodes if possible */
7051 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7054 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7055 if (imm_opcode != -1) {
7056 ins->opcode = imm_opcode;
7057 if (sp [1]->opcode == OP_I8CONST) {
7058 #if SIZEOF_REGISTER == 8
7059 ins->inst_imm = sp [1]->inst_l;
7061 ins->inst_ls_word = sp [1]->inst_ls_word;
7062 ins->inst_ms_word = sp [1]->inst_ms_word;
7066 ins->inst_imm = (gssize)(sp [1]->inst_c0);
7069 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7070 if (sp [1]->next == NULL)
7071 sp [1]->opcode = OP_NOP;
7074 MONO_ADD_INS ((cfg)->cbb, (ins));
7076 *sp++ = mono_decompose_opcode (cfg, ins);
7089 case CEE_CONV_OVF_I8:
7090 case CEE_CONV_OVF_U8:
7094 /* Special case this earlier so we have long constants in the IR */
7095 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7096 int data = sp [-1]->inst_c0;
7097 sp [-1]->opcode = OP_I8CONST;
7098 sp [-1]->type = STACK_I8;
7099 #if SIZEOF_REGISTER == 8
7100 if ((*ip) == CEE_CONV_U8)
7101 sp [-1]->inst_c0 = (guint32)data;
7103 sp [-1]->inst_c0 = data;
7105 sp [-1]->inst_ls_word = data;
7106 if ((*ip) == CEE_CONV_U8)
7107 sp [-1]->inst_ms_word = 0;
7109 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7111 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7118 case CEE_CONV_OVF_I4:
7119 case CEE_CONV_OVF_I1:
7120 case CEE_CONV_OVF_I2:
7121 case CEE_CONV_OVF_I:
7122 case CEE_CONV_OVF_U:
7125 if (sp [-1]->type == STACK_R8) {
7126 ADD_UNOP (CEE_CONV_OVF_I8);
7133 case CEE_CONV_OVF_U1:
7134 case CEE_CONV_OVF_U2:
7135 case CEE_CONV_OVF_U4:
7138 if (sp [-1]->type == STACK_R8) {
7139 ADD_UNOP (CEE_CONV_OVF_U8);
7146 case CEE_CONV_OVF_I1_UN:
7147 case CEE_CONV_OVF_I2_UN:
7148 case CEE_CONV_OVF_I4_UN:
7149 case CEE_CONV_OVF_I8_UN:
7150 case CEE_CONV_OVF_U1_UN:
7151 case CEE_CONV_OVF_U2_UN:
7152 case CEE_CONV_OVF_U4_UN:
7153 case CEE_CONV_OVF_U8_UN:
7154 case CEE_CONV_OVF_I_UN:
7155 case CEE_CONV_OVF_U_UN:
7162 CHECK_CFG_EXCEPTION;
7166 case CEE_ADD_OVF_UN:
7168 case CEE_MUL_OVF_UN:
7170 case CEE_SUB_OVF_UN:
7178 token = read32 (ip + 1);
7179 klass = mini_get_class (method, token, generic_context);
7180 CHECK_TYPELOAD (klass);
7182 if (generic_class_is_reference_type (cfg, klass)) {
7183 MonoInst *store, *load;
7184 int dreg = alloc_preg (cfg);
7186 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7187 load->flags |= ins_flag;
7188 MONO_ADD_INS (cfg->cbb, load);
7190 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7191 store->flags |= ins_flag;
7192 MONO_ADD_INS (cfg->cbb, store);
7194 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7206 token = read32 (ip + 1);
7207 klass = mini_get_class (method, token, generic_context);
7208 CHECK_TYPELOAD (klass);
7210 /* Optimize the common ldobj+stloc combination */
7220 loc_index = ip [5] - CEE_STLOC_0;
7227 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7228 CHECK_LOCAL (loc_index);
7230 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7231 ins->dreg = cfg->locals [loc_index]->dreg;
7237 /* Optimize the ldobj+stobj combination */
7238 /* The reference case ends up being a load+store anyway */
7239 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7244 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7251 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7260 CHECK_STACK_OVF (1);
7262 n = read32 (ip + 1);
7264 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7265 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7266 ins->type = STACK_OBJ;
7269 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7270 MonoInst *iargs [1];
7272 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7273 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7275 if (cfg->opt & MONO_OPT_SHARED) {
7276 MonoInst *iargs [3];
7278 if (cfg->compile_aot) {
7279 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7281 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7282 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7283 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7284 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7285 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7287 if (bblock->out_of_line) {
7288 MonoInst *iargs [2];
7290 if (image == mono_defaults.corlib) {
7292 * Avoid relocations in AOT and save some space by using a
7293 * version of helper_ldstr specialized to mscorlib.
7295 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7296 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7298 /* Avoid creating the string object */
7299 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7300 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7301 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7305 if (cfg->compile_aot) {
7306 NEW_LDSTRCONST (cfg, ins, image, n);
7308 MONO_ADD_INS (bblock, ins);
7311 NEW_PCONST (cfg, ins, NULL);
7312 ins->type = STACK_OBJ;
7313 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7315 MONO_ADD_INS (bblock, ins);
7324 MonoInst *iargs [2];
7325 MonoMethodSignature *fsig;
7328 MonoInst *vtable_arg = NULL;
7331 token = read32 (ip + 1);
7332 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7335 fsig = mono_method_get_signature (cmethod, image, token);
7339 mono_save_token_info (cfg, image, token, cmethod);
7341 if (!mono_class_init (cmethod->klass))
7344 if (cfg->generic_sharing_context)
7345 context_used = mono_method_check_context_used (cmethod);
7347 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7348 if (check_linkdemand (cfg, method, cmethod))
7350 CHECK_CFG_EXCEPTION;
7351 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7352 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7355 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7356 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7357 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7358 mono_class_vtable (cfg->domain, cmethod->klass);
7359 CHECK_TYPELOAD (cmethod->klass);
7361 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7362 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7365 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7366 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7368 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7370 CHECK_TYPELOAD (cmethod->klass);
7371 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7376 n = fsig->param_count;
7380 * Generate smaller code for the common newobj <exception> instruction in
7381 * argument checking code.
7383 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7384 is_exception_class (cmethod->klass) && n <= 2 &&
7385 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7386 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7387 MonoInst *iargs [3];
7389 g_assert (!vtable_arg);
7393 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7396 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7400 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7405 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7408 g_assert_not_reached ();
7416 /* move the args to allow room for 'this' in the first position */
7422 /* check_call_signature () requires sp[0] to be set */
7423 this_ins.type = STACK_OBJ;
7425 if (check_call_signature (cfg, fsig, sp))
7430 if (mini_class_is_system_array (cmethod->klass)) {
7431 g_assert (!vtable_arg);
7433 *sp = emit_get_rgctx_method (cfg, context_used,
7434 cmethod, MONO_RGCTX_INFO_METHOD);
7436 /* Avoid varargs in the common case */
7437 if (fsig->param_count == 1)
7438 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7439 else if (fsig->param_count == 2)
7440 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7441 else if (fsig->param_count == 3)
7442 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
7444 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7445 } else if (cmethod->string_ctor) {
7446 g_assert (!context_used);
7447 g_assert (!vtable_arg);
7448 /* we simply pass a null pointer */
7449 EMIT_NEW_PCONST (cfg, *sp, NULL);
7450 /* now call the string ctor */
7451 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7453 MonoInst* callvirt_this_arg = NULL;
7455 if (cmethod->klass->valuetype) {
7456 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7457 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7458 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7463 * The code generated by mini_emit_virtual_call () expects
7464 * iargs [0] to be a boxed instance, but luckily the vcall
7465 * will be transformed into a normal call there.
7467 } else if (context_used) {
7468 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
7471 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7473 CHECK_TYPELOAD (cmethod->klass);
7476 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7477 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7478 * As a workaround, we call class cctors before allocating objects.
7480 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7481 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7482 if (cfg->verbose_level > 2)
7483 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7484 class_inits = g_slist_prepend (class_inits, vtable);
7487 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
7490 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
7493 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7495 /* Now call the actual ctor */
7496 /* Avoid virtual calls to ctors if possible */
7497 if (cmethod->klass->marshalbyref)
7498 callvirt_this_arg = sp [0];
7500 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7501 mono_method_check_inlining (cfg, cmethod) &&
7502 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7503 !g_list_find (dont_inline, cmethod)) {
7506 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7507 cfg->real_offset += 5;
7510 inline_costs += costs - 5;
7513 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7515 } else if (context_used &&
7516 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7517 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7518 MonoInst *cmethod_addr;
7520 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7521 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7523 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7526 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7527 callvirt_this_arg, NULL, vtable_arg);
7531 if (alloc == NULL) {
7533 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7534 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7548 token = read32 (ip + 1);
7549 klass = mini_get_class (method, token, generic_context);
7550 CHECK_TYPELOAD (klass);
7551 if (sp [0]->type != STACK_OBJ)
7554 if (cfg->generic_sharing_context)
7555 context_used = mono_class_check_context_used (klass);
7557 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7564 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7566 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7570 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7571 MonoMethod *mono_castclass;
7572 MonoInst *iargs [1];
7575 mono_castclass = mono_marshal_get_castclass (klass);
7578 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7579 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7580 g_assert (costs > 0);
7583 cfg->real_offset += 5;
7588 inline_costs += costs;
7591 ins = handle_castclass (cfg, klass, *sp, context_used);
7592 CHECK_CFG_EXCEPTION;
7602 token = read32 (ip + 1);
7603 klass = mini_get_class (method, token, generic_context);
7604 CHECK_TYPELOAD (klass);
7605 if (sp [0]->type != STACK_OBJ)
7608 if (cfg->generic_sharing_context)
7609 context_used = mono_class_check_context_used (klass);
7611 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7618 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7620 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7624 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7625 MonoMethod *mono_isinst;
7626 MonoInst *iargs [1];
7629 mono_isinst = mono_marshal_get_isinst (klass);
7632 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7633 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7634 g_assert (costs > 0);
7637 cfg->real_offset += 5;
7642 inline_costs += costs;
7645 ins = handle_isinst (cfg, klass, *sp, context_used);
7646 CHECK_CFG_EXCEPTION;
7653 case CEE_UNBOX_ANY: {
7657 token = read32 (ip + 1);
7658 klass = mini_get_class (method, token, generic_context);
7659 CHECK_TYPELOAD (klass);
7661 mono_save_token_info (cfg, image, token, klass);
7663 if (cfg->generic_sharing_context)
7664 context_used = mono_class_check_context_used (klass);
7666 if (generic_class_is_reference_type (cfg, klass)) {
7667 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
7669 MonoInst *iargs [2];
7674 iargs [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7675 ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
7679 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7680 MonoMethod *mono_castclass;
7681 MonoInst *iargs [1];
7684 mono_castclass = mono_marshal_get_castclass (klass);
7687 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7688 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7690 g_assert (costs > 0);
7693 cfg->real_offset += 5;
7697 inline_costs += costs;
7699 ins = handle_castclass (cfg, klass, *sp, 0);
7700 CHECK_CFG_EXCEPTION;
7708 if (mono_class_is_nullable (klass)) {
7709 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
7716 ins = handle_unbox (cfg, klass, sp, context_used);
7722 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7735 token = read32 (ip + 1);
7736 klass = mini_get_class (method, token, generic_context);
7737 CHECK_TYPELOAD (klass);
7739 mono_save_token_info (cfg, image, token, klass);
7741 if (cfg->generic_sharing_context)
7742 context_used = mono_class_check_context_used (klass);
7744 if (generic_class_is_reference_type (cfg, klass)) {
7750 if (klass == mono_defaults.void_class)
7752 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7754 /* frequent check in generic code: box (struct), brtrue */
7755 if (!mono_class_is_nullable (klass) &&
7756 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
7757 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7759 MONO_INST_NEW (cfg, ins, OP_BR);
7760 if (*ip == CEE_BRTRUE_S) {
7763 target = ip + 1 + (signed char)(*ip);
7768 target = ip + 4 + (gint)(read32 (ip));
7771 GET_BBLOCK (cfg, tblock, target);
7772 link_bblock (cfg, bblock, tblock);
7773 ins->inst_target_bb = tblock;
7774 GET_BBLOCK (cfg, tblock, ip);
7776 * This leads to some inconsistency, since the two bblocks are
7777 * not really connected, but it is needed for handling stack
7778 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
7779 * FIXME: This should only be needed if sp != stack_start, but that
7780 * doesn't work for some reason (test failure in mcs/tests on x86).
7782 link_bblock (cfg, bblock, tblock);
7783 if (sp != stack_start) {
7784 handle_stack_args (cfg, stack_start, sp - stack_start);
7786 CHECK_UNVERIFIABLE (cfg);
7788 MONO_ADD_INS (bblock, ins);
7789 start_new_bblock = 1;
7793 *sp++ = handle_box (cfg, val, klass, context_used);
7795 CHECK_CFG_EXCEPTION;
7804 token = read32 (ip + 1);
7805 klass = mini_get_class (method, token, generic_context);
7806 CHECK_TYPELOAD (klass);
7808 mono_save_token_info (cfg, image, token, klass);
7810 if (cfg->generic_sharing_context)
7811 context_used = mono_class_check_context_used (klass);
7813 if (mono_class_is_nullable (klass)) {
7816 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
7817 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
7821 ins = handle_unbox (cfg, klass, sp, context_used);
7831 MonoClassField *field;
7835 if (*ip == CEE_STFLD) {
7842 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
7844 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
7847 token = read32 (ip + 1);
7848 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7849 field = mono_method_get_wrapper_data (method, token);
7850 klass = field->parent;
7853 field = mono_field_from_token (image, token, &klass, generic_context);
7857 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7858 FIELD_ACCESS_FAILURE;
7859 mono_class_init (klass);
7861 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
7862 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
7863 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
7864 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
7867 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
7868 if (*ip == CEE_STFLD) {
7869 if (target_type_is_incompatible (cfg, field->type, sp [1]))
7871 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7872 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
7873 MonoInst *iargs [5];
7876 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7877 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7878 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
7882 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
7883 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
7884 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7885 g_assert (costs > 0);
7887 cfg->real_offset += 5;
7890 inline_costs += costs;
7892 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
7897 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
7899 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
7901 #if HAVE_WRITE_BARRIERS
7902 if (mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
7903 /* insert call to write barrier */
7904 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7905 MonoInst *iargs [2], *dummy_use;
7908 dreg = alloc_preg (cfg);
7909 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7911 mono_emit_method_call (cfg, write_barrier, iargs, NULL);
7913 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
7917 store->flags |= ins_flag;
7924 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7925 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
7926 MonoInst *iargs [4];
7929 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7930 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7931 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
7932 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
7933 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
7934 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7936 g_assert (costs > 0);
7938 cfg->real_offset += 5;
7942 inline_costs += costs;
7944 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
7948 if (sp [0]->type == STACK_VTYPE) {
7951 /* Have to compute the address of the variable */
7953 var = get_vreg_to_inst (cfg, sp [0]->dreg);
7955 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
7957 g_assert (var->klass == klass);
7959 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
7963 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
7965 if (*ip == CEE_LDFLDA) {
7966 dreg = alloc_preg (cfg);
7968 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7969 ins->klass = mono_class_from_mono_type (field->type);
7970 ins->type = STACK_MP;
7975 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
7976 load->flags |= ins_flag;
7977 load->flags |= MONO_INST_FAULT;
7988 MonoClassField *field;
7989 gpointer addr = NULL;
7990 gboolean is_special_static;
7993 token = read32 (ip + 1);
7995 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7996 field = mono_method_get_wrapper_data (method, token);
7997 klass = field->parent;
8000 field = mono_field_from_token (image, token, &klass, generic_context);
8003 mono_class_init (klass);
8004 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8005 FIELD_ACCESS_FAILURE;
8007 /* if the class is Critical then transparent code cannot access it's fields */
8008 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8009 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8012 * We can only support shared generic static
8013 * field access on architectures where the
8014 * trampoline code has been extended to handle
8015 * the generic class init.
8017 #ifndef MONO_ARCH_VTABLE_REG
8018 GENERIC_SHARING_FAILURE (*ip);
8021 if (cfg->generic_sharing_context)
8022 context_used = mono_class_check_context_used (klass);
8024 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
8026 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8027 * to be called here.
8029 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8030 mono_class_vtable (cfg->domain, klass);
8031 CHECK_TYPELOAD (klass);
8033 mono_domain_lock (cfg->domain);
8034 if (cfg->domain->special_static_fields)
8035 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8036 mono_domain_unlock (cfg->domain);
8038 is_special_static = mono_class_field_is_special_static (field);
8040 /* Generate IR to compute the field address */
8041 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
8043 * Fast access to TLS data
8044 * Inline version of get_thread_static_data () in
8048 int idx, static_data_reg, array_reg, dreg;
8049 MonoInst *thread_ins;
8051 // offset &= 0x7fffffff;
8052 // idx = (offset >> 24) - 1;
8053 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
8055 thread_ins = mono_get_thread_intrinsic (cfg);
8056 MONO_ADD_INS (cfg->cbb, thread_ins);
8057 static_data_reg = alloc_ireg (cfg);
8058 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
8060 if (cfg->compile_aot) {
8061 int offset_reg, offset2_reg, idx_reg;
8063 /* For TLS variables, this will return the TLS offset */
8064 EMIT_NEW_SFLDACONST (cfg, ins, field);
8065 offset_reg = ins->dreg;
8066 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
8067 idx_reg = alloc_ireg (cfg);
8068 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
8069 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
8070 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
8071 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
8072 array_reg = alloc_ireg (cfg);
8073 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
8074 offset2_reg = alloc_ireg (cfg);
8075 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
8076 dreg = alloc_ireg (cfg);
8077 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
8079 offset = (gsize)addr & 0x7fffffff;
8080 idx = (offset >> 24) - 1;
8082 array_reg = alloc_ireg (cfg);
8083 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
8084 dreg = alloc_ireg (cfg);
8085 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
8087 } else if ((cfg->opt & MONO_OPT_SHARED) ||
8088 (cfg->compile_aot && is_special_static) ||
8089 (context_used && is_special_static)) {
8090 MonoInst *iargs [2];
8092 g_assert (field->parent);
8093 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8095 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8096 field, MONO_RGCTX_INFO_CLASS_FIELD);
8098 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8100 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8101 } else if (context_used) {
8102 MonoInst *static_data;
8105 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8106 method->klass->name_space, method->klass->name, method->name,
8107 depth, field->offset);
8110 if (mono_class_needs_cctor_run (klass, method)) {
8114 vtable = emit_get_rgctx_klass (cfg, context_used,
8115 klass, MONO_RGCTX_INFO_VTABLE);
8117 // FIXME: This doesn't work since it tries to pass the argument
8118 // in the normal way, instead of using MONO_ARCH_VTABLE_REG
8120 * The vtable pointer is always passed in a register regardless of
8121 * the calling convention, so assign it manually, and make a call
8122 * using a signature without parameters.
8124 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable);
8125 #ifdef MONO_ARCH_VTABLE_REG
8126 mono_call_inst_add_outarg_reg (cfg, call, vtable->dreg, MONO_ARCH_VTABLE_REG, FALSE);
8127 cfg->uses_vtable_reg = TRUE;
8134 * The pointer we're computing here is
8136 * super_info.static_data + field->offset
8138 static_data = emit_get_rgctx_klass (cfg, context_used,
8139 klass, MONO_RGCTX_INFO_STATIC_DATA);
8141 if (field->offset == 0) {
8144 int addr_reg = mono_alloc_preg (cfg);
8145 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8147 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8148 MonoInst *iargs [2];
8150 g_assert (field->parent);
8151 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8152 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8153 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8155 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8157 CHECK_TYPELOAD (klass);
8159 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8160 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8161 if (cfg->verbose_level > 2)
8162 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8163 class_inits = g_slist_prepend (class_inits, vtable);
8165 if (cfg->run_cctors) {
8167 /* This makes so that inline cannot trigger */
8168 /* .cctors: too many apps depend on them */
8169 /* running with a specific order... */
8170 if (! vtable->initialized)
8172 ex = mono_runtime_class_init_full (vtable, FALSE);
8174 set_exception_object (cfg, ex);
8175 goto exception_exit;
8179 addr = (char*)vtable->data + field->offset;
8181 if (cfg->compile_aot)
8182 EMIT_NEW_SFLDACONST (cfg, ins, field);
8184 EMIT_NEW_PCONST (cfg, ins, addr);
8186 MonoInst *iargs [1];
8187 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8188 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8192 /* Generate IR to do the actual load/store operation */
8194 if (*ip == CEE_LDSFLDA) {
8195 ins->klass = mono_class_from_mono_type (field->type);
8196 ins->type = STACK_PTR;
8198 } else if (*ip == CEE_STSFLD) {
8203 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
8204 store->flags |= ins_flag;
8206 gboolean is_const = FALSE;
8207 MonoVTable *vtable = NULL;
8209 if (!context_used) {
8210 vtable = mono_class_vtable (cfg->domain, klass);
8211 CHECK_TYPELOAD (klass);
8213 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8214 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8215 gpointer addr = (char*)vtable->data + field->offset;
8216 int ro_type = field->type->type;
8217 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
8218 ro_type = mono_class_enum_basetype (field->type->data.klass)->type;
8220 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8223 case MONO_TYPE_BOOLEAN:
8225 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8229 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8232 case MONO_TYPE_CHAR:
8234 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8238 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8243 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8247 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8253 case MONO_TYPE_FNPTR:
8254 #ifndef HAVE_MOVING_COLLECTOR
8255 case MONO_TYPE_STRING:
8256 case MONO_TYPE_OBJECT:
8257 case MONO_TYPE_CLASS:
8258 case MONO_TYPE_SZARRAY:
8259 case MONO_TYPE_ARRAY:
8261 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8262 type_to_eval_stack_type ((cfg), field->type, *sp);
8267 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8272 case MONO_TYPE_VALUETYPE:
8282 CHECK_STACK_OVF (1);
8284 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8285 load->flags |= ins_flag;
8298 token = read32 (ip + 1);
8299 klass = mini_get_class (method, token, generic_context);
8300 CHECK_TYPELOAD (klass);
8301 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8302 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8313 const char *data_ptr;
8315 guint32 field_token;
8321 token = read32 (ip + 1);
8323 klass = mini_get_class (method, token, generic_context);
8324 CHECK_TYPELOAD (klass);
8326 if (cfg->generic_sharing_context)
8327 context_used = mono_class_check_context_used (klass);
8329 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
8330 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
8331 ins->sreg1 = sp [0]->dreg;
8332 ins->type = STACK_I4;
8333 ins->dreg = alloc_ireg (cfg);
8334 MONO_ADD_INS (cfg->cbb, ins);
8335 *sp = mono_decompose_opcode (cfg, ins);
8340 MonoClass *array_class = mono_array_class_get (klass, 1);
8341 /* FIXME: we cannot get a managed
8342 allocator because we can't get the
8343 open generic class's vtable. We
8344 have the same problem in
8345 handle_alloc(). This
8346 needs to be solved so that we can
8347 have managed allocs of shared
8350 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
8351 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
8353 MonoMethod *managed_alloc = NULL;
8355 /* FIXME: Decompose later to help abcrem */
8358 args [0] = emit_get_rgctx_klass (cfg, context_used,
8359 array_class, MONO_RGCTX_INFO_VTABLE);
8364 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
8366 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8368 if (cfg->opt & MONO_OPT_SHARED) {
8369 /* Decompose now to avoid problems with references to the domainvar */
8370 MonoInst *iargs [3];
8372 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8373 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8376 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8378 /* Decompose later since it is needed by abcrem */
8379 MonoClass *array_type = mono_array_class_get (klass, 1);
8380 mono_class_vtable (cfg->domain, array_type);
8381 CHECK_TYPELOAD (array_type);
8383 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8384 ins->dreg = alloc_preg (cfg);
8385 ins->sreg1 = sp [0]->dreg;
8386 ins->inst_newa_class = klass;
8387 ins->type = STACK_OBJ;
8389 MONO_ADD_INS (cfg->cbb, ins);
8390 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8391 cfg->cbb->has_array_access = TRUE;
8393 /* Needed so mono_emit_load_get_addr () gets called */
8394 mono_get_got_var (cfg);
8404 * we inline/optimize the initialization sequence if possible.
8405 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8406 * for small sizes open code the memcpy
8407 * ensure the rva field is big enough
8409 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8410 MonoMethod *memcpy_method = get_memcpy_method ();
8411 MonoInst *iargs [3];
8412 int add_reg = alloc_preg (cfg);
8414 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8415 if (cfg->compile_aot) {
8416 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8418 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8420 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8421 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8430 if (sp [0]->type != STACK_OBJ)
8433 dreg = alloc_preg (cfg);
8434 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8435 ins->dreg = alloc_preg (cfg);
8436 ins->sreg1 = sp [0]->dreg;
8437 ins->type = STACK_I4;
8438 MONO_ADD_INS (cfg->cbb, ins);
8439 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8440 cfg->cbb->has_array_access = TRUE;
8448 if (sp [0]->type != STACK_OBJ)
8451 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8453 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8454 CHECK_TYPELOAD (klass);
8455 /* we need to make sure that this array is exactly the type it needs
8456 * to be for correctness. the wrappers are lax with their usage
8457 * so we need to ignore them here
8459 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
8460 MonoClass *array_class = mono_array_class_get (klass, 1);
8461 mini_emit_check_array_type (cfg, sp [0], array_class);
8462 CHECK_TYPELOAD (array_class);
8466 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8481 case CEE_LDELEM_REF: {
8487 if (*ip == CEE_LDELEM) {
8489 token = read32 (ip + 1);
8490 klass = mini_get_class (method, token, generic_context);
8491 CHECK_TYPELOAD (klass);
8492 mono_class_init (klass);
8495 klass = array_access_to_klass (*ip);
8497 if (sp [0]->type != STACK_OBJ)
8500 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8502 if (sp [1]->opcode == OP_ICONST) {
8503 int array_reg = sp [0]->dreg;
8504 int index_reg = sp [1]->dreg;
8505 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8507 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8508 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8510 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8511 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8514 if (*ip == CEE_LDELEM)
8527 case CEE_STELEM_REF:
8534 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8536 if (*ip == CEE_STELEM) {
8538 token = read32 (ip + 1);
8539 klass = mini_get_class (method, token, generic_context);
8540 CHECK_TYPELOAD (klass);
8541 mono_class_init (klass);
8544 klass = array_access_to_klass (*ip);
8546 if (sp [0]->type != STACK_OBJ)
8549 /* storing a NULL doesn't need any of the complex checks in stelemref */
8550 if (generic_class_is_reference_type (cfg, klass) &&
8551 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8552 MonoMethod* helper = mono_marshal_get_stelemref ();
8553 MonoInst *iargs [3];
8555 if (sp [0]->type != STACK_OBJ)
8557 if (sp [2]->type != STACK_OBJ)
8564 mono_emit_method_call (cfg, helper, iargs, NULL);
8566 if (sp [1]->opcode == OP_ICONST) {
8567 int array_reg = sp [0]->dreg;
8568 int index_reg = sp [1]->dreg;
8569 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8571 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8572 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8574 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8575 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8579 if (*ip == CEE_STELEM)
8586 case CEE_CKFINITE: {
8590 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8591 ins->sreg1 = sp [0]->dreg;
8592 ins->dreg = alloc_freg (cfg);
8593 ins->type = STACK_R8;
8594 MONO_ADD_INS (bblock, ins);
8596 *sp++ = mono_decompose_opcode (cfg, ins);
8601 case CEE_REFANYVAL: {
8602 MonoInst *src_var, *src;
8604 int klass_reg = alloc_preg (cfg);
8605 int dreg = alloc_preg (cfg);
8608 MONO_INST_NEW (cfg, ins, *ip);
8611 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8612 CHECK_TYPELOAD (klass);
8613 mono_class_init (klass);
8615 if (cfg->generic_sharing_context)
8616 context_used = mono_class_check_context_used (klass);
8619 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8621 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8622 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8623 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8626 MonoInst *klass_ins;
8628 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8629 klass, MONO_RGCTX_INFO_KLASS);
8632 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8633 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8635 mini_emit_class_check (cfg, klass_reg, klass);
8637 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8638 ins->type = STACK_MP;
8643 case CEE_MKREFANY: {
8644 MonoInst *loc, *addr;
8647 MONO_INST_NEW (cfg, ins, *ip);
8650 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8651 CHECK_TYPELOAD (klass);
8652 mono_class_init (klass);
8654 if (cfg->generic_sharing_context)
8655 context_used = mono_class_check_context_used (klass);
8657 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8658 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8661 MonoInst *const_ins;
8662 int type_reg = alloc_preg (cfg);
8664 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8665 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8666 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8667 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8668 } else if (cfg->compile_aot) {
8669 int const_reg = alloc_preg (cfg);
8670 int type_reg = alloc_preg (cfg);
8672 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8673 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8674 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8675 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8677 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8678 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8680 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8682 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8683 ins->type = STACK_VTYPE;
8684 ins->klass = mono_defaults.typed_reference_class;
8691 MonoClass *handle_class;
8693 CHECK_STACK_OVF (1);
8696 n = read32 (ip + 1);
8698 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
8699 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8700 handle = mono_method_get_wrapper_data (method, n);
8701 handle_class = mono_method_get_wrapper_data (method, n + 1);
8702 if (handle_class == mono_defaults.typehandle_class)
8703 handle = &((MonoClass*)handle)->byval_arg;
8706 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8710 mono_class_init (handle_class);
8711 if (cfg->generic_sharing_context) {
8712 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
8713 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
8714 /* This case handles ldtoken
8715 of an open type, like for
8718 } else if (handle_class == mono_defaults.typehandle_class) {
8719 /* If we get a MONO_TYPE_CLASS
8720 then we need to provide the
8722 instantiation of it. */
8723 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8726 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8727 } else if (handle_class == mono_defaults.fieldhandle_class)
8728 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8729 else if (handle_class == mono_defaults.methodhandle_class)
8730 context_used = mono_method_check_context_used (handle);
8732 g_assert_not_reached ();
8735 if ((cfg->opt & MONO_OPT_SHARED) &&
8736 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
8737 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
8738 MonoInst *addr, *vtvar, *iargs [3];
8739 int method_context_used;
8741 if (cfg->generic_sharing_context)
8742 method_context_used = mono_method_check_context_used (method);
8744 method_context_used = 0;
8746 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8748 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8749 EMIT_NEW_ICONST (cfg, iargs [1], n);
8750 if (method_context_used) {
8751 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
8752 method, MONO_RGCTX_INFO_METHOD);
8753 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8755 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
8756 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
8758 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8760 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8762 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8764 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
8765 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
8766 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
8767 (cmethod->klass == mono_defaults.monotype_class->parent) &&
8768 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
8769 MonoClass *tclass = mono_class_from_mono_type (handle);
8771 mono_class_init (tclass);
8773 ins = emit_get_rgctx_klass (cfg, context_used,
8774 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
8775 } else if (cfg->compile_aot) {
8776 if (method->wrapper_type) {
8777 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
8778 /* Special case for static synchronized wrappers */
8779 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
8781 /* FIXME: n is not a normal token */
8782 cfg->disable_aot = TRUE;
8783 EMIT_NEW_PCONST (cfg, ins, NULL);
8786 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
8789 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
8791 ins->type = STACK_OBJ;
8792 ins->klass = cmethod->klass;
8795 MonoInst *addr, *vtvar;
8797 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8800 if (handle_class == mono_defaults.typehandle_class) {
8801 ins = emit_get_rgctx_klass (cfg, context_used,
8802 mono_class_from_mono_type (handle),
8803 MONO_RGCTX_INFO_TYPE);
8804 } else if (handle_class == mono_defaults.methodhandle_class) {
8805 ins = emit_get_rgctx_method (cfg, context_used,
8806 handle, MONO_RGCTX_INFO_METHOD);
8807 } else if (handle_class == mono_defaults.fieldhandle_class) {
8808 ins = emit_get_rgctx_field (cfg, context_used,
8809 handle, MONO_RGCTX_INFO_CLASS_FIELD);
8811 g_assert_not_reached ();
8813 } else if (cfg->compile_aot) {
8814 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
8816 EMIT_NEW_PCONST (cfg, ins, handle);
8818 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8819 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8820 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8830 MONO_INST_NEW (cfg, ins, OP_THROW);
8832 ins->sreg1 = sp [0]->dreg;
8834 bblock->out_of_line = TRUE;
8835 MONO_ADD_INS (bblock, ins);
8836 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
8837 MONO_ADD_INS (bblock, ins);
8840 link_bblock (cfg, bblock, end_bblock);
8841 start_new_bblock = 1;
8843 case CEE_ENDFINALLY:
8844 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
8845 MONO_ADD_INS (bblock, ins);
8847 start_new_bblock = 1;
8850 * Control will leave the method so empty the stack, otherwise
8851 * the next basic block will start with a nonempty stack.
8853 while (sp != stack_start) {
8861 if (*ip == CEE_LEAVE) {
8863 target = ip + 5 + (gint32)read32(ip + 1);
8866 target = ip + 2 + (signed char)(ip [1]);
8869 /* empty the stack */
8870 while (sp != stack_start) {
8875 * If this leave statement is in a catch block, check for a
8876 * pending exception, and rethrow it if necessary.
8877 * We avoid doing this in runtime invoke wrappers, since those are called
8878 * by native code which expects the wrapper to catch all exceptions.
8880 for (i = 0; i < header->num_clauses; ++i) {
8881 MonoExceptionClause *clause = &header->clauses [i];
8884 * Use <= in the final comparison to handle clauses with multiple
8885 * leave statements, like in bug #78024.
8886 * The ordering of the exception clauses guarantees that we find the
8889 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
8891 MonoBasicBlock *dont_throw;
8896 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
8899 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
8901 NEW_BBLOCK (cfg, dont_throw);
8904 * Currently, we always rethrow the abort exception, despite the
8905 * fact that this is not correct. See thread6.cs for an example.
8906 * But propagating the abort exception is more important than
8907 * getting the semantics right.
8909 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
8910 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
8911 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
8913 MONO_START_BB (cfg, dont_throw);
8918 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
8920 MonoExceptionClause *clause;
8922 for (tmp = handlers; tmp; tmp = tmp->next) {
8924 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
8926 link_bblock (cfg, bblock, tblock);
8927 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
8928 ins->inst_target_bb = tblock;
8929 ins->inst_eh_block = clause;
8930 MONO_ADD_INS (bblock, ins);
8931 bblock->has_call_handler = 1;
8932 if (COMPILE_LLVM (cfg)) {
8933 MonoBasicBlock *target_bb;
8936 * Link the finally bblock with the target, since it will
8937 * conceptually branch there.
8938 * FIXME: Have to link the bblock containing the endfinally.
8940 GET_BBLOCK (cfg, target_bb, target);
8941 link_bblock (cfg, tblock, target_bb);
8944 g_list_free (handlers);
8947 MONO_INST_NEW (cfg, ins, OP_BR);
8948 MONO_ADD_INS (bblock, ins);
8949 GET_BBLOCK (cfg, tblock, target);
8950 link_bblock (cfg, bblock, tblock);
8951 ins->inst_target_bb = tblock;
8952 start_new_bblock = 1;
8954 if (*ip == CEE_LEAVE)
8963 * Mono specific opcodes
8965 case MONO_CUSTOM_PREFIX: {
8967 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
8971 case CEE_MONO_ICALL: {
8973 MonoJitICallInfo *info;
8975 token = read32 (ip + 2);
8976 func = mono_method_get_wrapper_data (method, token);
8977 info = mono_find_jit_icall_by_addr (func);
8980 CHECK_STACK (info->sig->param_count);
8981 sp -= info->sig->param_count;
8983 ins = mono_emit_jit_icall (cfg, info->func, sp);
8984 if (!MONO_TYPE_IS_VOID (info->sig->ret))
8988 inline_costs += 10 * num_calls++;
8992 case CEE_MONO_LDPTR: {
8995 CHECK_STACK_OVF (1);
8997 token = read32 (ip + 2);
8999 ptr = mono_method_get_wrapper_data (method, token);
9000 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
9001 MonoJitICallInfo *callinfo;
9002 const char *icall_name;
9004 icall_name = method->name + strlen ("__icall_wrapper_");
9005 g_assert (icall_name);
9006 callinfo = mono_find_jit_icall_by_name (icall_name);
9007 g_assert (callinfo);
9009 if (ptr == callinfo->func) {
9010 /* Will be transformed into an AOTCONST later */
9011 EMIT_NEW_PCONST (cfg, ins, ptr);
9017 /* FIXME: Generalize this */
9018 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
9019 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
9024 EMIT_NEW_PCONST (cfg, ins, ptr);
9027 inline_costs += 10 * num_calls++;
9028 /* Can't embed random pointers into AOT code */
9029 cfg->disable_aot = 1;
9032 case CEE_MONO_ICALL_ADDR: {
9033 MonoMethod *cmethod;
9036 CHECK_STACK_OVF (1);
9038 token = read32 (ip + 2);
9040 cmethod = mono_method_get_wrapper_data (method, token);
9042 if (cfg->compile_aot) {
9043 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
9045 ptr = mono_lookup_internal_call (cmethod);
9047 EMIT_NEW_PCONST (cfg, ins, ptr);
9053 case CEE_MONO_VTADDR: {
9054 MonoInst *src_var, *src;
9060 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9061 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
9066 case CEE_MONO_NEWOBJ: {
9067 MonoInst *iargs [2];
9069 CHECK_STACK_OVF (1);
9071 token = read32 (ip + 2);
9072 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9073 mono_class_init (klass);
9074 NEW_DOMAINCONST (cfg, iargs [0]);
9075 MONO_ADD_INS (cfg->cbb, iargs [0]);
9076 NEW_CLASSCONST (cfg, iargs [1], klass);
9077 MONO_ADD_INS (cfg->cbb, iargs [1]);
9078 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9080 inline_costs += 10 * num_calls++;
9083 case CEE_MONO_OBJADDR:
9086 MONO_INST_NEW (cfg, ins, OP_MOVE);
9087 ins->dreg = alloc_preg (cfg);
9088 ins->sreg1 = sp [0]->dreg;
9089 ins->type = STACK_MP;
9090 MONO_ADD_INS (cfg->cbb, ins);
9094 case CEE_MONO_LDNATIVEOBJ:
9096 * Similar to LDOBJ, but instead load the unmanaged
9097 * representation of the vtype to the stack.
9102 token = read32 (ip + 2);
9103 klass = mono_method_get_wrapper_data (method, token);
9104 g_assert (klass->valuetype);
9105 mono_class_init (klass);
9108 MonoInst *src, *dest, *temp;
9111 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9112 temp->backend.is_pinvoke = 1;
9113 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9114 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9116 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9117 dest->type = STACK_VTYPE;
9118 dest->klass = klass;
9124 case CEE_MONO_RETOBJ: {
9126 * Same as RET, but return the native representation of a vtype
9129 g_assert (cfg->ret);
9130 g_assert (mono_method_signature (method)->pinvoke);
9135 token = read32 (ip + 2);
9136 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9138 if (!cfg->vret_addr) {
9139 g_assert (cfg->ret_var_is_local);
9141 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9143 EMIT_NEW_RETLOADA (cfg, ins);
9145 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9147 if (sp != stack_start)
9150 MONO_INST_NEW (cfg, ins, OP_BR);
9151 ins->inst_target_bb = end_bblock;
9152 MONO_ADD_INS (bblock, ins);
9153 link_bblock (cfg, bblock, end_bblock);
9154 start_new_bblock = 1;
9158 case CEE_MONO_CISINST:
9159 case CEE_MONO_CCASTCLASS: {
9164 token = read32 (ip + 2);
9165 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9166 if (ip [1] == CEE_MONO_CISINST)
9167 ins = handle_cisinst (cfg, klass, sp [0]);
9169 ins = handle_ccastclass (cfg, klass, sp [0]);
9175 case CEE_MONO_SAVE_LMF:
9176 case CEE_MONO_RESTORE_LMF:
9177 #ifdef MONO_ARCH_HAVE_LMF_OPS
9178 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9179 MONO_ADD_INS (bblock, ins);
9180 cfg->need_lmf_area = TRUE;
9184 case CEE_MONO_CLASSCONST:
9185 CHECK_STACK_OVF (1);
9187 token = read32 (ip + 2);
9188 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9191 inline_costs += 10 * num_calls++;
9193 case CEE_MONO_NOT_TAKEN:
9194 bblock->out_of_line = TRUE;
9198 CHECK_STACK_OVF (1);
9200 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9201 ins->dreg = alloc_preg (cfg);
9202 ins->inst_offset = (gint32)read32 (ip + 2);
9203 ins->type = STACK_PTR;
9204 MONO_ADD_INS (bblock, ins);
9208 case CEE_MONO_DYN_CALL: {
9211 /* It would be easier to call a trampoline, but that would put an
9212 * extra frame on the stack, confusing exception handling. So
9213 * implement it inline using an opcode for now.
9216 if (!cfg->dyn_call_var) {
9217 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9218 /* prevent it from being register allocated */
9219 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
9222 /* Has to use a call inst since the local register allocator expects it */
9223 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
9224 ins = (MonoInst*)call;
9226 ins->sreg1 = sp [0]->dreg;
9227 ins->sreg2 = sp [1]->dreg;
9228 MONO_ADD_INS (bblock, ins);
9230 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
9231 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
9235 inline_costs += 10 * num_calls++;
9240 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9250 /* somewhat similar to LDTOKEN */
9251 MonoInst *addr, *vtvar;
9252 CHECK_STACK_OVF (1);
9253 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9255 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9256 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9258 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9259 ins->type = STACK_VTYPE;
9260 ins->klass = mono_defaults.argumenthandle_class;
9273 * The following transforms:
9274 * CEE_CEQ into OP_CEQ
9275 * CEE_CGT into OP_CGT
9276 * CEE_CGT_UN into OP_CGT_UN
9277 * CEE_CLT into OP_CLT
9278 * CEE_CLT_UN into OP_CLT_UN
9280 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9282 MONO_INST_NEW (cfg, ins, cmp->opcode);
9284 cmp->sreg1 = sp [0]->dreg;
9285 cmp->sreg2 = sp [1]->dreg;
9286 type_from_op (cmp, sp [0], sp [1]);
9288 if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9289 cmp->opcode = OP_LCOMPARE;
9290 else if (sp [0]->type == STACK_R8)
9291 cmp->opcode = OP_FCOMPARE;
9293 cmp->opcode = OP_ICOMPARE;
9294 MONO_ADD_INS (bblock, cmp);
9295 ins->type = STACK_I4;
9296 ins->dreg = alloc_dreg (cfg, ins->type);
9297 type_from_op (ins, sp [0], sp [1]);
9299 if (cmp->opcode == OP_FCOMPARE) {
9301 * The backends expect the fceq opcodes to do the
9304 cmp->opcode = OP_NOP;
9305 ins->sreg1 = cmp->sreg1;
9306 ins->sreg2 = cmp->sreg2;
9308 MONO_ADD_INS (bblock, ins);
9315 MonoMethod *cil_method;
9316 gboolean needs_static_rgctx_invoke;
9318 CHECK_STACK_OVF (1);
9320 n = read32 (ip + 2);
9321 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9324 mono_class_init (cmethod->klass);
9326 mono_save_token_info (cfg, image, n, cmethod);
9328 if (cfg->generic_sharing_context)
9329 context_used = mono_method_check_context_used (cmethod);
9331 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9333 cil_method = cmethod;
9334 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9335 METHOD_ACCESS_FAILURE;
9337 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9338 if (check_linkdemand (cfg, method, cmethod))
9340 CHECK_CFG_EXCEPTION;
9341 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9342 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9346 * Optimize the common case of ldftn+delegate creation
9348 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9349 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9350 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9352 int invoke_context_used = 0;
9354 invoke = mono_get_delegate_invoke (ctor_method->klass);
9355 if (!invoke || !mono_method_signature (invoke))
9358 if (cfg->generic_sharing_context)
9359 invoke_context_used = mono_method_check_context_used (invoke);
9361 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
9362 /* FIXME: SGEN support */
9363 if (invoke_context_used == 0) {
9364 MonoInst *target_ins;
9367 if (cfg->verbose_level > 3)
9368 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9369 target_ins = sp [-1];
9371 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
9372 CHECK_CFG_EXCEPTION;
9381 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9382 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9386 inline_costs += 10 * num_calls++;
9389 case CEE_LDVIRTFTN: {
9394 n = read32 (ip + 2);
9395 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9398 mono_class_init (cmethod->klass);
9400 if (cfg->generic_sharing_context)
9401 context_used = mono_method_check_context_used (cmethod);
9403 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9404 if (check_linkdemand (cfg, method, cmethod))
9406 CHECK_CFG_EXCEPTION;
9407 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9408 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9414 args [1] = emit_get_rgctx_method (cfg, context_used,
9415 cmethod, MONO_RGCTX_INFO_METHOD);
9418 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9420 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9423 inline_costs += 10 * num_calls++;
9427 CHECK_STACK_OVF (1);
9429 n = read16 (ip + 2);
9431 EMIT_NEW_ARGLOAD (cfg, ins, n);
9436 CHECK_STACK_OVF (1);
9438 n = read16 (ip + 2);
9440 NEW_ARGLOADA (cfg, ins, n);
9441 MONO_ADD_INS (cfg->cbb, ins);
9449 n = read16 (ip + 2);
9451 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9453 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9457 CHECK_STACK_OVF (1);
9459 n = read16 (ip + 2);
9461 EMIT_NEW_LOCLOAD (cfg, ins, n);
9466 unsigned char *tmp_ip;
9467 CHECK_STACK_OVF (1);
9469 n = read16 (ip + 2);
9472 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9478 EMIT_NEW_LOCLOADA (cfg, ins, n);
9487 n = read16 (ip + 2);
9489 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9491 emit_stloc_ir (cfg, sp, header, n);
9498 if (sp != stack_start)
9500 if (cfg->method != method)
9502 * Inlining this into a loop in a parent could lead to
9503 * stack overflows which is different behavior than the
9504 * non-inlined case, thus disable inlining in this case.
9506 goto inline_failure;
9508 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9509 ins->dreg = alloc_preg (cfg);
9510 ins->sreg1 = sp [0]->dreg;
9511 ins->type = STACK_PTR;
9512 MONO_ADD_INS (cfg->cbb, ins);
9514 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9516 ins->flags |= MONO_INST_INIT;
9521 case CEE_ENDFILTER: {
9522 MonoExceptionClause *clause, *nearest;
9523 int cc, nearest_num;
9527 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9529 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9530 ins->sreg1 = (*sp)->dreg;
9531 MONO_ADD_INS (bblock, ins);
9532 start_new_bblock = 1;
9537 for (cc = 0; cc < header->num_clauses; ++cc) {
9538 clause = &header->clauses [cc];
9539 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9540 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9541 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9547 if ((ip - header->code) != nearest->handler_offset)
9552 case CEE_UNALIGNED_:
9553 ins_flag |= MONO_INST_UNALIGNED;
9554 /* FIXME: record alignment? we can assume 1 for now */
9559 ins_flag |= MONO_INST_VOLATILE;
9563 ins_flag |= MONO_INST_TAILCALL;
9564 cfg->flags |= MONO_CFG_HAS_TAIL;
9565 /* Can't inline tail calls at this time */
9566 inline_costs += 100000;
9573 token = read32 (ip + 2);
9574 klass = mini_get_class (method, token, generic_context);
9575 CHECK_TYPELOAD (klass);
9576 if (generic_class_is_reference_type (cfg, klass))
9577 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9579 mini_emit_initobj (cfg, *sp, NULL, klass);
9583 case CEE_CONSTRAINED_:
9585 token = read32 (ip + 2);
9586 if (method->wrapper_type != MONO_WRAPPER_NONE)
9587 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
9589 constrained_call = mono_class_get_full (image, token, generic_context);
9590 CHECK_TYPELOAD (constrained_call);
9595 MonoInst *iargs [3];
9599 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9600 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9601 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9602 /* emit_memset only works when val == 0 */
9603 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9608 if (ip [1] == CEE_CPBLK) {
9609 MonoMethod *memcpy_method = get_memcpy_method ();
9610 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9612 MonoMethod *memset_method = get_memset_method ();
9613 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9623 ins_flag |= MONO_INST_NOTYPECHECK;
9625 ins_flag |= MONO_INST_NORANGECHECK;
9626 /* we ignore the no-nullcheck for now since we
9627 * really do it explicitly only when doing callvirt->call
9633 int handler_offset = -1;
9635 for (i = 0; i < header->num_clauses; ++i) {
9636 MonoExceptionClause *clause = &header->clauses [i];
9637 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9638 handler_offset = clause->handler_offset;
9643 bblock->flags |= BB_EXCEPTION_UNSAFE;
9645 g_assert (handler_offset != -1);
9647 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9648 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9649 ins->sreg1 = load->dreg;
9650 MONO_ADD_INS (bblock, ins);
9652 link_bblock (cfg, bblock, end_bblock);
9653 start_new_bblock = 1;
9661 CHECK_STACK_OVF (1);
9663 token = read32 (ip + 2);
9664 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic) {
9665 MonoType *type = mono_type_create_from_typespec (image, token);
9666 token = mono_type_size (type, &ialign);
9668 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9669 CHECK_TYPELOAD (klass);
9670 mono_class_init (klass);
9671 token = mono_class_value_size (klass, &align);
9673 EMIT_NEW_ICONST (cfg, ins, token);
9678 case CEE_REFANYTYPE: {
9679 MonoInst *src_var, *src;
9685 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9687 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9688 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9689 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9707 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
9717 g_warning ("opcode 0x%02x not handled", *ip);
9721 if (start_new_bblock != 1)
9724 bblock->cil_length = ip - bblock->cil_code;
9725 bblock->next_bb = end_bblock;
9727 if (cfg->method == method && cfg->domainvar) {
9729 MonoInst *get_domain;
9731 cfg->cbb = init_localsbb;
9733 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9734 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9737 get_domain->dreg = alloc_preg (cfg);
9738 MONO_ADD_INS (cfg->cbb, get_domain);
9740 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9741 MONO_ADD_INS (cfg->cbb, store);
9744 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
9745 if (cfg->compile_aot)
9746 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
9747 mono_get_got_var (cfg);
9750 if (cfg->method == method && cfg->got_var)
9751 mono_emit_load_got_addr (cfg);
9756 cfg->cbb = init_localsbb;
9758 for (i = 0; i < header->num_locals; ++i) {
9759 MonoType *ptype = header->locals [i];
9760 int t = ptype->type;
9761 dreg = cfg->locals [i]->dreg;
9763 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
9764 t = mono_class_enum_basetype (ptype->data.klass)->type;
9766 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9767 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
9768 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
9769 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
9770 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
9771 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
9772 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9773 ins->type = STACK_R8;
9774 ins->inst_p0 = (void*)&r8_0;
9775 ins->dreg = alloc_dreg (cfg, STACK_R8);
9776 MONO_ADD_INS (init_localsbb, ins);
9777 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
9778 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
9779 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
9780 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
9782 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9787 if (cfg->init_ref_vars && cfg->method == method) {
9788 /* Emit initialization for ref vars */
9789 // FIXME: Avoid duplication initialization for IL locals.
9790 for (i = 0; i < cfg->num_varinfo; ++i) {
9791 MonoInst *ins = cfg->varinfo [i];
9793 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
9794 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
9798 /* Add a sequence point for method entry/exit events */
9800 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
9801 MONO_ADD_INS (init_localsbb, ins);
9802 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
9803 MONO_ADD_INS (cfg->bb_exit, ins);
9808 if (cfg->method == method) {
9810 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9811 bb->region = mono_find_block_region (cfg, bb->real_offset);
9813 mono_create_spvar_for_region (cfg, bb->region);
9814 if (cfg->verbose_level > 2)
9815 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
9819 g_slist_free (class_inits);
9820 dont_inline = g_list_remove (dont_inline, method);
9822 if (inline_costs < 0) {
9825 /* Method is too large */
9826 mname = mono_method_full_name (method, TRUE);
9827 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
9828 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
9830 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
9831 mono_basic_block_free (original_bb);
9835 if ((cfg->verbose_level > 2) && (cfg->method == method))
9836 mono_print_code (cfg, "AFTER METHOD-TO-IR");
9838 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
9839 mono_basic_block_free (original_bb);
9840 return inline_costs;
9843 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
9850 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
9854 set_exception_type_from_invalid_il (cfg, method, ip);
9858 g_slist_free (class_inits);
9859 mono_basic_block_free (original_bb);
9860 dont_inline = g_list_remove (dont_inline, method);
9861 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a STORE*_MEMBASE_REG opcode to its *_MEMBASE_IMM counterpart.
 * Used by the spill pass when the value being stored turns out to be a
 * constant, so the register store can be folded into an immediate store.
 * Aborts on any opcode that has no immediate form (elided default case).
 */
9866 store_membase_reg_to_store_membase_imm (int opcode)
9869 case OP_STORE_MEMBASE_REG:
9870 return OP_STORE_MEMBASE_IMM;
9871 case OP_STOREI1_MEMBASE_REG:
9872 return OP_STOREI1_MEMBASE_IMM;
9873 case OP_STOREI2_MEMBASE_REG:
9874 return OP_STOREI2_MEMBASE_IMM;
9875 case OP_STOREI4_MEMBASE_REG:
9876 return OP_STOREI4_MEMBASE_IMM;
9877 case OP_STOREI8_MEMBASE_REG:
9878 return OP_STOREI8_MEMBASE_IMM;
/* Unsupported store opcode: hard failure rather than silent miscompile. */
9880 g_assert_not_reached ();
9886 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 *
 *   Map a two-register ALU/compare/store opcode to the variant that takes an
 * immediate second operand (e.g. OP_IADD -> OP_IADD_IMM).  The local
 * optimizer calls this when sreg2 is known to be a constant.
 * Case labels for most arithmetic opcodes are elided in this excerpt; only
 * selected return statements are visible.  The x86/amd64-only opcodes are
 * guarded by TARGET_* ifdefs so other backends never see them.
 */
9889 mono_op_to_op_imm (int opcode)
9899 return OP_IDIV_UN_IMM;
9903 return OP_IREM_UN_IMM;
9917 return OP_ISHR_UN_IMM;
9934 return OP_LSHR_UN_IMM;
9937 return OP_COMPARE_IMM;
9939 return OP_ICOMPARE_IMM;
9941 return OP_LCOMPARE_IMM;
9943 case OP_STORE_MEMBASE_REG:
9944 return OP_STORE_MEMBASE_IMM;
9945 case OP_STOREI1_MEMBASE_REG:
9946 return OP_STOREI1_MEMBASE_IMM;
9947 case OP_STOREI2_MEMBASE_REG:
9948 return OP_STOREI2_MEMBASE_IMM;
9949 case OP_STOREI4_MEMBASE_REG:
9950 return OP_STOREI4_MEMBASE_IMM;
/* Architecture-specific opcodes below exist only on x86/amd64. */
9952 #if defined(TARGET_X86) || defined (TARGET_AMD64)
9954 return OP_X86_PUSH_IMM;
9955 case OP_X86_COMPARE_MEMBASE_REG:
9956 return OP_X86_COMPARE_MEMBASE_IMM;
9958 #if defined(TARGET_AMD64)
9959 case OP_AMD64_ICOMPARE_MEMBASE_REG:
9960 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9962 case OP_VOIDCALL_REG:
9971 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Translate a CIL CEE_LDIND_* indirect-load opcode into the corresponding
 * low-level OP_LOAD*_MEMBASE IR opcode (case labels elided in this excerpt).
 * Both the I (native int) and REF variants map to the pointer-sized
 * OP_LOAD_MEMBASE — hence the two identical returns below.
 * Aborts on anything that is not an ldind opcode.
 */
9978 ldind_to_load_membase (int opcode)
9982 return OP_LOADI1_MEMBASE;
9984 return OP_LOADU1_MEMBASE;
9986 return OP_LOADI2_MEMBASE;
9988 return OP_LOADU2_MEMBASE;
9990 return OP_LOADI4_MEMBASE;
9992 return OP_LOADU4_MEMBASE;
/* LDIND_I and LDIND_REF are both pointer-sized loads. */
9994 return OP_LOAD_MEMBASE;
9996 return OP_LOAD_MEMBASE;
9998 return OP_LOADI8_MEMBASE;
10000 return OP_LOADR4_MEMBASE;
10002 return OP_LOADR8_MEMBASE;
10004 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Translate a CIL CEE_STIND_* indirect-store opcode into the corresponding
 * OP_STORE*_MEMBASE_REG IR opcode (mirror of ldind_to_load_membase; most
 * case labels are elided in this excerpt).  STIND_REF stores a pointer-sized
 * value, so it maps to the generic OP_STORE_MEMBASE_REG.
 * Aborts on anything that is not a stind opcode.
 */
10011 stind_to_store_membase (int opcode)
10015 return OP_STOREI1_MEMBASE_REG;
10017 return OP_STOREI2_MEMBASE_REG;
10019 return OP_STOREI4_MEMBASE_REG;
10021 case CEE_STIND_REF:
10022 return OP_STORE_MEMBASE_REG;
10024 return OP_STOREI8_MEMBASE_REG;
10026 return OP_STORER4_MEMBASE_REG;
10028 return OP_STORER8_MEMBASE_REG;
10030 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map an OP_LOAD*_MEMBASE (base register + offset) opcode to the
 * absolute-address OP_LOAD*_MEM form, used when the base address is a
 * constant.  Only implemented for x86/amd64; other targets fall through
 * (elided) and presumably return a "no mapping" sentinel — TODO confirm
 * against the full source.  The I8 form only exists on 64-bit registers.
 */
10037 mono_load_membase_to_load_mem (int opcode)
10039 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
10040 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10042 case OP_LOAD_MEMBASE:
10043 return OP_LOAD_MEM;
10044 case OP_LOADU1_MEMBASE:
10045 return OP_LOADU1_MEM;
10046 case OP_LOADU2_MEMBASE:
10047 return OP_LOADU2_MEM;
10048 case OP_LOADI4_MEMBASE:
10049 return OP_LOADI4_MEM;
10050 case OP_LOADU4_MEMBASE:
10051 return OP_LOADU4_MEM;
10052 #if SIZEOF_REGISTER == 8
10053 case OP_LOADI8_MEMBASE:
10054 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given the store opcode of a stack-allocated variable and an ALU opcode
 * whose destination is that variable, return a fused read-modify-write
 * _MEMBASE opcode (e.g. OP_IADD storing to a stack slot -> OP_X86_ADD_MEMBASE_REG)
 * so the spill pass can avoid an explicit load+op+store sequence.
 * Callers compare the result against -1 to detect "no fusion possible"
 * (see its use in mono_spill_global_vars).  Only x86/amd64 support this.
 */
10063 op_to_op_dest_membase (int store_opcode, int opcode)
10065 #if defined(TARGET_X86)
/* x86: fusion is only valid for 32-bit/pointer-sized stores. */
10066 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
10071 return OP_X86_ADD_MEMBASE_REG;
10073 return OP_X86_SUB_MEMBASE_REG;
10075 return OP_X86_AND_MEMBASE_REG;
10077 return OP_X86_OR_MEMBASE_REG;
10079 return OP_X86_XOR_MEMBASE_REG;
10082 return OP_X86_ADD_MEMBASE_IMM;
10085 return OP_X86_SUB_MEMBASE_IMM;
10088 return OP_X86_AND_MEMBASE_IMM;
10091 return OP_X86_OR_MEMBASE_IMM;
10094 return OP_X86_XOR_MEMBASE_IMM;
10100 #if defined(TARGET_AMD64)
/* amd64: additionally allows 64-bit stores; 32-bit ops reuse the X86_ opcodes. */
10101 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
10106 return OP_X86_ADD_MEMBASE_REG;
10108 return OP_X86_SUB_MEMBASE_REG;
10110 return OP_X86_AND_MEMBASE_REG;
10112 return OP_X86_OR_MEMBASE_REG;
10114 return OP_X86_XOR_MEMBASE_REG;
10116 return OP_X86_ADD_MEMBASE_IMM;
10118 return OP_X86_SUB_MEMBASE_IMM;
10120 return OP_X86_AND_MEMBASE_IMM;
10122 return OP_X86_OR_MEMBASE_IMM;
10124 return OP_X86_XOR_MEMBASE_IMM;
/* 64-bit (long) variants below. */
10126 return OP_AMD64_ADD_MEMBASE_REG;
10128 return OP_AMD64_SUB_MEMBASE_REG;
10130 return OP_AMD64_AND_MEMBASE_REG;
10132 return OP_AMD64_OR_MEMBASE_REG;
10134 return OP_AMD64_XOR_MEMBASE_REG;
10137 return OP_AMD64_ADD_MEMBASE_IMM;
10140 return OP_AMD64_SUB_MEMBASE_IMM;
10143 return OP_AMD64_AND_MEMBASE_IMM;
10146 return OP_AMD64_OR_MEMBASE_IMM;
10149 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse an opcode that produces a value being stored to a stack slot into a
 * single store-to-memory opcode.  Currently only handles the SETcc pattern:
 * a CEQ/CNE-style result stored through a 1-byte store becomes
 * OP_X86_SETEQ_MEMBASE / OP_X86_SETNE_MEMBASE.  x86/amd64 only; callers
 * treat -1 (returned on the elided fallthrough path) as "no fusion".
 */
10159 op_to_op_store_membase (int store_opcode, int opcode)
10161 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10164 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10165 return OP_X86_SETEQ_MEMBASE;
10167 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10168 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fuse a load of sreg1 from a stack slot into the consuming instruction,
 * returning an opcode that reads its first operand directly from
 * basereg+offset (e.g. compare-with-memory).  -1 (elided paths) means no
 * fusion is possible; callers check for that.  x86/amd64 only.
 */
10176 op_to_op_src1_membase (int load_opcode, int opcode)
10179 /* FIXME: This has sign extension issues */
/* Special case: 8-bit unsigned load feeding an int compare-immediate. */
10181 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10182 return OP_X86_COMPARE_MEMBASE8_IMM;
/* x86: only 32-bit/pointer-sized loads can be fused. */
10185 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10190 return OP_X86_PUSH_MEMBASE;
10191 case OP_COMPARE_IMM:
10192 case OP_ICOMPARE_IMM:
10193 return OP_X86_COMPARE_MEMBASE_IMM;
10196 return OP_X86_COMPARE_MEMBASE_REG;
10200 #ifdef TARGET_AMD64
10201 /* FIXME: This has sign extension issues */
10203 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10204 return OP_X86_COMPARE_MEMBASE8_IMM;
10209 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10210 return OP_X86_PUSH_MEMBASE;
/* NOTE: the COMPARE_IMM/LCOMPARE_IMM cases below sit inside a FIXME block
 * comment ("only works for 32 bit immediates") — they appear to be
 * commented out in the full source; verify before relying on them. */
10212 /* FIXME: This only works for 32 bit immediates
10213 case OP_COMPARE_IMM:
10214 case OP_LCOMPARE_IMM:
10215 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10216 return OP_AMD64_COMPARE_MEMBASE_IMM;
10218 case OP_ICOMPARE_IMM:
10219 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10220 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10224 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10225 return OP_AMD64_COMPARE_MEMBASE_REG;
10228 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10229 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fuse a load of sreg2 from a stack slot into the consuming ALU/compare
 * instruction, yielding a reg-op-memory opcode (REG_MEMBASE family).
 * Counterpart of op_to_op_src1_membase for the second operand; -1 (elided
 * paths) means no fusion.  x86/amd64 only.  On amd64 each opcode is fused
 * to the 32-bit (X86_) or 64-bit (AMD64_) form depending on the load width.
 */
10238 op_to_op_src2_membase (int load_opcode, int opcode)
/* x86: only 32-bit/pointer-sized loads can be fused. */
10241 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10247 return OP_X86_COMPARE_REG_MEMBASE;
10249 return OP_X86_ADD_REG_MEMBASE;
10251 return OP_X86_SUB_REG_MEMBASE;
10253 return OP_X86_AND_REG_MEMBASE;
10255 return OP_X86_OR_REG_MEMBASE;
10257 return OP_X86_XOR_REG_MEMBASE;
10261 #ifdef TARGET_AMD64
10264 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10265 return OP_AMD64_ICOMPARE_REG_MEMBASE;
10269 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10270 return OP_AMD64_COMPARE_REG_MEMBASE;
/* 32-bit-wide loads fuse to the X86_ opcodes even on amd64. */
10273 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10274 return OP_X86_ADD_REG_MEMBASE;
10276 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10277 return OP_X86_SUB_REG_MEMBASE;
10279 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10280 return OP_X86_AND_REG_MEMBASE;
10282 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10283 return OP_X86_OR_REG_MEMBASE;
10285 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10286 return OP_X86_XOR_REG_MEMBASE;
/* 64-bit-wide loads fuse to the AMD64_ opcodes. */
10288 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10289 return OP_AMD64_ADD_REG_MEMBASE;
10291 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10292 return OP_AMD64_SUB_REG_MEMBASE;
10294 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10295 return OP_AMD64_AND_REG_MEMBASE;
10297 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10298 return OP_AMD64_OR_REG_MEMBASE;
10300 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10301 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm, but refuses to convert opcodes that this
 * architecture only implements via runtime emulation (long shifts on
 * 32-bit targets without native long-shift support, and mul/div when
 * MONO_ARCH_EMULATE_* is set) — converting those to _IMM forms would
 * bypass the emulation path.  The guarded case labels are elided here;
 * everything else is delegated to mono_op_to_op_imm.
 */
10309 mono_op_to_op_imm_noemul (int opcode)
10312 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
10318 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10326 return mono_op_to_op_imm (opcode);
10330 #ifndef DISABLE_JIT
/*
 * Pass overview: (1) scan every instruction in every bblock recording, per
 * vreg, which bblock uses it; a vreg seen in two different bblocks is
 * promoted to a global variable.  (2) Variables confined to a single bblock
 * are demoted back to plain local vregs.  (3) The varinfo/vars tables are
 * compacted to drop dead entries.
 */
10333 * mono_handle_global_vregs:
10335 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
10339 mono_handle_global_vregs (MonoCompile *cfg)
10341 gint32 *vreg_to_bb;
10342 MonoBasicBlock *bb;
/* Per-vreg marker: 0 = unseen, block_num+1 = seen in exactly that bblock,
 * -1 = seen in more than one bblock.
 * NOTE(review): element size is sizeof (gint32*) — pointer size — but the
 * array holds gint32; also the trailing "+ 1" adds one *byte*, not one
 * element.  Harmless over-allocation on 64-bit, but looks unintended;
 * likely meant sizeof (gint32) * (cfg->next_vreg + 1). */
10345 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
10347 #ifdef MONO_ARCH_SIMD_INTRINSICS
10348 if (cfg->uses_simd_intrinsics)
10349 mono_simd_simplify_indirection (cfg);
10352 /* Find local vregs used in more than one bb */
10353 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10354 MonoInst *ins = bb->code;
10355 int block_num = bb->block_num;
10357 if (cfg->verbose_level > 2)
10358 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
10361 for (; ins; ins = ins->next) {
10362 const char *spec = INS_INFO (ins->opcode);
10363 int regtype = 0, regindex;
10366 if (G_UNLIKELY (cfg->verbose_level > 2))
10367 mono_print_ins (ins);
/* CIL opcodes must already have been lowered to machine IR by now. */
10369 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Visit dest, src1, src2, src3 of the instruction in turn; a ' ' in the
 * spec string means the slot is unused (vreg assignment lines elided). */
10371 for (regindex = 0; regindex < 4; regindex ++) {
10374 if (regindex == 0) {
10375 regtype = spec [MONO_INST_DEST];
10376 if (regtype == ' ')
10379 } else if (regindex == 1) {
10380 regtype = spec [MONO_INST_SRC1];
10381 if (regtype == ' ')
10384 } else if (regindex == 2) {
10385 regtype = spec [MONO_INST_SRC2];
10386 if (regtype == ' ')
10389 } else if (regindex == 3) {
10390 regtype = spec [MONO_INST_SRC3];
10391 if (regtype == ' ')
10396 #if SIZEOF_REGISTER == 4
10397 /* In the LLVM case, the long opcodes are not decomposed */
10398 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
10400 * Since some instructions reference the original long vreg,
10401 * and some reference the two component vregs, it is quite hard
10402 * to determine when it needs to be global. So be conservative.
10404 if (!get_vreg_to_inst (cfg, vreg)) {
10405 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10407 if (cfg->verbose_level > 2)
10408 printf ("LONG VREG R%d made global.\n", vreg);
10412 * Make the component vregs volatile since the optimizations can
10413 * get confused otherwise.
10415 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
10416 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
10420 g_assert (vreg != -1);
10422 prev_bb = vreg_to_bb [vreg];
10423 if (prev_bb == 0) {
10424 /* 0 is a valid block num */
10425 vreg_to_bb [vreg] = block_num + 1;
10426 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hardware registers are implicitly global; skip them. */
10427 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
10430 if (!get_vreg_to_inst (cfg, vreg)) {
10431 if (G_UNLIKELY (cfg->verbose_level > 2))
10432 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Promote to a global variable typed by the regtype (switch elided):
 * 'i' -> native int, 'l' -> int64, 'f' -> double, 'v' -> the ins klass. */
10436 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
10439 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10442 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
10445 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
10448 g_assert_not_reached ();
10452 /* Flag as having been used in more than one bb */
10453 vreg_to_bb [vreg] = -1;
10459 /* If a variable is used in only one bblock, convert it into a local vreg */
10460 for (i = 0; i < cfg->num_varinfo; i++) {
10461 MonoInst *var = cfg->varinfo [i];
10462 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
10464 switch (var->type) {
10470 #if SIZEOF_REGISTER == 8
10473 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
10474 /* Enabling this screws up the fp stack on x86 */
10477 /* Arguments are implicitly global */
10478 /* Putting R4 vars into registers doesn't work currently */
10479 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
10481 * Make that the variable's liveness interval doesn't contain a call, since
10482 * that would cause the lvreg to be spilled, making the whole optimization
10485 /* This is too slow for JIT compilation */
/* NOTE(review): this AOT-only liveness scan appears to be disabled in the
 * full source (guard elided here); it indexes vreg_to_bb — a gint32 array —
 * with ->code, which would not compile if enabled.  Verify before use. */
10487 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
10489 int def_index, call_index, ins_index;
10490 gboolean spilled = FALSE;
10495 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
10496 const char *spec = INS_INFO (ins->opcode);
10498 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
10499 def_index = ins_index;
/* NOTE(review): the second clause duplicates the first (SRC1/sreg1 twice);
 * it almost certainly should test spec [MONO_INST_SRC2] && ins->sreg2 —
 * classic copy-paste bug.  Left untouched since this path looks disabled. */
10501 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
10502 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
10503 if (call_index > def_index) {
10509 if (MONO_IS_CALL (ins))
10510 call_index = ins_index;
10520 if (G_UNLIKELY (cfg->verbose_level > 2))
10521 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* Demote: mark the variable dead and drop the vreg->var mapping. */
10522 var->flags |= MONO_INST_IS_DEAD;
10523 cfg->vreg_to_inst [var->dreg] = NULL;
10530 * Compress the varinfo and vars tables so the liveness computation is faster and
10531 * takes up less space.
/* Compaction: shift live entries down over dead ones, keeping inst_c0 and
 * vars[].idx equal to the entry's new index. */
10534 for (i = 0; i < cfg->num_varinfo; ++i) {
10535 MonoInst *var = cfg->varinfo [i];
10536 if (pos < i && cfg->locals_start == i)
10537 cfg->locals_start = pos;
10538 if (!(var->flags & MONO_INST_IS_DEAD)) {
10540 cfg->varinfo [pos] = cfg->varinfo [i];
10541 cfg->varinfo [pos]->inst_c0 = pos;
10542 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
10543 cfg->vars [pos].idx = pos;
10544 #if SIZEOF_REGISTER == 4
10545 if (cfg->varinfo [pos]->type == STACK_I8) {
10546 /* Modify the two component vars too */
10549 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
10550 var1->inst_c0 = pos;
10551 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
10552 var1->inst_c0 = pos;
10559 cfg->num_varinfo = pos;
10560 if (cfg->locals_start > cfg->num_varinfo)
10561 cfg->locals_start = cfg->num_varinfo;
10565 * mono_spill_global_vars:
10567 * Generate spill code for variables which are not allocated to registers,
10568 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10569 * code is generated which could be optimized by the local optimization passes.
10572 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
10574 MonoBasicBlock *bb;
10576 int orig_next_vreg;
10577 guint32 *vreg_to_lvreg;
10579 guint32 i, lvregs_len;
10580 gboolean dest_has_lvreg = FALSE;
10581 guint32 stacktypes [128];
10582 MonoInst **live_range_start, **live_range_end;
10583 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
10585 *need_local_opts = FALSE;
10587 memset (spec2, 0, sizeof (spec2));
10589 /* FIXME: Move this function to mini.c */
10590 stacktypes ['i'] = STACK_PTR;
10591 stacktypes ['l'] = STACK_I8;
10592 stacktypes ['f'] = STACK_R8;
10593 #ifdef MONO_ARCH_SIMD_INTRINSICS
10594 stacktypes ['x'] = STACK_VTYPE;
10597 #if SIZEOF_REGISTER == 4
10598 /* Create MonoInsts for longs */
10599 for (i = 0; i < cfg->num_varinfo; i++) {
10600 MonoInst *ins = cfg->varinfo [i];
10602 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
10603 switch (ins->type) {
10608 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
10611 g_assert (ins->opcode == OP_REGOFFSET);
10613 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10615 tree->opcode = OP_REGOFFSET;
10616 tree->inst_basereg = ins->inst_basereg;
10617 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10619 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10621 tree->opcode = OP_REGOFFSET;
10622 tree->inst_basereg = ins->inst_basereg;
10623 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10633 /* FIXME: widening and truncation */
10636 * As an optimization, when a variable allocated to the stack is first loaded into
10637 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10638 * the variable again.
10640 orig_next_vreg = cfg->next_vreg;
10641 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
10642 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10646 * These arrays contain the first and last instructions accessing a given
10648 * Since we emit bblocks in the same order we process them here, and we
10649 * don't split live ranges, these will precisely describe the live range of
10650 * the variable, i.e. the instruction range where a valid value can be found
10651 * in the variables location.
10652 * The live range is computed using the liveness info computed by the liveness pass.
10653 * We can't use vmv->range, since that is an abstract live range, and we need
10654 * one which is instruction precise.
10655 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
10657 /* FIXME: Only do this if debugging info is requested */
10658 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
10659 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
10660 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10661 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10663 /* Add spill loads/stores */
10664 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10667 if (cfg->verbose_level > 2)
10668 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
10670 /* Clear vreg_to_lvreg array */
10671 for (i = 0; i < lvregs_len; i++)
10672 vreg_to_lvreg [lvregs [i]] = 0;
10676 MONO_BB_FOR_EACH_INS (bb, ins) {
10677 const char *spec = INS_INFO (ins->opcode);
10678 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
10679 gboolean store, no_lvreg;
10680 int sregs [MONO_MAX_SRC_REGS];
10682 if (G_UNLIKELY (cfg->verbose_level > 2))
10683 mono_print_ins (ins);
10685 if (ins->opcode == OP_NOP)
10689 * We handle LDADDR here as well, since it can only be decomposed
10690 * when variable addresses are known.
10692 if (ins->opcode == OP_LDADDR) {
10693 MonoInst *var = ins->inst_p0;
10695 if (var->opcode == OP_VTARG_ADDR) {
10696 /* Happens on SPARC/S390 where vtypes are passed by reference */
10697 MonoInst *vtaddr = var->inst_left;
10698 if (vtaddr->opcode == OP_REGVAR) {
10699 ins->opcode = OP_MOVE;
10700 ins->sreg1 = vtaddr->dreg;
10702 else if (var->inst_left->opcode == OP_REGOFFSET) {
10703 ins->opcode = OP_LOAD_MEMBASE;
10704 ins->inst_basereg = vtaddr->inst_basereg;
10705 ins->inst_offset = vtaddr->inst_offset;
10709 g_assert (var->opcode == OP_REGOFFSET);
10711 ins->opcode = OP_ADD_IMM;
10712 ins->sreg1 = var->inst_basereg;
10713 ins->inst_imm = var->inst_offset;
10716 *need_local_opts = TRUE;
10717 spec = INS_INFO (ins->opcode);
10720 if (ins->opcode < MONO_CEE_LAST) {
10721 mono_print_ins (ins);
10722 g_assert_not_reached ();
10726 * Store opcodes have destbasereg in the dreg, but in reality, it is an
10730 if (MONO_IS_STORE_MEMBASE (ins)) {
10731 tmp_reg = ins->dreg;
10732 ins->dreg = ins->sreg2;
10733 ins->sreg2 = tmp_reg;
10736 spec2 [MONO_INST_DEST] = ' ';
10737 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10738 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10739 spec2 [MONO_INST_SRC3] = ' ';
10741 } else if (MONO_IS_STORE_MEMINDEX (ins))
10742 g_assert_not_reached ();
10747 if (G_UNLIKELY (cfg->verbose_level > 2)) {
10748 printf ("\t %.3s %d", spec, ins->dreg);
10749 num_sregs = mono_inst_get_src_registers (ins, sregs);
10750 for (srcindex = 0; srcindex < 3; ++srcindex)
10751 printf (" %d", sregs [srcindex]);
10758 regtype = spec [MONO_INST_DEST];
10759 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
10762 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
10763 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
10764 MonoInst *store_ins;
10766 MonoInst *def_ins = ins;
10767 int dreg = ins->dreg; /* The original vreg */
10769 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
10771 if (var->opcode == OP_REGVAR) {
10772 ins->dreg = var->dreg;
10773 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
10775 * Instead of emitting a load+store, use a _membase opcode.
10777 g_assert (var->opcode == OP_REGOFFSET);
10778 if (ins->opcode == OP_MOVE) {
10782 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
10783 ins->inst_basereg = var->inst_basereg;
10784 ins->inst_offset = var->inst_offset;
10787 spec = INS_INFO (ins->opcode);
10791 g_assert (var->opcode == OP_REGOFFSET);
10793 prev_dreg = ins->dreg;
10795 /* Invalidate any previous lvreg for this vreg */
10796 vreg_to_lvreg [ins->dreg] = 0;
10800 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
10802 store_opcode = OP_STOREI8_MEMBASE_REG;
10805 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
10807 if (regtype == 'l') {
10808 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
10809 mono_bblock_insert_after_ins (bb, ins, store_ins);
10810 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
10811 mono_bblock_insert_after_ins (bb, ins, store_ins);
10812 def_ins = store_ins;
10815 g_assert (store_opcode != OP_STOREV_MEMBASE);
10817 /* Try to fuse the store into the instruction itself */
10818 /* FIXME: Add more instructions */
10819 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
10820 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
10821 ins->inst_imm = ins->inst_c0;
10822 ins->inst_destbasereg = var->inst_basereg;
10823 ins->inst_offset = var->inst_offset;
10824 spec = INS_INFO (ins->opcode);
10825 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
10826 ins->opcode = store_opcode;
10827 ins->inst_destbasereg = var->inst_basereg;
10828 ins->inst_offset = var->inst_offset;
10832 tmp_reg = ins->dreg;
10833 ins->dreg = ins->sreg2;
10834 ins->sreg2 = tmp_reg;
10837 spec2 [MONO_INST_DEST] = ' ';
10838 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10839 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10840 spec2 [MONO_INST_SRC3] = ' ';
10842 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
10843 // FIXME: The backends expect the base reg to be in inst_basereg
10844 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
10846 ins->inst_basereg = var->inst_basereg;
10847 ins->inst_offset = var->inst_offset;
10848 spec = INS_INFO (ins->opcode);
10850 /* printf ("INS: "); mono_print_ins (ins); */
10851 /* Create a store instruction */
10852 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
10854 /* Insert it after the instruction */
10855 mono_bblock_insert_after_ins (bb, ins, store_ins);
10857 def_ins = store_ins;
10860 * We can't assign ins->dreg to var->dreg here, since the
10861 * sregs could use it. So set a flag, and do it after
10864 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
10865 dest_has_lvreg = TRUE;
10870 if (def_ins && !live_range_start [dreg]) {
10871 live_range_start [dreg] = def_ins;
10872 live_range_start_bb [dreg] = bb;
10879 num_sregs = mono_inst_get_src_registers (ins, sregs);
10880 for (srcindex = 0; srcindex < 3; ++srcindex) {
10881 regtype = spec [MONO_INST_SRC1 + srcindex];
10882 sreg = sregs [srcindex];
/*
 * NOTE(review): this span is the tail of a larger function (its opening is
 * outside this chunk, and several original lines are elided from this view),
 * so only comments are added; every code line is left byte-identical.
 * By content this is the pass that rewrites uses of spilled global vregs:
 * each source register of 'ins' that maps to a stack-allocated variable is
 * replaced by either an existing lvreg, a fused membase operand, or a fresh
 * load inserted before 'ins'.
 */
/* Invariant: a valid source reg always comes with a valid regtype, and vice versa. */
10884 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
/* Source operand refers to a variable with an associated MonoInst (a global vreg). */
10885 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
10886 MonoInst *var = get_vreg_to_inst (cfg, sreg);
10887 MonoInst *use_ins = ins;
10888 MonoInst *load_ins;
10889 guint32 load_opcode;
/* Case 1: variable lives in a hard register — just substitute its dreg. */
10891 if (var->opcode == OP_REGVAR) {
10892 sregs [srcindex] = var->dreg;
10893 //mono_inst_set_src_registers (ins, sregs);
/* This use extends the variable's live range to the current ins/bb. */
10894 live_range_end [sreg] = use_ins;
10895 live_range_end_bb [sreg] = bb;
/* Otherwise the variable must be a stack slot (basereg + offset). */
10899 g_assert (var->opcode == OP_REGOFFSET);
10901 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
/* Vtype loads are not expected here — they are handled elsewhere. TODO confirm. */
10903 g_assert (load_opcode != OP_LOADV_MEMBASE);
/* Case 2: the value was already loaded into an lvreg earlier in this bblock — reuse it. */
10905 if (vreg_to_lvreg [sreg]) {
10906 g_assert (vreg_to_lvreg [sreg] != -1);
10908 /* The variable is already loaded to an lvreg */
10909 if (G_UNLIKELY (cfg->verbose_level > 2))
10910 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
10911 sregs [srcindex] = vreg_to_lvreg [sreg];
10912 //mono_inst_set_src_registers (ins, sregs);
/* Case 3: fold the stack slot directly into the instruction as a membase operand. */
10916 /* Try to fuse the load into the instruction */
10917 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
10918 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
10919 sregs [0] = var->inst_basereg;
10920 //mono_inst_set_src_registers (ins, sregs);
10921 ins->inst_offset = var->inst_offset;
10922 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
10923 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
10924 sregs [1] = var->inst_basereg;
10925 //mono_inst_set_src_registers (ins, sregs);
10926 ins->inst_offset = var->inst_offset;
/* A reg-to-reg move whose operands collapsed into one becomes a no-op. */
10928 if (MONO_IS_REAL_MOVE (ins)) {
10929 ins->opcode = OP_NOP;
10932 //printf ("%d ", srcindex); mono_print_ins (ins);
/* Case 4 (fallback): materialize the value with an explicit load into a new vreg. */
10934 sreg = alloc_dreg (cfg, stacktypes [regtype]);
/* Cache the loaded value as an lvreg unless the target uses an fp stack for this
 * type, the variable is volatile/indirect, or lvreg caching is disabled. */
10936 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
10937 if (var->dreg == prev_dreg) {
10939 * sreg refers to the value loaded by the load
10940 * emitted below, but we need to use ins->dreg
10941 * since it refers to the store emitted earlier.
10945 g_assert (sreg != -1);
/* Remember the lvreg so later uses in this bblock hit Case 2 above. */
10946 vreg_to_lvreg [var->dreg] = sreg;
/* lvregs[] is a fixed-size array — presumably 1024 entries; the assert guards overflow. */
10947 g_assert (lvregs_len < 1024);
10948 lvregs [lvregs_len ++] = var->dreg;
10952 sregs [srcindex] = sreg;
10953 //mono_inst_set_src_registers (ins, sregs);
/* 'l' regtype: a 64-bit value on a 32-bit target — load the two 32-bit halves.
 * NOTE(review): sreg+1/sreg+2 look like the low/high word vregs of the pair
 * (MINI_LS/MS_WORD_OFFSET select the matching stack words) — confirm against
 * the vreg pairing convention used elsewhere in this file. */
10955 if (regtype == 'l') {
10956 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
10957 mono_bblock_insert_before_ins (bb, ins, load_ins);
10958 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
10959 mono_bblock_insert_before_ins (bb, ins, load_ins);
10960 use_ins = load_ins;
/* On 32-bit targets a single 64-bit load must never reach this path. */
10963 #if SIZEOF_REGISTER == 4
10964 g_assert (load_opcode != OP_LOADI8_MEMBASE);
10966 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
10967 mono_bblock_insert_before_ins (bb, ins, load_ins);
10968 use_ins = load_ins;
/* Only track live ranges for vregs that existed before this pass allocated new ones. */
10972 if (var->dreg < orig_next_vreg) {
10973 live_range_end [var->dreg] = use_ins;
10974 live_range_end_bb [var->dreg] = bb;
/* Commit the (possibly rewritten) source registers back into the instruction. */
10978 mono_inst_set_src_registers (ins, sregs);
/* The destination was spilled earlier; record its lvreg now that dreg is final. */
10980 if (dest_has_lvreg) {
10981 g_assert (ins->dreg != -1);
10982 vreg_to_lvreg [prev_dreg] = ins->dreg;
10983 g_assert (lvregs_len < 1024);
10984 lvregs [lvregs_len ++] = prev_dreg;
10985 dest_has_lvreg = FALSE;
/* NOTE(review): dreg/sreg2 were swapped earlier for store opcodes (swap code is
 * outside this view) — this swaps them back. Confirm against the elided lines. */
10989 tmp_reg = ins->dreg;
10990 ins->dreg = ins->sreg2;
10991 ins->sreg2 = tmp_reg;
/* Calls clobber registers, so cached lvregs are invalid past this point. */
10994 if (MONO_IS_CALL (ins)) {
10995 /* Clear vreg_to_lvreg array */
10996 for (i = 0; i < lvregs_len; i++)
10997 vreg_to_lvreg [lvregs [i]] = 0;
10999 } else if (ins->opcode == OP_NOP) {
/* Nopped instructions must not keep stale source registers alive. */
11001 MONO_INST_NULLIFY_SREGS (ins);
11004 if (cfg->verbose_level > 2)
11005 mono_print_ins_index (1, ins);
11008 /* Extend the live range based on the liveness info */
11009 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
11010 for (i = 0; i < cfg->num_varinfo; i ++) {
11011 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
/* Volatile vregs can be accessed in ways liveness analysis does not see. */
11013 if (vreg_is_volatile (cfg, vi->vreg))
11014 /* The liveness info is incomplete */
11017 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
11018 /* Live from at least the first ins of this bb */
11019 live_range_start [vi->vreg] = bb->code;
11020 live_range_start_bb [vi->vreg] = bb;
11023 if (mono_bitset_test_fast (bb->live_out_set, i)) {
11024 /* Live at least until the last ins of this bb */
11025 live_range_end [vi->vreg] = bb->last_ins;
11026 live_range_end_bb [vi->vreg] = bb;
11032 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
11034 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
11035 * by storing the current native offset into MonoMethodVar->live_range_start/end.
/* Requires completed liveness analysis; otherwise the range arrays are meaningless. */
11037 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
11038 for (i = 0; i < cfg->num_varinfo; ++i) {
11039 int vreg = MONO_VARINFO (cfg, i)->vreg;
11042 if (live_range_start [vreg]) {
11043 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
11045 ins->inst_c1 = vreg;
11046 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
11048 if (live_range_end [vreg]) {
11049 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
11051 ins->inst_c1 = vreg;
/* If the range ends at the bblock's last ins, append via the helper so the marker
 * lands correctly relative to any terminating branch. */
11052 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
11053 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
11055 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
/* Pass-local scratch arrays; g_free(NULL) is a safe no-op. */
11061 g_free (live_range_start);
11062 g_free (live_range_end);
11063 g_free (live_range_start_bb);
11064 g_free (live_range_end_bb);
11069 * - use 'iadd' instead of 'int_add'
11070 * - handling ovf opcodes: decompose in method_to_ir.
11071 * - unify iregs/fregs
11072 * -> partly done, the missing parts are:
11073 * - a more complete unification would involve unifying the hregs as well, so
11074 * code wouldn't need if (fp) all over the place. but that would mean the hregs
11075 * would no longer map to the machine hregs, so the code generators would need to
11076 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
11077 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
11078 * fp/non-fp branches speeds it up by about 15%.
11079 * - use sext/zext opcodes instead of shifts
11081 * - get rid of TEMPLOADs if possible and use vregs instead
11082 * - clean up usage of OP_P/OP_ opcodes
11083 * - cleanup usage of DUMMY_USE
11084 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
11086 * - set the stack type and allocate a dreg in the EMIT_NEW macros
11087 * - get rid of all the <foo>2 stuff when the new JIT is ready.
11088 * - make sure handle_stack_args () is called before the branch is emitted
11089 * - when the new IR is done, get rid of all unused stuff
11090 * - COMPARE/BEQ as separate instructions or unify them ?
11091 * - keeping them separate allows specialized compare instructions like
11092 * compare_imm, compare_membase
11093 * - most back ends unify fp compare+branch, fp compare+ceq
11094 * - integrate mono_save_args into inline_method
11095 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
11096 * - handle long shift opts on 32 bit platforms somehow: they require
11097 * 3 sregs (2 for arg1 and 1 for arg2)
11098 * - make byref a 'normal' type.
11099 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
11100 * variable if needed.
11101 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11102 * like inline_method.
11103 * - remove inlining restrictions
11104 * - fix LNEG and enable cfold of INEG
11105 * - generalize x86 optimizations like ldelema as a peephole optimization
11106 * - add store_mem_imm for amd64
11107 * - optimize the loading of the interruption flag in the managed->native wrappers
11108 * - avoid special handling of OP_NOP in passes
11109 * - move code inserting instructions into one function/macro.
11110 * - try a coalescing phase after liveness analysis
11111 * - add float -> vreg conversion + local optimizations on !x86
11112 * - figure out how to handle decomposed branches during optimizations, ie.
11113 * compare+branch, op_jump_table+op_br etc.
11114 * - promote RuntimeXHandles to vregs
11115 * - vtype cleanups:
11116 * - add a NEW_VARLOADA_VREG macro
11117 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11118 * accessing vtype fields.
11119 * - get rid of I8CONST on 64 bit platforms
11120 * - dealing with the increase in code size due to branches created during opcode
11122 * - use extended basic blocks
11123 * - all parts of the JIT
11124 * - handle_global_vregs () && local regalloc
11125 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11126 * - sources of increase in code size:
11129 * - isinst and castclass
11130 * - lvregs not allocated to global registers even if used multiple times
11131 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11133 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11134 * - add all micro optimizations from the old JIT
11135 * - put tree optimizations into the deadce pass
11136 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11137 * specific function.
11138 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11139 * fcompare + branchCC.
11140 * - create a helper function for allocating a stack slot, taking into account
11141 * MONO_CFG_HAS_SPILLUP.
11143 * - merge the ia64 switch changes.
11144 * - optimize mono_regstate2_alloc_int/float.
11145 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11146 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11147 * parts of the tree could be separated by other instructions, killing the tree
11148 * arguments, or stores killing loads etc. Also, should we fold loads into other
11149 * instructions if the result of the load is used multiple times ?
11150 * - make the REM_IMM optimization in mini-x86.c arch-independent.
11151 * - LAST MERGE: 108395.
11152 * - when returning vtypes in registers, generate IR and append it to the end of the
11153 * last bb instead of doing it in the epilog.
11154 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
11162 - When to decompose opcodes:
11163 - earlier: this makes some optimizations hard to implement, since the low level IR
11164 no longer contains the necessary information. But it is easier to do.
11165 - later: harder to implement, enables more optimizations.
11166 - Branches inside bblocks:
11167 - created when decomposing complex opcodes.
11168 - branches to another bblock: harmless, but not tracked by the branch
11169 optimizations, so need to branch to a label at the start of the bblock.
11170 - branches to inside the same bblock: very problematic, trips up the local
11171 reg allocator. Can be fixed by splitting the current bblock, but that is a
11172 complex operation, since some local vregs can become global vregs etc.
11173 - Local/global vregs:
11174 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11175 local register allocator.
11176 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11177 structure, created by mono_create_var (). Assigned to hregs or the stack by
11178 the global register allocator.
11179 - When to do optimizations like alu->alu_imm:
11180 - earlier -> saves work later on since the IR will be smaller/simpler
11181 - later -> can work on more instructions
11182 - Handling of valuetypes:
11183 - When a vtype is pushed on the stack, a new temporary is created, an
11184 instruction computing its address (LDADDR) is emitted and pushed on
11185 the stack. Need to optimize cases when the vtype is used immediately as in
11186 argument passing, stloc etc.
11187 - Instead of the to_end stuff in the old JIT, simply call the function handling
11188 the values on the stack before emitting the last instruction of the bb.
11191 #endif /* DISABLE_JIT */