2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
30 #include <mono/utils/memcheck.h>
32 #include <mono/metadata/assembly.h>
33 #include <mono/metadata/loader.h>
34 #include <mono/metadata/tabledefs.h>
35 #include <mono/metadata/class.h>
36 #include <mono/metadata/object.h>
37 #include <mono/metadata/exception.h>
38 #include <mono/metadata/opcodes.h>
39 #include <mono/metadata/mono-endian.h>
40 #include <mono/metadata/tokentype.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/marshal.h>
43 #include <mono/metadata/debug-helpers.h>
44 #include <mono/metadata/mono-debug.h>
45 #include <mono/metadata/gc-internal.h>
46 #include <mono/metadata/security-manager.h>
47 #include <mono/metadata/threads-types.h>
48 #include <mono/metadata/security-core-clr.h>
49 #include <mono/metadata/monitor.h>
50 #include <mono/metadata/profiler-private.h>
51 #include <mono/metadata/profiler.h>
52 #include <mono/utils/mono-compiler.h>
53 #include <mono/metadata/mono-basic-block.h>
60 #include "jit-icalls.h"
62 #include "debugger-agent.h"
64 #define BRANCH_COST 100
65 #define INLINE_LENGTH_LIMIT 20
66 #define INLINE_FAILURE do {\
67 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
70 #define CHECK_CFG_EXCEPTION do {\
71 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
74 #define METHOD_ACCESS_FAILURE do { \
75 char *method_fname = mono_method_full_name (method, TRUE); \
76 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
77 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
78 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
79 g_free (method_fname); \
80 g_free (cil_method_fname); \
81 goto exception_exit; \
83 #define FIELD_ACCESS_FAILURE do { \
84 char *method_fname = mono_method_full_name (method, TRUE); \
85 char *field_fname = mono_field_full_name (field); \
86 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
87 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
88 g_free (method_fname); \
89 g_free (field_fname); \
90 goto exception_exit; \
92 #define GENERIC_SHARING_FAILURE(opcode) do { \
93 if (cfg->generic_sharing_context) { \
94 if (cfg->verbose_level > 2) \
95 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
96 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
97 goto exception_exit; \
101 /* Determine whether 'ins' represents a load of the 'this' argument */
102 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
104 static int ldind_to_load_membase (int opcode);
105 static int stind_to_store_membase (int opcode);
107 int mono_op_to_op_imm (int opcode);
108 int mono_op_to_op_imm_noemul (int opcode);
110 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
111 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
112 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
114 /* helper methods signature */
115 extern MonoMethodSignature *helper_sig_class_init_trampoline;
116 extern MonoMethodSignature *helper_sig_domain_get;
117 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
118 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
119 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
120 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
123 * Instruction metadata
131 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
132 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
138 #if SIZEOF_REGISTER == 8
143 /* keep in sync with the enum in mini.h */
146 #include "mini-ops.h"
151 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
152 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
154 * This should contain the index of the last sreg + 1. This is not the same
155 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
157 const gint8 ins_sreg_counts[] = {
158 #include "mini-ops.h"
163 #define MONO_INIT_VARINFO(vi,id) do { \
164 (vi)->range.first_use.pos.bid = 0xffff; \
/* Copy the three source-register numbers from 'regs' into 'ins'.
 * NOTE(review): this listing is decimated — lines are missing between the
 * numbered fragments; consult the full source before editing. */
170 mono_inst_set_src_registers (MonoInst *ins, int *regs)
172 ins->sreg1 = regs [0];
173 ins->sreg2 = regs [1];
174 ins->sreg3 = regs [2];
/* Thin public wrappers around the internal virtual-register allocators:
 * integer, float, pointer-sized, and stack-type-driven destination regs. */
178 mono_alloc_ireg (MonoCompile *cfg)
180 return alloc_ireg (cfg);
184 mono_alloc_freg (MonoCompile *cfg)
186 return alloc_freg (cfg);
190 mono_alloc_preg (MonoCompile *cfg)
192 return alloc_preg (cfg);
/* 'stack_type' selects the register class matching the eval-stack type */
196 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
198 return alloc_dreg (cfg, stack_type);
/* Map a MonoType to the move opcode used when copying a value of that type
 * between registers. Enums and generic instances are unwrapped to their
 * underlying/container type and re-dispatched.
 * NOTE(review): fragmentary listing — several cases and returns are not
 * visible here; do not infer the full switch from these lines alone. */
202 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
208 switch (type->type) {
211 case MONO_TYPE_BOOLEAN:
223 case MONO_TYPE_FNPTR:
225 case MONO_TYPE_CLASS:
226 case MONO_TYPE_STRING:
227 case MONO_TYPE_OBJECT:
228 case MONO_TYPE_SZARRAY:
229 case MONO_TYPE_ARRAY:
/* 64-bit targets handle I8/U8 as plain register moves */
233 #if SIZEOF_REGISTER == 8
242 case MONO_TYPE_VALUETYPE:
/* enums are moved as their underlying integral type */
243 if (type->data.klass->enumtype) {
244 type = mono_class_enum_basetype (type->data.klass);
/* SIMD value types get a dedicated move — presumably OP_XMOVE; verify */
247 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
250 case MONO_TYPE_TYPEDBYREF:
/* generic instances are handled via their container class's byval type */
252 case MONO_TYPE_GENERICINST:
253 type = &type->data.generic_class->container_class->byval_arg;
/* VAR/MVAR only reach here under generic sharing */
257 g_assert (cfg->generic_sharing_context);
260 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/* Debug helper: dump a basic block's predecessor/successor edges (with
 * depth-first numbers) and every instruction it contains to stdout. */
266 mono_print_bb (MonoBasicBlock *bb, const char *msg)
271 printf ("\n%s %d: [IN: ", msg, bb->block_num);
/* incoming edges */
272 for (i = 0; i < bb->in_count; ++i)
273 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
/* outgoing edges */
275 for (i = 0; i < bb->out_count; ++i)
276 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
/* dump the instruction list */
278 for (tree = bb->code; tree; tree = tree->next)
279 mono_print_ins_index (-1, tree);
283 * Can't put this at the beginning, since other files reference stuff from this
288 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
290 #define GET_BBLOCK(cfg,tblock,ip) do { \
291 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
293 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
294 NEW_BBLOCK (cfg, (tblock)); \
295 (tblock)->cil_code = (ip); \
296 ADD_BBLOCK (cfg, (tblock)); \
300 #if defined(TARGET_X86) || defined(TARGET_AMD64)
301 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
302 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
303 (dest)->dreg = alloc_preg ((cfg)); \
304 (dest)->sreg1 = (sr1); \
305 (dest)->sreg2 = (sr2); \
306 (dest)->inst_imm = (imm); \
307 (dest)->backend.shift_amount = (shift); \
308 MONO_ADD_INS ((cfg)->cbb, (dest)); \
312 #if SIZEOF_REGISTER == 8
313 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
314 /* FIXME: Need to add many more cases */ \
315 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
317 int dr = alloc_preg (cfg); \
318 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
319 (ins)->sreg2 = widen->dreg; \
323 #define ADD_WIDEN_OP(ins, arg1, arg2)
326 #define ADD_BINOP(op) do { \
327 MONO_INST_NEW (cfg, ins, (op)); \
329 ins->sreg1 = sp [0]->dreg; \
330 ins->sreg2 = sp [1]->dreg; \
331 type_from_op (ins, sp [0], sp [1]); \
333 /* Have to insert a widening op */ \
334 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
335 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
336 MONO_ADD_INS ((cfg)->cbb, (ins)); \
337 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
340 #define ADD_UNOP(op) do { \
341 MONO_INST_NEW (cfg, ins, (op)); \
343 ins->sreg1 = sp [0]->dreg; \
344 type_from_op (ins, sp [0], NULL); \
346 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
347 MONO_ADD_INS ((cfg)->cbb, (ins)); \
348 *sp++ = mono_decompose_opcode (cfg, ins); \
351 #define ADD_BINCOND(next_block) do { \
354 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
355 cmp->sreg1 = sp [0]->dreg; \
356 cmp->sreg2 = sp [1]->dreg; \
357 type_from_op (cmp, sp [0], sp [1]); \
359 type_from_op (ins, sp [0], sp [1]); \
360 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
361 GET_BBLOCK (cfg, tblock, target); \
362 link_bblock (cfg, bblock, tblock); \
363 ins->inst_true_bb = tblock; \
364 if ((next_block)) { \
365 link_bblock (cfg, bblock, (next_block)); \
366 ins->inst_false_bb = (next_block); \
367 start_new_bblock = 1; \
369 GET_BBLOCK (cfg, tblock, ip); \
370 link_bblock (cfg, bblock, tblock); \
371 ins->inst_false_bb = tblock; \
372 start_new_bblock = 2; \
374 if (sp != stack_start) { \
375 handle_stack_args (cfg, stack_start, sp - stack_start); \
376 CHECK_UNVERIFIABLE (cfg); \
378 MONO_ADD_INS (bblock, cmp); \
379 MONO_ADD_INS (bblock, ins); \
383 * link_bblock: Links two basic blocks
385 * Links two basic blocks in the control-flow graph: the 'from'
386 * argument is the starting block and the 'to' argument is the block
387 * control flow transfers to after 'from'. Duplicate edges are not added.
390 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
392 MonoBasicBlock **newa;
/* verbose tracing of the edge being added (entry/exit printed specially) */
396 if (from->cil_code) {
398 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
400 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
403 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
405 printf ("edge from entry to exit\n");
/* bail out early if the out-edge already exists */
410 for (i = 0; i < from->out_count; ++i) {
411 if (to == from->out_bb [i]) {
/* grow from->out_bb by one (mempool arrays are copied, never realloc'd) */
417 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
418 for (i = 0; i < from->out_count; ++i) {
419 newa [i] = from->out_bb [i];
/* same dance for the symmetric in-edge on 'to' */
427 for (i = 0; i < to->in_count; ++i) {
428 if (from == to->in_bb [i]) {
434 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
435 for (i = 0; i < to->in_count; ++i) {
436 newa [i] = to->in_bb [i];
/* Public wrapper around the static link_bblock () above. */
445 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
447 link_bblock (cfg, from, to);
451 * mono_find_block_region:
453 * We mark each basic block with a region ID. We use that to avoid BB
454 * optimizations when blocks are in different regions.
457 * A region token that encodes where this region is, and information
458 * about the clause owner for this block.
460 * The region encodes the try/catch/filter clause that owns this block
461 * as well as the type. -1 is a special value that represents a block
462 * that is in none of try/catch/filter.
465 mono_find_block_region (MonoCompile *cfg, int offset)
467 MonoMethodHeader *header = cfg->header;
468 MonoExceptionClause *clause;
/* scan every EH clause; token layout is ((clause_index+1) << 8) | region | flags */
471 for (i = 0; i < header->num_clauses; ++i) {
472 clause = &header->clauses [i];
/* filter blocks live between filter_offset and handler_offset */
473 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
474 (offset < (clause->handler_offset)))
475 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
477 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
478 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
479 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
480 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
481 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
/* anything else in a handler is a catch region */
483 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* offset inside the protected (try) range of the clause */
486 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
487 return ((i + 1) << 8) | clause->flags;
/* Collect the EH clauses of kind 'type' (e.g. finally) that a branch from
 * 'ip' to 'target' would leave: the clause covers 'ip' but not 'target'.
 * Returns a GList of MonoExceptionClause*; caller presumably owns the
 * list — TODO confirm against callers. */
494 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
496 MonoMethodHeader *header = cfg->header;
497 MonoExceptionClause *clause;
501 for (i = 0; i < header->num_clauses; ++i) {
502 clause = &header->clauses [i];
/* exiting the clause: source inside, destination outside */
503 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
504 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
505 if (clause->flags == type)
506 res = g_list_append (res, clause);
/* Get-or-create the stack-pointer spill variable for an EH region,
 * cached in cfg->spvars keyed by region token. */
513 mono_create_spvar_for_region (MonoCompile *cfg, int region)
517 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
521 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
522 /* prevent it from being register allocated */
523 var->flags |= MONO_INST_INDIRECT;
525 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up (without creating) the exception variable for a handler offset. */
529 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
531 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Get-or-create the exception-object variable for a handler offset,
 * cached in cfg->exvars. Mirrors mono_create_spvar_for_region (). */
535 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
539 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
543 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
544 /* prevent it from being register allocated */
545 var->flags |= MONO_INST_INDIRECT;
547 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
553 * Returns the type used in the eval stack when @type is loaded.
554 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/* Sets inst->type (STACK_*) and inst->klass from a MonoType; enums and
 * generic instances are unwrapped and re-dispatched. */
557 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
561 inst->klass = klass = mono_class_from_mono_type (type);
/* byref values are managed pointers on the eval stack */
563 inst->type = STACK_MP;
568 switch (type->type) {
570 inst->type = STACK_INV;
574 case MONO_TYPE_BOOLEAN:
/* small integral types all widen to I4 on the stack */
580 inst->type = STACK_I4;
585 case MONO_TYPE_FNPTR:
586 inst->type = STACK_PTR;
588 case MONO_TYPE_CLASS:
589 case MONO_TYPE_STRING:
590 case MONO_TYPE_OBJECT:
591 case MONO_TYPE_SZARRAY:
592 case MONO_TYPE_ARRAY:
593 inst->type = STACK_OBJ;
597 inst->type = STACK_I8;
601 inst->type = STACK_R8;
603 case MONO_TYPE_VALUETYPE:
/* enums evaluate as their underlying integral type */
604 if (type->data.klass->enumtype) {
605 type = mono_class_enum_basetype (type->data.klass);
609 inst->type = STACK_VTYPE;
612 case MONO_TYPE_TYPEDBYREF:
613 inst->klass = mono_defaults.typed_reference_class;
614 inst->type = STACK_VTYPE;
616 case MONO_TYPE_GENERICINST:
617 type = &type->data.generic_class->container_class->byval_arg;
620 case MONO_TYPE_MVAR :
621 /* FIXME: all the arguments must be references for now,
622 * later look inside cfg and see if the arg num is
/* VAR/MVAR only valid under generic sharing; treated as references */
625 g_assert (cfg->generic_sharing_context);
626 inst->type = STACK_OBJ;
629 g_error ("unknown type 0x%02x in eval stack type", type->type);
634 * The following tables are used to quickly validate the IL code in type_from_op ().
/* Each table is indexed by STACK_* of the operand(s); STACK_INV marks an
 * invalid operand combination. The *_op_map tables hold opcode deltas that
 * convert a generic CEE_* opcode into its type-specific OP_* variant. */
637 bin_num_table [STACK_MAX] [STACK_MAX] = {
638 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
639 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
640 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
641 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
642 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
643 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
644 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
645 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* result type of unary negation per operand stack type */
650 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
653 /* reduce the size of this table */
655 bin_int_table [STACK_MAX] [STACK_MAX] = {
656 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
657 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
658 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
659 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
660 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
661 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
662 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
663 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* comparison validity: 0 = invalid, non-zero encodes the allowed kind */
667 bin_comp_table [STACK_MAX] [STACK_MAX] = {
668 /* Inv i L p F & O vt */
670 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
671 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
672 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
673 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
674 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
675 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
676 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
679 /* reduce the size of this table */
681 shift_table [STACK_MAX] [STACK_MAX] = {
682 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
683 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
684 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
685 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
686 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
687 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
688 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
689 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
693 * Tables to map from the non-specific opcode to the matching
694 * type-specific opcode.
696 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
698 binops_op_map [STACK_MAX] = {
699 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
702 /* handles from CEE_NEG to CEE_CONV_U8 */
704 unops_op_map [STACK_MAX] = {
705 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
708 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
710 ovfops_op_map [STACK_MAX] = {
711 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
714 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
716 ovf2ops_op_map [STACK_MAX] = {
717 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
720 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
722 ovf3ops_op_map [STACK_MAX] = {
723 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
726 /* handles from CEE_BEQ to CEE_BLT_UN */
728 beqops_op_map [STACK_MAX] = {
729 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
732 /* handles from CEE_CEQ to CEE_CLT_UN */
734 ceqops_op_map [STACK_MAX] = {
735 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
739 * Sets ins->type (the type on the eval stack) according to the
740 * type of the opcode and the arguments to it.
741 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
743 * FIXME: this function sets ins->type unconditionally in some cases, but
744 * it should set it to invalid for some types (a conv.x on an object)
/* NOTE(review): decimated listing — several case labels and break statements
 * between the numbered fragments are not visible here. */
747 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
749 switch (ins->opcode) {
756 /* FIXME: check unverifiable args for STACK_MP */
/* arithmetic binops: validate via bin_num_table, then specialize opcode */
757 ins->type = bin_num_table [src1->type] [src2->type];
758 ins->opcode += binops_op_map [ins->type];
/* integer-only binops (and/or/xor etc.) */
765 ins->type = bin_int_table [src1->type] [src2->type];
766 ins->opcode += binops_op_map [ins->type];
/* shifts: result type follows the value operand, not the shift count */
771 ins->type = shift_table [src1->type] [src2->type];
772 ins->opcode += binops_op_map [ins->type];
/* compare: pick L/F/I variant from the first operand's stack type */
777 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
778 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
779 ins->opcode = OP_LCOMPARE;
780 else if (src1->type == STACK_R8)
781 ins->opcode = OP_FCOMPARE;
783 ins->opcode = OP_ICOMPARE;
785 case OP_ICOMPARE_IMM:
786 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
787 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
788 ins->opcode = OP_LCOMPARE_IMM;
/* conditional branches specialize via beqops_op_map */
800 ins->opcode += beqops_op_map [src1->type];
/* ceq/cgt/clt produce an I4 result */
803 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
804 ins->opcode += ceqops_op_map [src1->type];
810 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
811 ins->opcode += ceqops_op_map [src1->type];
/* neg: result type from neg_table */
815 ins->type = neg_table [src1->type];
816 ins->opcode += unops_op_map [ins->type];
/* not: only integral/pointer operands are valid */
819 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
820 ins->type = src1->type;
822 ins->type = STACK_INV;
823 ins->opcode += unops_op_map [ins->type];
/* conv to small int: result is I4 */
829 ins->type = STACK_I4;
830 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: pick integer- or long-sourced unsigned-to-float conversion */
833 ins->type = STACK_R8;
834 switch (src1->type) {
837 ins->opcode = OP_ICONV_TO_R_UN;
840 ins->opcode = OP_LCONV_TO_R_UN;
844 case CEE_CONV_OVF_I1:
845 case CEE_CONV_OVF_U1:
846 case CEE_CONV_OVF_I2:
847 case CEE_CONV_OVF_U2:
848 case CEE_CONV_OVF_I4:
849 case CEE_CONV_OVF_U4:
850 ins->type = STACK_I4;
851 ins->opcode += ovf3ops_op_map [src1->type];
853 case CEE_CONV_OVF_I_UN:
854 case CEE_CONV_OVF_U_UN:
855 ins->type = STACK_PTR;
856 ins->opcode += ovf2ops_op_map [src1->type];
858 case CEE_CONV_OVF_I1_UN:
859 case CEE_CONV_OVF_I2_UN:
860 case CEE_CONV_OVF_I4_UN:
861 case CEE_CONV_OVF_U1_UN:
862 case CEE_CONV_OVF_U2_UN:
863 case CEE_CONV_OVF_U4_UN:
864 ins->type = STACK_I4;
865 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: pointer-sized result, opcode depends on source width */
868 ins->type = STACK_PTR;
869 switch (src1->type) {
871 ins->opcode = OP_ICONV_TO_U;
/* on 64-bit, pointer-sized sources need a long conversion; 32-bit is a move */
875 #if SIZEOF_REGISTER == 8
876 ins->opcode = OP_LCONV_TO_U;
878 ins->opcode = OP_MOVE;
882 ins->opcode = OP_LCONV_TO_U;
885 ins->opcode = OP_FCONV_TO_U;
/* conv to I8/U8 */
891 ins->type = STACK_I8;
892 ins->opcode += unops_op_map [src1->type];
894 case CEE_CONV_OVF_I8:
895 case CEE_CONV_OVF_U8:
896 ins->type = STACK_I8;
897 ins->opcode += ovf3ops_op_map [src1->type];
899 case CEE_CONV_OVF_U8_UN:
900 case CEE_CONV_OVF_I8_UN:
901 ins->type = STACK_I8;
902 ins->opcode += ovf2ops_op_map [src1->type];
/* conv to R4/R8 */
906 ins->type = STACK_R8;
907 ins->opcode += unops_op_map [src1->type];
910 ins->type = STACK_R8;
/* conv.u2/u1/i with overflow-style op map */
914 ins->type = STACK_I4;
915 ins->opcode += ovfops_op_map [src1->type];
920 ins->type = STACK_PTR;
921 ins->opcode += ovfops_op_map [src1->type];
/* checked arithmetic: floats are invalid for add/sub/mul.ovf */
929 ins->type = bin_num_table [src1->type] [src2->type];
930 ins->opcode += ovfops_op_map [src1->type];
931 if (ins->type == STACK_R8)
932 ins->type = STACK_INV;
934 case OP_LOAD_MEMBASE:
935 ins->type = STACK_PTR;
937 case OP_LOADI1_MEMBASE:
938 case OP_LOADU1_MEMBASE:
939 case OP_LOADI2_MEMBASE:
940 case OP_LOADU2_MEMBASE:
941 case OP_LOADI4_MEMBASE:
942 case OP_LOADU4_MEMBASE:
943 ins->type = STACK_PTR;
945 case OP_LOADI8_MEMBASE:
946 ins->type = STACK_I8;
948 case OP_LOADR4_MEMBASE:
949 case OP_LOADR8_MEMBASE:
950 ins->type = STACK_R8;
953 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* managed pointers carry object_class as a conservative klass */
957 if (ins->type == STACK_MP)
958 ins->klass = mono_defaults.object_class;
/* ldind result stack types (fragment) and the param_table used below. */
963 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
969 param_table [STACK_MAX] [STACK_MAX] = {
/* Validate that the stack values in 'args' are compatible with 'sig'.
 * NOTE(review): heavily decimated — returns and several cases are missing;
 * presumably returns 0 on mismatch — confirm against the full source. */
974 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
978 switch (args->type) {
988 for (i = 0; i < sig->param_count; ++i) {
989 switch (args [i].type) {
/* managed pointers require a byref parameter */
993 if (!sig->params [i]->byref)
997 if (sig->params [i]->byref)
999 switch (sig->params [i]->type) {
1000 case MONO_TYPE_CLASS:
1001 case MONO_TYPE_STRING:
1002 case MONO_TYPE_OBJECT:
1003 case MONO_TYPE_SZARRAY:
1004 case MONO_TYPE_ARRAY:
/* floating-point stack values must match R4/R8 parameters */
1011 if (sig->params [i]->byref)
1013 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1022 /*if (!param_table [args [i].type] [sig->params [i]->type])
1030 * When we need a pointer to the current domain many times in a method, we
1031 * call mono_domain_get() once and we store the result in a local variable.
1032 * This function returns the variable that represents the MonoDomain*.
/* Lazily creates the cached domain variable on first use. */
1034 inline static MonoInst *
1035 mono_get_domainvar (MonoCompile *cfg)
1037 if (!cfg->domainvar)
1038 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1039 return cfg->domainvar;
1043 * The got_var contains the address of the Global Offset Table when AOT
/* Lazily creates cfg->got_var on architectures that need an explicit GOT
 * variable; only meaningful when compiling AOT. */
1047 mono_get_got_var (MonoCompile *cfg)
1049 #ifdef MONO_ARCH_NEED_GOT_VAR
1050 if (!cfg->compile_aot)
1052 if (!cfg->got_var) {
1053 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1055 return cfg->got_var;
/* Lazily creates the runtime-generic-context variable used by shared
 * generic code. Only valid under a generic sharing context. */
1062 mono_get_vtable_var (MonoCompile *cfg)
1064 g_assert (cfg->generic_sharing_context);
1066 if (!cfg->rgctx_var) {
1067 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1068 /* force the var to be stack allocated */
1069 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1072 return cfg->rgctx_var;
/* Inverse of type_to_eval_stack_type: map a STACK_* eval-stack type back
 * to a MonoType*, using ins->klass for MP/VTYPE entries. */
1076 type_from_stack_type (MonoInst *ins) {
1077 switch (ins->type) {
1078 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1079 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1080 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1081 case STACK_R8: return &mono_defaults.double_class->byval_arg;
/* managed pointer: byref ('this_arg') form of the instruction's klass */
1083 return &ins->klass->this_arg;
1084 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1085 case STACK_VTYPE: return &ins->klass->byval_arg;
1087 g_error ("stack type %d to monotype not handled\n", ins->type);
/* Map a MonoType to its STACK_* eval-stack category.
 * NOTE(review): decimated — return statements between the case labels are
 * not visible in this listing. */
1092 static G_GNUC_UNUSED int
1093 type_to_stack_type (MonoType *t)
/* strip enum wrappers etc. before classifying */
1095 t = mono_type_get_underlying_type (t);
1099 case MONO_TYPE_BOOLEAN:
1102 case MONO_TYPE_CHAR:
1109 case MONO_TYPE_FNPTR:
1111 case MONO_TYPE_CLASS:
1112 case MONO_TYPE_STRING:
1113 case MONO_TYPE_OBJECT:
1114 case MONO_TYPE_SZARRAY:
1115 case MONO_TYPE_ARRAY:
1123 case MONO_TYPE_VALUETYPE:
1124 case MONO_TYPE_TYPEDBYREF:
/* generic value-type instantiations count as VTYPE; others as OBJ */
1126 case MONO_TYPE_GENERICINST:
1127 if (mono_type_generic_inst_is_valuetype (t))
1133 g_assert_not_reached ();
/* Map a CEE_LDELEM_*/CEE_STELEM_* opcode to the element MonoClass it
 * accesses. NOTE(review): case labels between fragments are missing in
 * this decimated listing. */
1140 array_access_to_klass (int opcode)
1144 return mono_defaults.byte_class;
1146 return mono_defaults.uint16_class;
1149 return mono_defaults.int_class;
1152 return mono_defaults.sbyte_class;
1155 return mono_defaults.int16_class;
1158 return mono_defaults.int32_class;
1160 return mono_defaults.uint32_class;
1163 return mono_defaults.int64_class;
1166 return mono_defaults.single_class;
1169 return mono_defaults.double_class;
1170 case CEE_LDELEM_REF:
1171 case CEE_STELEM_REF:
1172 return mono_defaults.object_class;
1174 g_assert_not_reached ();
1180 * We try to share variables when possible
/* Return a local variable to hold the value in eval-stack 'slot' of type
 * ins->type, reusing a cached one (cfg->intvars) when possible. */
1183 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1188 /* inlining can result in deeper stacks */
1189 if (slot >= cfg->header->max_stack)
1190 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* index into the (slot, stack-type) cache */
1192 pos = ins->type - 1 + slot * STACK_MAX;
1194 switch (ins->type) {
1201 if ((vnum = cfg->intvars [pos]))
1202 return cfg->varinfo [vnum];
1203 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1204 cfg->intvars [pos] = res->inst_c0;
/* non-cacheable types always get a fresh variable */
1207 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Record an image+token pair for 'key' in cfg->token_info_hash so the AOT
 * compiler can later resolve the item from metadata alone. */
1213 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1216 * Don't use this if a generic_context is set, since that means AOT can't
1217 * look up the method using just the image+token.
1218 * table == 0 means this is a reference made from a wrapper.
1220 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1221 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1222 jump_info_token->image = image;
1223 jump_info_token->token = token;
1224 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1229 * This function is called to handle items that are left on the evaluation stack
1230 * at basic block boundaries. What happens is that we save the values to local variables
1231 * and we reload them later when first entering the target basic block (with the
1232 * handle_loaded_temps () function).
1233 * A single join point will use the same variables (stored in the array bb->out_stack or
1234 * bb->in_stack, if the basic block is before or after the join point).
1236 * This function needs to be called _before_ emitting the last instruction of
1237 * the bb (i.e. before emitting a branch).
1238 * If the stack merge fails at a join point, cfg->unverifiable is set.
1241 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1244 MonoBasicBlock *bb = cfg->cbb;
1245 MonoBasicBlock *outb;
1246 MonoInst *inst, **locals;
1251 if (cfg->verbose_level > 3)
1252 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* first exit from this bb: pick or build the out_stack variable array */
1253 if (!bb->out_scount) {
1254 bb->out_scount = count;
1255 //printf ("bblock %d has out:", bb->block_num);
/* prefer sharing a successor's already-assigned in_stack */
1257 for (i = 0; i < bb->out_count; ++i) {
1258 outb = bb->out_bb [i];
1259 /* exception handlers are linked, but they should not be considered for stack args */
1260 if (outb->flags & BB_EXCEPTION_HANDLER)
1262 //printf (" %d", outb->block_num);
1263 if (outb->in_stack) {
1265 bb->out_stack = outb->in_stack;
/* no successor had one: allocate fresh out_stack variables */
1271 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1272 for (i = 0; i < count; ++i) {
1274 * try to reuse temps already allocated for this purpose, if they occupy the same
1275 * stack slot and if they are of the same type.
1276 * This won't cause conflicts since if 'local' is used to
1277 * store one of the values in the in_stack of a bblock, then
1278 * the same variable will be used for the same outgoing stack
1280 * This doesn't work when inlining methods, since the bblocks
1281 * in the inlined methods do not inherit their in_stack from
1282 * the bblock they are inlined to. See bug #58863 for an
1285 if (cfg->inlined_method)
1286 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1288 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* propagate the chosen variables into each successor's in_stack */
1293 for (i = 0; i < bb->out_count; ++i) {
1294 outb = bb->out_bb [i];
1295 /* exception handlers are linked, but they should not be considered for stack args */
1296 if (outb->flags & BB_EXCEPTION_HANDLER)
1298 if (outb->in_scount) {
/* stack depth mismatch at a join point: invalid IL */
1299 if (outb->in_scount != bb->out_scount) {
1300 cfg->unverifiable = TRUE;
1303 continue; /* check they are the same locals */
1305 outb->in_scount = count;
1306 outb->in_stack = bb->out_stack;
1309 locals = bb->out_stack;
/* spill the current eval stack into the shared locals */
1311 for (i = 0; i < count; ++i) {
1312 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1313 inst->cil_code = sp [i]->cil_code;
1314 sp [i] = locals [i];
1315 if (cfg->verbose_level > 3)
1316 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1320 * It is possible that the out bblocks already have in_stack assigned, and
1321 * the in_stacks differ. In this case, we will store to all the different
1328 /* Find a bblock which has a different in_stack */
1330 while (bindex < bb->out_count) {
1331 outb = bb->out_bb [bindex];
1332 /* exception handlers are linked, but they should not be considered for stack args */
1333 if (outb->flags & BB_EXCEPTION_HANDLER) {
1337 if (outb->in_stack != locals) {
/* store the same values into this successor's distinct in_stack too */
1338 for (i = 0; i < count; ++i) {
1339 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1340 inst->cil_code = sp [i]->cil_code;
1341 sp [i] = locals [i];
1342 if (cfg->verbose_level > 3)
1343 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1345 locals = outb->in_stack;
1354 /* Emit code which loads interface_offsets [klass->interface_id]
1355 * The array is stored in memory before vtable.
/* AOT path uses a patched (adjusted) IID constant; JIT path uses the
 * compile-time interface_id as a negative offset from the vtable. */
1358 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1360 if (cfg->compile_aot) {
1361 int ioffset_reg = alloc_preg (cfg);
1362 int iid_reg = alloc_preg (cfg);
1364 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1365 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1366 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* non-AOT: interface offsets live immediately before the vtable */
1369 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_interface_bitmap_check:
 *   Emit IR that tests bit klass->interface_id of the interface bitmap
 *   located at base_reg + offset, leaving a nonzero value in intf_bit_reg
 *   iff the bit is set.
 * NOTE(review): line-sampled excerpt; braces and some guard lines of the
 * original body are elided.
 */
1374 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1376 int ibitmap_reg = alloc_preg (cfg);
/* Compressed-bitmap builds cannot index the bitmap directly; call the
 * mono_class_interface_match icall with the bitmap pointer and the iid. */
1377 #ifdef COMPRESSED_INTERFACE_BITMAP
1379 MonoInst *res, *ins;
1380 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1381 MONO_ADD_INS (cfg->cbb, ins);
1383 if (cfg->compile_aot)
1384 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1386 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1387 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1388 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
/* Uncompressed bitmap: test byte (iid >> 3) against mask 1 << (iid & 7). */
1390 int ibitmap_byte_reg = alloc_preg (cfg);
1392 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
/* AOT: iid is only known at patch time, so byte index and bit mask must
 * be computed in emitted code from the patched IID constant. */
1394 if (cfg->compile_aot) {
1395 int iid_reg = alloc_preg (cfg);
1396 int shifted_iid_reg = alloc_preg (cfg);
1397 int ibitmap_byte_address_reg = alloc_preg (cfg);
1398 int masked_iid_reg = alloc_preg (cfg);
1399 int iid_one_bit_reg = alloc_preg (cfg);
1400 int iid_bit_reg = alloc_preg (cfg);
1401 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1402 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1403 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1404 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1405 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1406 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1407 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1408 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: both byte index and bit mask fold to immediates. */
1410 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1411 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
/*
 * mini_emit_load_intf_bit_reg_class:
 *   Thin wrapper: run the interface-bitmap test against the bitmap
 *   embedded in a MonoClass (klass_reg points to the MonoClass).
 */
1417 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1418 * stored in "klass_reg" implements the interface "klass".
1421 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1423 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
/*
 * mini_emit_load_intf_bit_reg_vtable:
 *   Thin wrapper: run the interface-bitmap test against the bitmap
 *   embedded in a MonoVTable (vtable_reg points to the MonoVTable).
 */
1427 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1428 * stored in "vtable_reg" implements the interface "klass".
1431 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1433 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
/*
 * mini_emit_max_iid_check:
 *   Emit a range check: klass->interface_id must not exceed the value in
 *   max_iid_reg (an unsigned comparison).  On failure, either branch to
 *   false_target or raise InvalidCastException.
 * NOTE(review): the guard selecting between the branch and the throw
 * (presumably "if (false_target) ... else ...") is elided in this
 * line-sampled excerpt — confirm against the full source.
 */
1441 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1442 MonoBasicBlock *false_target)
/* AOT: iid comes from a patchable constant; JIT: it is an immediate. */
1444 if (cfg->compile_aot) {
1445 int iid_reg = alloc_preg (cfg);
1446 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1447 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1450 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1452 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1454 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
/*
 * mini_emit_max_iid_check_vtable:
 *   Load max_interface_id out of the MonoVTable pointed to by vtable_reg,
 *   then delegate to mini_emit_max_iid_check.
 */
1457 /* Same as above, but obtains max_iid from a vtable */
1459 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1460 MonoBasicBlock *false_target)
1462 int max_iid_reg = alloc_preg (cfg);
1464 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1465 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_max_iid_check_class:
 *   Load max_interface_id out of the MonoClass pointed to by klass_reg,
 *   then delegate to mini_emit_max_iid_check.
 */
1468 /* Same as above, but obtains max_iid from a klass */
1470 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1471 MonoBasicBlock *false_target)
1473 int max_iid_reg = alloc_preg (cfg);
1475 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1476 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 *   Emit the isinst fast path using the supertypes table of the runtime
 *   class in klass_reg: check idepth when it exceeds the default
 *   supertable size, load supertypes [klass->idepth - 1] and compare it
 *   against @klass (from klass_ins->dreg, an AOT class constant, or an
 *   immediate).  Branch to true_target on equality.
 * NOTE(review): line-sampled excerpt; braces/blank lines and any code
 * after the PBEQ branch are elided.
 */
1480 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1482 int idepth_reg = alloc_preg (cfg);
1483 int stypes_reg = alloc_preg (cfg);
1484 int stype = alloc_preg (cfg);
/* supertypes[] always holds at least MONO_DEFAULT_SUPERTABLE_SIZE
 * entries, so the depth check is only needed beyond that. */
1486 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1487 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1488 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1489 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1491 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1492 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
/* Compare the candidate supertype against the target class. */
1494 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1495 } else if (cfg->compile_aot) {
1496 int const_reg = alloc_preg (cfg);
1497 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1498 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1500 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1502 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/*
 * mini_emit_isninst_cast:
 *   Convenience wrapper: isinst check without a klass-carrying MonoInst
 *   (the target class is passed only as @klass).
 */
1506 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1508 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 *   Emit an interface cast check against a vtable: range-check the
 *   interface id, test the interface bitmap bit, then either branch
 *   to true_target or raise InvalidCastException when the bit is clear.
 * NOTE(review): the guard selecting between the branch and the throw
 * is elided in this line-sampled excerpt — confirm against full source.
 */
1512 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1514 int intf_reg = alloc_preg (cfg);
1516 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1517 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1518 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1520 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1522 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_iface_class_cast:
 *   Same interface cast check as mini_emit_iface_cast, but klass_reg
 *   holds a MonoClass pointer rather than a MonoVTable pointer.
 * NOTE(review): the guard selecting between the branch and the throw
 * is elided in this line-sampled excerpt.
 */
1526 * Variant of the above that takes a register to the class, not the vtable.
1529 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1531 int intf_bit_reg = alloc_preg (cfg);
1533 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1534 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1535 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1537 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1539 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 *   Emit an exact class-equality check of klass_reg against @klass,
 *   raising InvalidCastException when they differ.  The comparand is
 *   taken from klass_inst->dreg when provided, from a patchable class
 *   constant under AOT, or as an immediate otherwise.
 * NOTE(review): the opening "if (klass_inst)" guard line is elided in
 * this line-sampled excerpt — confirm against the full source.
 */
1543 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1546 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1547 } else if (cfg->compile_aot) {
1548 int const_reg = alloc_preg (cfg);
1549 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1550 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1552 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1554 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/*
 * mini_emit_class_check:
 *   Convenience wrapper around mini_emit_class_check_inst with no
 *   klass-carrying instruction.
 */
1558 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1560 return mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 *   Compare klass_reg against @klass (AOT class constant or immediate)
 *   and emit a conditional branch to @target using @branch_op.
 */
1564 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1566 if (cfg->compile_aot) {
1567 int const_reg = alloc_preg (cfg);
1568 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1569 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1571 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1573 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1577 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 *   Emit the castclass check for the runtime class in klass_reg against
 *   the compile-time class @klass.  Array casts compare rank and then the
 *   element class (with special cases for object/Enum element types and
 *   interface element types); non-array casts walk the supertypes table.
 *   Failures raise InvalidCastException; object_is_null is used as a
 *   branch target for cases that trivially succeed.
 * NOTE(review): line-sampled excerpt; the array/non-array dispatch guard
 * and several braces are elided — confirm structure against full source.
 */
1580 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1583 int rank_reg = alloc_preg (cfg);
1584 int eclass_reg = alloc_preg (cfg);
1586 g_assert (!klass_inst);
/* Array path: rank must match exactly, then check the element class. */
1587 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1588 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1589 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1590 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1591 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* object[] also accepts enum-derived element types; Enum's parent
 * (ValueType) and Enum itself get their own equality checks. */
1592 if (klass->cast_class == mono_defaults.object_class) {
1593 int parent_reg = alloc_preg (cfg);
1594 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1595 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1596 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1597 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1598 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1599 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1600 } else if (klass->cast_class == mono_defaults.enum_class) {
1601 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1602 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1603 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1605 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1606 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
/* SZARRAY targets additionally require the object to be a vector,
 * i.e. its MonoArray::bounds pointer must be NULL. */
1609 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1610 /* Check that the object is a vector too */
1611 int bounds_reg = alloc_preg (cfg);
1612 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1613 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1614 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: walk the supertypes table, same scheme as isinst
 * but throwing instead of branching on failure. */
1617 int idepth_reg = alloc_preg (cfg);
1618 int stypes_reg = alloc_preg (cfg);
1619 int stype = alloc_preg (cfg);
1621 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1622 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1623 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1624 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1626 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1627 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1628 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/*
 * mini_emit_castclass:
 *   Convenience wrapper around mini_emit_castclass_inst with no
 *   klass-carrying instruction.
 */
1633 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1635 return mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 *   Emit inline IR that sets @size bytes at destreg + offset to @val.
 *   Small aligned sizes use a single store-immediate; larger sizes load
 *   @val into a register and emit a sequence of stores, using wide
 *   stores where alignment (and, for 8-byte stores, the absence of
 *   unaligned-access restrictions) permits, byte stores for the tail.
 * NOTE(review): line-sampled excerpt; the switch/loop scaffolding around
 * the stores is elided — only the store-emitting lines are visible.
 */
1639 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
/* Only zero-fill is supported on this path. */
1643 g_assert (val == 0);
1648 if ((size <= 4) && (size <= align)) {
1651 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1654 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1657 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1659 #if SIZEOF_REGISTER == 8
1661 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General case: materialize val in a register sized to the target. */
1667 val_reg = alloc_preg (cfg);
1669 if (SIZEOF_REGISTER == 8)
1670 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1672 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1675 /* This could be optimized further if neccesary */
1677 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1684 #if !NO_UNALIGNED_ACCESS
1685 if (SIZEOF_REGISTER == 8) {
1687 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1692 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1700 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1705 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1710 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 *   Emit inline IR copying @size bytes from srcreg + soffset to
 *   destreg + doffset, as load/store pairs through a fresh register.
 *   Wide (8/4/2-byte) transfers are used where alignment permits (8-byte
 *   only when unaligned access is allowed and the register size is 64
 *   bits), with byte copies for the unaligned head/tail.
 * NOTE(review): line-sampled excerpt; the loop/size-decrement
 *   scaffolding between the load/store pairs is elided.
 */
1717 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
/* Guard against pathological inline expansion of huge copies. */
1724 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1725 g_assert (size < 10000);
1728 /* This could be optimized further if neccesary */
1730 cur_reg = alloc_preg (cfg);
1731 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1732 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1739 #if !NO_UNALIGNED_ACCESS
1740 if (SIZEOF_REGISTER == 8) {
1742 cur_reg = alloc_preg (cfg);
1743 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1744 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1753 cur_reg = alloc_preg (cfg);
1754 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1755 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1761 cur_reg = alloc_preg (cfg);
1762 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1763 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1769 cur_reg = alloc_preg (cfg);
1770 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1771 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * ret_type_to_call_opcode:
 *   Map a method's return type to the matching call opcode family:
 *   plain/REG (calli)/VIRT for reference and int returns, LCALL for
 *   64-bit, FCALL for floating point, VCALL for value types and
 *   typedbyref, VOIDCALL for void.  Generic instances are resolved to
 *   their container class; enums are resolved to their base type.
 * NOTE(review): line-sampled excerpt; case labels between the visible
 * lines (e.g. the int/ptr and I8/R8 cases) and the enum "goto"/loop
 * plumbing are elided — confirm against the full source.
 */
1779 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
/* byref returns are pointer-sized → plain CALL family. */
1782 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1785 type = mini_get_basic_type_from_generic (gsctx, type);
1786 switch (type->type) {
1787 case MONO_TYPE_VOID:
1788 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1791 case MONO_TYPE_BOOLEAN:
1794 case MONO_TYPE_CHAR:
1797 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1801 case MONO_TYPE_FNPTR:
1802 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1803 case MONO_TYPE_CLASS:
1804 case MONO_TYPE_STRING:
1805 case MONO_TYPE_OBJECT:
1806 case MONO_TYPE_SZARRAY:
1807 case MONO_TYPE_ARRAY:
1808 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1811 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1814 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1815 case MONO_TYPE_VALUETYPE:
/* Enums dispatch again on their underlying base type. */
1816 if (type->data.klass->enumtype) {
1817 type = mono_class_enum_basetype (type->data.klass);
1820 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1821 case MONO_TYPE_TYPEDBYREF:
1822 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1823 case MONO_TYPE_GENERICINST:
1824 type = &type->data.generic_class->container_class->byval_arg;
1827 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
/*
 * target_type_is_incompatible:
 *   Verify that the evaluation-stack item @arg may be stored into a
 *   location of type @target (local, field, argument, ...), by checking
 *   the item's stack type (STACK_I4/I8/R8/OBJ/MP/PTR/VTYPE) against the
 *   underlying CLI type of @target.  Returns nonzero when the store
 *   would be type-incorrect.
 * NOTE(review): line-sampled excerpt; the "return 1"/"return 0" lines
 * between the visible checks are elided.
 */
1833 * target_type_is_incompatible:
1834 * @cfg: MonoCompile context
1836 * Check that the item @arg on the evaluation stack can be stored
1837 * in the target type (can be a local, or field, etc).
1838 * The cfg arg can be used to check if we need verification or just
1841 * Returns: non-0 value if arg can't be stored on a target.
1844 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1846 MonoType *simple_type;
1849 if (target->byref) {
1850 /* FIXME: check that the pointed to types match */
1851 if (arg->type == STACK_MP)
1852 return arg->klass != mono_class_from_mono_type (target);
1853 if (arg->type == STACK_PTR)
1858 simple_type = mono_type_get_underlying_type (target);
1859 switch (simple_type->type) {
1860 case MONO_TYPE_VOID:
1864 case MONO_TYPE_BOOLEAN:
1867 case MONO_TYPE_CHAR:
/* Small integer targets accept I4 or native-int stack entries. */
1870 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1874 /* STACK_MP is needed when setting pinned locals */
1875 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1880 case MONO_TYPE_FNPTR:
1881 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1884 case MONO_TYPE_CLASS:
1885 case MONO_TYPE_STRING:
1886 case MONO_TYPE_OBJECT:
1887 case MONO_TYPE_SZARRAY:
1888 case MONO_TYPE_ARRAY:
1889 if (arg->type != STACK_OBJ)
1891 /* FIXME: check type compatibility */
1895 if (arg->type != STACK_I8)
1900 if (arg->type != STACK_R8)
1903 case MONO_TYPE_VALUETYPE:
/* Value types must match both the stack kind and the exact class. */
1904 if (arg->type != STACK_VTYPE)
1906 klass = mono_class_from_mono_type (simple_type);
1907 if (klass != arg->klass)
1910 case MONO_TYPE_TYPEDBYREF:
1911 if (arg->type != STACK_VTYPE)
1913 klass = mono_class_from_mono_type (simple_type);
1914 if (klass != arg->klass)
1917 case MONO_TYPE_GENERICINST:
1918 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1919 if (arg->type != STACK_VTYPE)
1921 klass = mono_class_from_mono_type (simple_type);
1922 if (klass != arg->klass)
1926 if (arg->type != STACK_OBJ)
1928 /* FIXME: check type compatibility */
1932 case MONO_TYPE_MVAR:
1933 /* FIXME: all the arguments must be references for now,
1934 * later look inside cfg and see if the arg num is
1935 * really a reference
/* Generic sharing: type variables are treated as references. */
1937 g_assert (cfg->generic_sharing_context);
1938 if (arg->type != STACK_OBJ)
1942 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
/*
 * check_call_signature:
 *   Check the stack types of @args against the parameter types of @sig
 *   (including the implicit "this" for hasthis signatures).  Returns
 *   nonzero if the arguments cannot be passed as-is.
 * NOTE(review): line-sampled excerpt; the per-case "return 1"/continue
 * lines between the visible checks are elided.
 */
1948 * Prepare arguments for passing to a function call.
1949 * Return a non-zero value if the arguments can't be passed to the given
1951 * The type checks are not yet complete and some conversions may need
1952 * casts on 32 or 64 bit architectures.
1954 * FIXME: implement this using target_type_is_incompatible ()
1957 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1959 MonoType *simple_type;
/* Implicit "this" must be an object ref or managed/unmanaged pointer. */
1963 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1967 for (i = 0; i < sig->param_count; ++i) {
1968 if (sig->params [i]->byref) {
1969 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1973 simple_type = sig->params [i];
1974 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1976 switch (simple_type->type) {
1977 case MONO_TYPE_VOID:
1982 case MONO_TYPE_BOOLEAN:
1985 case MONO_TYPE_CHAR:
1988 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
1994 case MONO_TYPE_FNPTR:
1995 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
1998 case MONO_TYPE_CLASS:
1999 case MONO_TYPE_STRING:
2000 case MONO_TYPE_OBJECT:
2001 case MONO_TYPE_SZARRAY:
2002 case MONO_TYPE_ARRAY:
2003 if (args [i]->type != STACK_OBJ)
2008 if (args [i]->type != STACK_I8)
2013 if (args [i]->type != STACK_R8)
2016 case MONO_TYPE_VALUETYPE:
/* Enum parameters are re-checked as their underlying base type. */
2017 if (simple_type->data.klass->enumtype) {
2018 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2021 if (args [i]->type != STACK_VTYPE)
2024 case MONO_TYPE_TYPEDBYREF:
2025 if (args [i]->type != STACK_VTYPE)
2028 case MONO_TYPE_GENERICINST:
2029 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2033 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 *   Map a *CALLVIRT opcode to its direct *CALL counterpart, used when a
 *   virtual call can be statically bound.  Unknown opcodes assert.
 * NOTE(review): only a fragment of the switch is visible in this
 * line-sampled excerpt; the remaining case labels are elided.
 */
2041 callvirt_to_call (int opcode)
2046 case OP_VOIDCALLVIRT:
2055 g_assert_not_reached ();
/*
 * callvirt_to_call_membase:
 *   Map a *CALLVIRT opcode to its *CALL_MEMBASE counterpart, used when
 *   the call target is loaded indirectly from [basereg + offset] (vtable
 *   or IMT slot dispatch).  Unknown opcodes assert.
 * NOTE(review): line-sampled excerpt; some case labels are elided.
 */
2062 callvirt_to_call_membase (int opcode)
2066 return OP_CALL_MEMBASE;
2067 case OP_VOIDCALLVIRT:
2068 return OP_VOIDCALL_MEMBASE;
2070 return OP_FCALL_MEMBASE;
2072 return OP_LCALL_MEMBASE;
2074 return OP_VCALL_MEMBASE;
2076 g_assert_not_reached ();
/*
 * emit_imt_argument:
 *   Pass the IMT identification argument for an interface call: either
 *   the supplied imt_arg, or the call's target method as a constant
 *   (AOT patch or OP_PCONST).  On architectures with a dedicated IMT
 *   register the value goes into MONO_ARCH_IMT_REG; otherwise the
 *   arch-specific hook takes over.
 */
2082 #ifdef MONO_ARCH_HAVE_IMT
2084 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2086 #ifdef MONO_ARCH_IMT_REG
2087 int method_reg = alloc_preg (cfg);
2090 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2091 } else if (cfg->compile_aot) {
2092 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2095 MONO_INST_NEW (cfg, ins, OP_PCONST);
2096 ins->inst_p0 = call->method;
2097 ins->dreg = method_reg;
2098 MONO_ADD_INS (cfg->cbb, ins);
2101 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* No IMT register on this architecture: let the backend decide. */
2103 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/*
 * mono_patch_info_new:
 *   Allocate a MonoJumpInfo patch record from the given mempool and
 *   initialize it with the supplied type and target.  The mempool owns
 *   the returned memory (no separate free).
 * NOTE(review): the assignments of ji->ip/ji->type are elided in this
 * line-sampled excerpt.
 */
2108 static MonoJumpInfo *
2109 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2111 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2115 ji->data.target = target;
/*
 * mono_emit_call_args:
 *   Build a MonoCallInst for a call with signature @sig and arguments
 *   @args.  Selects the opcode from the return type (or OP_TAILCALL),
 *   sets up the value-type return (either via cfg->vret_addr or a
 *   temporary addressed through OP_OUTARG_VTRETADDR), converts R4
 *   arguments via an icall under soft-float, and hands the call to the
 *   LLVM or native backend for argument lowering.  Updates param_area
 *   and marks the cfg as containing calls.
 * NOTE(review): line-sampled excerpt; the conditions guarding the two
 * MONO_TYPE_ISSTRUCT branches and several brace lines are elided.
 */
2120 inline static MonoCallInst *
2121 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2122 MonoInst **args, int calli, int virtual, int tail)
2125 #ifdef MONO_ARCH_SOFT_FLOAT
2130 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2132 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2135 call->signature = sig;
2137 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
/* Struct return, path 1: the caller-provided vret address variable. */
2140 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2141 call->vret_var = cfg->vret_addr;
2142 //g_assert_not_reached ();
/* Struct return, path 2: return into a fresh local temporary. */
2144 } else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2145 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2148 temp->backend.is_pinvoke = sig->pinvoke;
2151 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2152 * address of return value to increase optimization opportunities.
2153 * Before vtype decomposition, the dreg of the call ins itself represents the
2154 * fact the call modifies the return value. After decomposition, the call will
2155 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2156 * will be transformed into an LDADDR.
2158 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2159 loada->dreg = alloc_preg (cfg);
2160 loada->inst_p0 = temp;
2161 /* We reference the call too since call->dreg could change during optimization */
2162 loada->inst_p1 = call;
2163 MONO_ADD_INS (cfg->cbb, loada);
2165 call->inst.dreg = temp->dreg;
2167 call->vret_var = loada;
2168 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2169 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2171 #ifdef MONO_ARCH_SOFT_FLOAT
2172 if (COMPILE_SOFT_FLOAT (cfg)) {
2174 * If the call has a float argument, we would need to do an r8->r4 conversion using
2175 * an icall, but that cannot be done during the call sequence since it would clobber
2176 * the call registers + the stack. So we do it before emitting the call.
2178 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2180 MonoInst *in = call->args [i];
2182 if (i >= sig->hasthis)
2183 t = sig->params [i - sig->hasthis];
2185 t = &mono_defaults.int_class->byval_arg;
2186 t = mono_type_get_underlying_type (t);
2188 if (!t->byref && t->type == MONO_TYPE_R4) {
2189 MonoInst *iargs [1];
2193 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2195 /* The result will be in an int vreg */
2196 call->args [i] = conv;
/* Backend lowering: LLVM when enabled, otherwise the native arch. */
2203 if (COMPILE_LLVM (cfg))
2204 mono_llvm_emit_call (cfg, call);
2206 mono_arch_emit_call (cfg, call);
2208 mono_arch_emit_call (cfg, call);
2211 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2212 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * mono_emit_calli:
 *   Emit an indirect call (calli) through the function pointer in
 *   addr->dreg, building the call with mono_emit_call_args and adding
 *   it to the current basic block.  Returns the call instruction.
 */
2217 inline static MonoInst*
2218 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2220 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE);
2222 call->inst.sreg1 = addr->dreg;
2224 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2226 return (MonoInst*)call;
/*
 * mono_emit_rgctx_calli:
 *   Like mono_emit_calli, but also passes the runtime-generic-context
 *   argument: the rgctx value is moved into MONO_ARCH_RGCTX_REG and
 *   registered as an out-argument of the call.  Only available on
 *   architectures that define MONO_ARCH_RGCTX_REG; asserts otherwise.
 */
2229 inline static MonoInst*
2230 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2232 #ifdef MONO_ARCH_RGCTX_REG
2237 rgctx_reg = mono_alloc_preg (cfg);
2238 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2240 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2242 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2243 cfg->uses_rgctx_reg = TRUE;
2244 call->rgctx_reg = TRUE;
2246 return (MonoInst*)call;
2248 g_assert_not_reached ();
2254 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
2256 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, int rgctx_type);
/*
 * mono_emit_method_call_full:
 *   Central helper for emitting a managed method call.  Handles, in
 *   order: string-ctor signature fixup; remoting (possibly via an rgctx
 *   calli under generic sharing); the multicast-delegate Invoke fast
 *   path through delegate->invoke_impl; statically-bindable calls
 *   (non-virtual or final) lowered to direct calls with a null check;
 *   and finally true virtual dispatch through the vtable, using the IMT
 *   slot for interface methods.  Returns the call instruction.
 * NOTE(review): line-sampled excerpt; several guard lines (e.g. the
 * "if (virtual)" dispatch, imt_arg checks and closing braces) are
 * elided — confirm control flow against the full source.
 */
2259 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2260 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2262 gboolean might_be_remote;
2263 gboolean virtual = this != NULL;
2264 gboolean enable_for_aot = TRUE;
/* String ctors are declared void; call them with a String return. */
2268 if (method->string_ctor) {
2269 /* Create the real signature */
2270 /* FIXME: Cache these */
2271 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2272 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2277 might_be_remote = this && sig->hasthis &&
2278 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2279 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this);
2281 context_used = mono_method_check_context_used (method);
/* Shared-generic remoting: go through an rgctx-provided wrapper. */
2282 if (might_be_remote && context_used) {
2285 g_assert (cfg->generic_sharing_context);
2287 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2289 return mono_emit_calli (cfg, sig, args, addr);
2292 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE);
2294 if (might_be_remote)
2295 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2297 call->method = method;
2298 call->inst.flags |= MONO_INST_HAS_METHOD;
2299 call->inst.inst_left = this;
2302 int vtable_reg, slot_reg, this_reg;
2304 this_reg = this->dreg;
/* Fast path: call delegate->invoke_impl directly for Delegate.Invoke. */
2306 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2307 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2308 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2310 /* Make a call to delegate->invoke_impl */
2311 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2312 call->inst.inst_basereg = this_reg;
2313 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2314 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2316 return (MonoInst*)call;
/* Statically bindable: non-virtual, or final and not a remoting
 * wrapper — emit a direct call after a this-null check. */
2320 if ((!cfg->compile_aot || enable_for_aot) &&
2321 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2322 (MONO_METHOD_IS_FINAL (method) &&
2323 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2324 !(method->klass->marshalbyref && context_used)) {
2326 * the method is not virtual, we just need to ensure this is not null
2327 * and then we can call the method directly.
2329 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2331 * The check above ensures method is not gshared, this is needed since
2332 * gshared methods can't have wrappers.
2334 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2337 if (!method->string_ctor)
2338 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2340 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2342 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2344 return (MonoInst*)call;
2347 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2349 * the method is virtual, but we can statically dispatch since either
2350 * it's class or the method itself are sealed.
2351 * But first we need to ensure it's not a null reference.
2353 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2355 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2356 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2358 return (MonoInst*)call;
/* True virtual dispatch: load the slot from the vtable (faulting on
 * null this), using negative IMT slots for interface methods. */
2361 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2363 vtable_reg = alloc_preg (cfg);
2364 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2365 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2367 #ifdef MONO_ARCH_HAVE_IMT
2369 guint32 imt_slot = mono_method_get_imt_slot (method);
2370 emit_imt_argument (cfg, call, imt_arg);
2371 slot_reg = vtable_reg;
2372 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2375 if (slot_reg == -1) {
2376 slot_reg = alloc_preg (cfg);
2377 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2378 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2381 slot_reg = vtable_reg;
2382 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2383 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2384 #ifdef MONO_ARCH_HAVE_IMT
2386 g_assert (mono_method_signature (method)->generic_param_count);
2387 emit_imt_argument (cfg, call, imt_arg);
2392 call->inst.sreg1 = slot_reg;
2393 call->virtual = TRUE;
2396 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2398 return (MonoInst*)call;
/*
 * mono_emit_rgctx_method_call_full:
 *   Wrapper around mono_emit_method_call_full that additionally passes
 *   @vtable_arg in the runtime-generic-context register when
 *   MONO_ARCH_RGCTX_REG is defined, marking the call accordingly.
 * NOTE(review): line-sampled excerpt; the guards around the rgctx setup
 * (presumably "if (vtable_arg)") and the return line are elided.
 */
2402 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2403 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
2405 #ifdef MONO_ARCH_RGCTX_REG
2412 #ifdef MONO_ARCH_RGCTX_REG
2413 rgctx_reg = mono_alloc_preg (cfg);
2414 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2419 ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2421 call = (MonoCallInst*)ins;
2423 #ifdef MONO_ARCH_RGCTX_REG
2424 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2425 cfg->uses_rgctx_reg = TRUE;
2426 call->rgctx_reg = TRUE;
/*
 * mono_emit_method_call:
 *   Convenience wrapper: emit a call to @method using its own
 *   signature, with no IMT argument.
 */
2436 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2438 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
/*
 * mono_emit_native_call:
 *   Emit a direct call to the native function @func with signature
 *   @sig, adding the call to the current basic block.
 * NOTE(review): the line storing @func into the call (fptr) is elided
 * in this line-sampled excerpt.
 */
2442 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2449 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE);
2452 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2454 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *   Emit a call to a registered JIT icall: look up the icall info for
 *   @func and call through its wrapper with the registered signature.
 */
2458 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2460 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2464 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * mono_emit_abs_call:
 *   Emit a call whose target is resolved later via a patch: a
 *   MonoJumpInfo is allocated, registered in cfg->abs_patches, and its
 *   address is passed as the call "function pointer"; the PATCH_INFO_ABS
 *   machinery resolves it at code-emission time.
 */
2468 * mono_emit_abs_call:
2470 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2472 inline static MonoInst*
2473 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2474 MonoMethodSignature *sig, MonoInst **args)
2476 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2480 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Lazily create the patch table keyed by the jump-info itself. */
2483 if (cfg->abs_patches == NULL)
2484 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2485 g_hash_table_insert (cfg->abs_patches, ji, ji);
2486 ins = mono_emit_native_call (cfg, ji, sig, args);
2487 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *   Widen small-integer call results: native (pinvoke) or LLVM calls may
 *   return sub-register-sized integers without defined upper bits, so
 *   emit the matching sign/zero extension and return the widened
 *   instruction.
 * NOTE(review): line-sampled excerpt; the default case, the return
 * statements and closing braces are elided.
 */
2492 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2494 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2495 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2499 * Native code might return non register sized integers
2500 * without initializing the upper bits.
/* Pick the widening op from how the return type would be loaded. */
2502 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2503 case OP_LOADI1_MEMBASE:
2504 widen_op = OP_ICONV_TO_I1;
2506 case OP_LOADU1_MEMBASE:
2507 widen_op = OP_ICONV_TO_U1;
2509 case OP_LOADI2_MEMBASE:
2510 widen_op = OP_ICONV_TO_I2;
2512 case OP_LOADU2_MEMBASE:
2513 widen_op = OP_ICONV_TO_U2;
2519 if (widen_op != -1) {
2520 int dreg = alloc_preg (cfg);
2523 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2524 widen->type = ins->type;
/*
 * get_memcpy_method:
 *
 *   Return the corlib-provided memcpy helper, caching it in a static on
 * first use.  NOTE(review): the helper is looked up on String — presumably
 * corlib hosts these internal helpers there; confirm against the corlib in
 * use.  Aborts if the method is missing (corlib too old).
 */
2534 get_memcpy_method (void)
2536 static MonoMethod *memcpy_method = NULL;
2537 if (!memcpy_method) {
2538 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2540 g_error ("Old corlib found. Install a new one");
2542 return memcpy_method;
2546 * Emit code to copy a valuetype of type @klass whose address is stored in
2547 * @src->dreg to memory whose address is stored at @dest->dreg.
2550 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2552 MonoInst *iargs [3];
2555 MonoMethod *memcpy_method;
2559 * This check breaks with spilled vars... need to handle it during verification anyway.
2560 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* Native layout size for pinvoke copies, managed value size otherwise. */
2564 n = mono_class_native_size (klass, &align);
2566 n = mono_class_value_size (klass, &align);
2568 #if HAVE_WRITE_BARRIERS
2569 /* if native is true there should be no references in the struct */
2570 if (klass->has_references && !native) {
2571 /* Avoid barriers when storing to the stack */
2572 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2573 (dest->opcode == OP_LDADDR))) {
2574 int context_used = 0;
2579 if (cfg->generic_sharing_context)
2580 context_used = mono_class_check_context_used (klass);
/* Shared generic code loads the klass from the rgctx at runtime. */
2582 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2584 if (cfg->compile_aot) {
2585 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2587 EMIT_NEW_PCONST (cfg, iargs [2], klass);
/* mono_value_copy () reads the gc descriptor, so compute it now. */
2588 mono_class_compute_gc_descriptor (klass);
2592 /* FIXME: this does the memcpy as well (or
2593 should), so we don't need the memcpy
2595 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* Small structs: inline the copy instead of calling the helper. */
2600 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2601 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2602 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2606 EMIT_NEW_ICONST (cfg, iargs [2], n);
2608 memcpy_method = get_memcpy_method ();
2609 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return the corlib-provided memset helper, cached in a static.
 * NOTE(review): looked up on String like get_memcpy_method () —
 * presumably corlib hosts the helper there; confirm.  Aborts on old corlib.
 */
2614 get_memset_method (void)
2616 static MonoMethod *memset_method = NULL;
2617 if (!memset_method) {
2618 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2620 g_error ("Old corlib found. Install a new one");
2622 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code to zero-initialize a valuetype of type KLASS located at
 * DEST->dreg.  Small values are zeroed inline; larger ones call the corlib
 * memset helper with (dest, 0, size).
 */
2626 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2628 MonoInst *iargs [3];
2631 MonoMethod *memset_method;
2633 /* FIXME: Optimize this for the case when dest is an LDADDR */
2635 mono_class_init (klass);
2636 n = mono_class_value_size (klass, &align);
/* Inline threshold: up to 5 pointer-sized words. */
2638 if (n <= sizeof (gpointer) * 5) {
2639 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2642 memset_method = get_memset_method ();
2644 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2645 EMIT_NEW_ICONST (cfg, iargs [2], n);
2646 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR to load the runtime generic context for METHOD.  Depending on
 * how the context is shared, the rgctx comes from (a) the method rgctx
 * argument variable, (b) the vtable argument variable (possibly indirected
 * through the mrgctx's class_vtable field), or (c) the vtable of `this`.
 * Only valid when compiling shared generic code.
 */
2651 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2653 MonoInst *this = NULL;
2655 g_assert (cfg->generic_sharing_context);
/* Instance methods on reference types can reach the rgctx via `this`. */
2657 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2658 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2659 !method->klass->valuetype)
2660 EMIT_NEW_ARGLOAD (cfg, this, 0);
2662 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2663 MonoInst *mrgctx_loc, *mrgctx_var;
2666 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
/* Method context used: load the method rgctx from its variable. */
2668 mrgctx_loc = mono_get_vtable_var (cfg);
2669 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2672 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2673 MonoInst *vtable_loc, *vtable_var;
/* No usable `this`: the vtable (or mrgctx) is passed as a hidden arg. */
2677 vtable_loc = mono_get_vtable_var (cfg);
2678 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2680 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2681 MonoInst *mrgctx_var = vtable_var;
/* The hidden arg is an mrgctx; fetch the vtable out of it. */
2684 vtable_reg = alloc_preg (cfg);
2685 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2686 vtable_var->type = STACK_PTR;
/* Default case: load the vtable out of `this`. */
2692 int vtable_reg, res_reg;
2694 vtable_reg = alloc_preg (cfg);
2695 res_reg = alloc_preg (cfg);
2696 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (from MP) an rgctx lazy-fetch descriptor: which METHOD it
 * belongs to, whether the lookup goes through an mrgctx, the patch
 * (PATCH_TYPE + PATCH_DATA) identifying the looked-up item, and the kind
 * of info requested (INFO_TYPE).
 */
2701 static MonoJumpInfoRgctxEntry *
2702 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2704 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2705 res->method = method;
2706 res->in_mrgctx = in_mrgctx;
2707 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2708 res->data->type = patch_type;
2709 res->data->data.target = patch_data;
2710 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *
 *   Emit a call to the rgctx lazy-fetch trampoline which resolves ENTRY
 * against the runtime generic context RGCTX.
 */
2715 static inline MonoInst*
2716 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2718 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR to fetch the RGCTX_TYPE info (e.g. klass, vtable) of KLASS
 * from the runtime generic context of the current method.
 */
2722 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2723 MonoClass *klass, int rgctx_type)
2725 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2726 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2728 return emit_rgctx_fetch (cfg, rgctx, entry);
2732 * emit_get_rgctx_method:
2734 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2735 * normal constants, else emit a load from the rgctx.
2738 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2739 MonoMethod *cmethod, int rgctx_type)
/* Non-shared case: the method is fully known, emit a constant. */
2741 if (!context_used) {
2744 switch (rgctx_type) {
2745 case MONO_RGCTX_INFO_METHOD:
2746 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
2748 case MONO_RGCTX_INFO_METHOD_RGCTX:
2749 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
2752 g_assert_not_reached ();
/* Shared case: go through the lazy-fetch trampoline. */
2755 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2756 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2758 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR to fetch the RGCTX_TYPE info of FIELD from the runtime generic
 * context of the current method (shared generic code path).
 */
2763 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2764 MonoClassField *field, int rgctx_type)
2766 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2767 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2769 return emit_rgctx_fetch (cfg, rgctx, entry);
2773 * On return the caller must check @klass for load errors.
/*
 * emit_generic_class_init:
 *
 *   Emit a call to the generic class-init trampoline for KLASS, obtaining
 * the vtable either from the rgctx (shared code) or as a constant.  On
 * architectures with a dedicated vtable register the vtable is passed
 * through MONO_ARCH_VTABLE_REG.
 */
2776 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
2778 MonoInst *vtable_arg;
2780 int context_used = 0;
2782 if (cfg->generic_sharing_context)
2783 context_used = mono_class_check_context_used (klass);
/* Shared code: the concrete vtable must be looked up at runtime. */
2786 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
2787 klass, MONO_RGCTX_INFO_VTABLE);
2789 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2793 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
2796 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
2797 #ifdef MONO_ARCH_VTABLE_REG
2798 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
2799 cfg->uses_vtable_reg = TRUE;
2806 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *
 *   Emit a runtime check that OBJ is an instance of exactly ARRAY_CLASS,
 * throwing ArrayTypeMismatchException otherwise.  Compares either the
 * object's klass (MONO_OPT_SHARED) or its vtable pointer, using rgctx
 * loads / AOT constants as required by the compilation mode.
 */
2809 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
2811 int vtable_reg = alloc_preg (cfg);
2812 int context_used = 0;
2814 if (cfg->generic_sharing_context)
2815 context_used = mono_class_check_context_used (array_class);
2817 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Shared-domain code compares klass pointers, not vtables. */
2819 if (cfg->opt & MONO_OPT_SHARED) {
2820 int class_reg = alloc_preg (cfg);
2821 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2822 if (cfg->compile_aot) {
2823 int klass_reg = alloc_preg (cfg);
2824 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
2825 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
2827 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
/* Shared generic code: fetch the expected vtable from the rgctx. */
2829 } else if (context_used) {
2830 MonoInst *vtable_ins;
2832 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
2833 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
2835 if (cfg->compile_aot) {
/* mono_class_vtable () failure falls through; caller checks load errors. */
2839 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
2841 vt_reg = alloc_preg (cfg);
2842 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
2843 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
2846 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
2848 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
2852 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
/*
 * save_cast_details:
 *
 *   When --debug=casts is enabled, record the source klass (read from the
 * object at OBJ_REG) and the destination KLASS into the thread's
 * MonoJitTlsData so a failed cast can report both types.  No-op otherwise.
 */
2856 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
2858 if (mini_get_debug_options ()->better_cast_details) {
2859 int to_klass_reg = alloc_preg (cfg);
2860 int vtable_reg = alloc_preg (cfg);
2861 int klass_reg = alloc_preg (cfg);
2862 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
/* NOTE(review): stray '.' after the '\n' in this message — candidate fix. */
2865 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
2869 MONO_ADD_INS (cfg->cbb, tls_get);
2870 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2871 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2873 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
2874 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
2875 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * reset_cast_details:
 *
 *   Clear the per-thread cast-details state written by save_cast_details ()
 * after a cast succeeded.  Only the class_cast_from field needs clearing.
 */
2880 reset_cast_details (MonoCompile *cfg)
2882 /* Reset the variables holding the cast details */
2883 if (mini_get_debug_options ()->better_cast_details) {
2884 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2886 MONO_ADD_INS (cfg->cbb, tls_get);
2887 /* It is enough to reset the from field */
2888 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
2893 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
2894 * generic code is generated.
2897 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
/* Nullable<T> unbox is implemented by calling its corlib Unbox method. */
2899 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
2902 MonoInst *rgctx, *addr;
2904 /* FIXME: What if the class is shared? We might not
2905 have to get the address of the method from the
/* Shared code: get the compiled-code address and rgctx, call indirectly. */
2907 addr = emit_get_rgctx_method (cfg, context_used, method,
2908 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2910 rgctx = emit_get_rgctx (cfg, method, context_used);
2912 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
2914 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *
 *   Emit the unbox sequence for SP [0]: verify the object's element class
 * matches KLASS (throwing InvalidCastException otherwise) and produce the
 * address of the boxed payload (object pointer + sizeof (MonoObject)).
 */
2919 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
2923 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
2924 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
2925 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
2926 int rank_reg = alloc_dreg (cfg ,STACK_I4);
2928 obj_reg = sp [0]->dreg;
/* Faulting load: also performs the null check on the object. */
2929 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2930 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2932 /* FIXME: generics */
2933 g_assert (klass->rank == 0);
/* Arrays can never be unboxed to a non-array klass. */
2936 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
2937 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2939 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2940 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
/* Shared generic code: compare against the rgctx-resolved element class. */
2943 MonoInst *element_class;
2945 /* This assertion is from the unboxcast insn */
2946 g_assert (klass->rank == 0);
2948 element_class = emit_get_rgctx_klass (cfg, context_used,
2949 klass->element_class, MONO_RGCTX_INFO_KLASS);
2951 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
2952 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2954 save_cast_details (cfg, klass->element_class, obj_reg);
2955 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
2956 reset_cast_details (cfg);
/* Result: address of the value embedded after the MonoObject header. */
2959 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
2960 MONO_ADD_INS (cfg->cbb, add);
2961 add->type = STACK_MP;
2968 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *
 *   Emit IR to allocate an instance of KLASS.  FOR_BOX selects the boxing
 * allocator variant.  Chooses between the generic-sharing path, the shared
 * (appdomain-agnostic) path, an AOT corlib fast path, a GC managed
 * allocator, and the generic allocation function.
 */
2971 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
2973 MonoInst *iargs [2];
2979 MonoInst *iargs [2];
2982 FIXME: we cannot get managed_alloc here because we can't get
2983 the class's vtable (because it's not a closed class)
2985 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2986 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
/* Shared path takes a klass; otherwise the vtable is looked up. */
2989 if (cfg->opt & MONO_OPT_SHARED)
2990 rgctx_info = MONO_RGCTX_INFO_KLASS;
2992 rgctx_info = MONO_RGCTX_INFO_VTABLE;
2993 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
2995 if (cfg->opt & MONO_OPT_SHARED) {
2996 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2998 alloc_ftn = mono_object_new;
3001 alloc_ftn = mono_object_new_specific;
3004 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-shared-generics paths below. */
3007 if (cfg->opt & MONO_OPT_SHARED) {
3008 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3009 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3011 alloc_ftn = mono_object_new;
3012 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3013 /* This happens often in argument checking code, eg. throw new FooException... */
3014 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3015 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3016 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3018 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3019 MonoMethod *managed_alloc = NULL;
/* Vtable creation failed: record a type-load error on the cfg. */
3023 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3024 cfg->exception_ptr = klass;
3028 #ifndef MONO_CROSS_COMPILE
3029 managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3032 if (managed_alloc) {
3033 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3034 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3036 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* pass_lw allocators take the size in pointer-words plus the vtable. */
3038 guint32 lw = vtable->klass->instance_size;
3039 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3040 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3041 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3044 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3048 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3052 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *
 *   Emit IR to box VAL as an instance of KLASS.  Nullable<T> is boxed via
 * its corlib Box method (indirect call in shared generic code); other
 * valuetypes are allocated with handle_alloc () and the value is stored
 * after the MonoObject header.
 */
3055 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
3057 MonoInst *alloc, *ins;
3059 if (mono_class_is_nullable (klass)) {
3060 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3063 /* FIXME: What if the class is shared? We might not
3064 have to get the method address from the RGCTX. */
3065 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3066 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3067 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3069 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3071 return mono_emit_method_call (cfg, method, &val, NULL);
3075 alloc = handle_alloc (cfg, klass, TRUE, context_used);
/* Copy the value into the freshly allocated box, past the header. */
3079 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3084 // FIXME: This doesn't work yet (class libs tests fail?)
/* TRUE when the isinst/castclass cannot be handled by simple inline
 * vtable/klass compares and must go through an icall.  Currently forced
 * to always be TRUE by the leading "TRUE ||" (see FIXME above). */
3085 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || mono_class_has_variant_generic_params (klass) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
3088 * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 *
 *   Emit IR for the castclass opcode: check SRC is null or an instance of
 * KLASS, throwing InvalidCastException otherwise.  Complex cases (see
 * is_complex_isinst) go through the mono_object_castclass icall; simple
 * cases are inlined with vtable/klass compares.
 */
3091 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3093 MonoBasicBlock *is_null_bb;
3094 int obj_reg = src->dreg;
3095 int vtable_reg = alloc_preg (cfg);
3096 MonoInst *klass_inst = NULL;
/* Shared generic code resolves the target klass through the rgctx. */
3101 klass_inst = emit_get_rgctx_klass (cfg, context_used,
3102 klass, MONO_RGCTX_INFO_KLASS);
3104 if (is_complex_isinst (klass)) {
3105 /* Complex case, handle by an icall */
3111 args [1] = klass_inst;
3113 return mono_emit_jit_icall (cfg, mono_object_castclass, args);
3115 /* Simple case, handled by the code below */
/* null always passes a castclass. */
3119 NEW_BBLOCK (cfg, is_null_bb);
3121 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3122 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3124 save_cast_details (cfg, klass, obj_reg);
3126 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3127 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3128 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3130 int klass_reg = alloc_preg (cfg);
3132 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array classes allow a single exact-klass compare. */
3134 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3135 /* the remoting code is broken, access the class for now */
3136 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3137 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3139 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3140 cfg->exception_ptr = klass;
3143 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3145 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3146 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3148 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3150 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3151 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3155 MONO_START_BB (cfg, is_null_bb);
3157 reset_cast_details (cfg);
3163 * Returns NULL and set the cfg exception on error.
/*
 * handle_isinst:
 *
 *   Emit IR for the isinst opcode: produce SRC if it is null or an
 * instance of KLASS, NULL otherwise.  Complex cases go through the
 * mono_object_isinst icall; simple cases are inlined with a diamond of
 * basic blocks (is_null_bb keeps the object, false_bb yields NULL).
 */
3166 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3169 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3170 int obj_reg = src->dreg;
3171 int vtable_reg = alloc_preg (cfg);
3172 int res_reg = alloc_preg (cfg);
3173 MonoInst *klass_inst = NULL;
3176 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3178 if (is_complex_isinst (klass)) {
3181 /* Complex case, handle by an icall */
3187 args [1] = klass_inst;
3189 return mono_emit_jit_icall (cfg, mono_object_isinst, args);
3191 /* Simple case, the code below can handle it */
3195 NEW_BBLOCK (cfg, is_null_bb);
3196 NEW_BBLOCK (cfg, false_bb);
3197 NEW_BBLOCK (cfg, end_bb);
3199 /* Do the assignment at the beginning, so the other assignment can be if converted */
3200 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3201 ins->type = STACK_OBJ;
/* null is "not an instance" but isinst returns the (null) object. */
3204 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3205 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3207 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3209 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3210 g_assert (!context_used);
3211 /* the is_null_bb target simply copies the input register to the output */
3212 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3214 int klass_reg = alloc_preg (cfg);
/* Array case: match rank, then compare element (cast) classes. */
3217 int rank_reg = alloc_preg (cfg);
3218 int eclass_reg = alloc_preg (cfg);
3220 g_assert (!context_used);
3221 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3222 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3223 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3224 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3225 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
3226 if (klass->cast_class == mono_defaults.object_class) {
3227 int parent_reg = alloc_preg (cfg);
3228 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3229 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3230 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3231 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3232 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3233 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3234 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3235 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3236 } else if (klass->cast_class == mono_defaults.enum_class) {
3237 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3238 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3239 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3240 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3242 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3243 /* Check that the object is a vector too */
3244 int bounds_reg = alloc_preg (cfg);
3245 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3246 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3247 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3250 /* the is_null_bb target simply copies the input register to the output */
3251 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3253 } else if (mono_class_is_nullable (klass)) {
3254 g_assert (!context_used);
3255 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3256 /* the is_null_bb target simply copies the input register to the output */
3257 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed class: a single exact-vtable/klass compare suffices. */
3259 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3260 g_assert (!context_used);
3261 /* the remoting code is broken, access the class for now */
3262 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3263 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3265 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3266 cfg->exception_ptr = klass;
3269 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3271 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3272 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3274 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3275 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3277 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3278 /* the is_null_bb target simply copies the input register to the output */
3279 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* false path: result is NULL. */
3284 MONO_START_BB (cfg, false_bb);
3286 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3287 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3289 MONO_START_BB (cfg, is_null_bb);
3291 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the remoting-aware isinst variant (see the value contract
 * in the comment below).  Transparent proxies whose type cannot be
 * determined locally yield 2 so the caller can do a remote check.
 */
3297 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3299 /* This opcode takes as input an object reference and a class, and returns:
3300 0) if the object is an instance of the class,
3301 1) if the object is not instance of the class,
3302 2) if the object is a proxy whose type cannot be determined */
3305 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3306 int obj_reg = src->dreg;
3307 int dreg = alloc_ireg (cfg);
3309 int klass_reg = alloc_preg (cfg);
3311 NEW_BBLOCK (cfg, true_bb);
3312 NEW_BBLOCK (cfg, false_bb);
3313 NEW_BBLOCK (cfg, false2_bb);
3314 NEW_BBLOCK (cfg, end_bb);
3315 NEW_BBLOCK (cfg, no_proxy_bb);
/* null is "not an instance" (result 1). */
3317 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3318 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3320 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3321 NEW_BBLOCK (cfg, interface_fail_bb);
3323 tmp_reg = alloc_preg (cfg);
3324 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3325 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
3326 MONO_START_BB (cfg, interface_fail_bb);
3327 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Interface check failed: only a proxy can still succeed remotely. */
3329 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3331 tmp_reg = alloc_preg (cfg);
3332 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3333 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3334 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
/* Non-interface case: detect transparent proxies first. */
3336 tmp_reg = alloc_preg (cfg);
3337 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3338 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3340 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3341 tmp_reg = alloc_preg (cfg);
3342 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3343 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3345 tmp_reg = alloc_preg (cfg);
3346 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3347 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3348 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3350 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3351 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3353 MONO_START_BB (cfg, no_proxy_bb);
3355 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Materialize the three result values (0/1/2) and converge. */
3358 MONO_START_BB (cfg, false_bb);
3360 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3361 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3363 MONO_START_BB (cfg, false2_bb);
3365 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3366 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3368 MONO_START_BB (cfg, true_bb);
3370 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3372 MONO_START_BB (cfg, end_bb);
3375 MONO_INST_NEW (cfg, ins, OP_ICONST);
3377 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the remoting-aware castclass variant (see the value
 * contract in the comment below).  Proxies with custom type info yield 1
 * for a remote check; everything else either passes (0) or throws
 * InvalidCastException.
 */
3383 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3385 /* This opcode takes as input an object reference and a class, and returns:
3386 0) if the object is an instance of the class,
3387 1) if the object is a proxy whose type cannot be determined
3388 an InvalidCastException exception is thrown otherwhise*/
3391 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3392 int obj_reg = src->dreg;
3393 int dreg = alloc_ireg (cfg);
3394 int tmp_reg = alloc_preg (cfg);
3395 int klass_reg = alloc_preg (cfg);
3397 NEW_BBLOCK (cfg, end_bb);
3398 NEW_BBLOCK (cfg, ok_result_bb);
/* null passes a castclass (result 0). */
3400 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3401 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
3403 save_cast_details (cfg, klass, obj_reg);
3405 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3406 NEW_BBLOCK (cfg, interface_fail_bb);
3408 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3409 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3410 MONO_START_BB (cfg, interface_fail_bb);
3411 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Only a transparent proxy may still pass; otherwise this throws. */
3413 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3415 tmp_reg = alloc_preg (cfg);
3416 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3417 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3418 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Proxy with custom type info: defer to a remote check (result 1). */
3420 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3421 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3424 NEW_BBLOCK (cfg, no_proxy_bb);
3426 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3427 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3428 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3430 tmp_reg = alloc_preg (cfg);
3431 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3432 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3434 tmp_reg = alloc_preg (cfg);
3435 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3436 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3437 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3439 NEW_BBLOCK (cfg, fail_1_bb);
3441 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3443 MONO_START_BB (cfg, fail_1_bb);
3445 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3446 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3448 MONO_START_BB (cfg, no_proxy_bb);
/* Plain object: inline castclass check (throws on mismatch). */
3450 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3453 MONO_START_BB (cfg, ok_result_bb);
3455 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3457 MONO_START_BB (cfg, end_bb);
3460 MONO_INST_NEW (cfg, ins, OP_ICONST);
3462 ins->type = STACK_I4;
3468 * Returns NULL and set the cfg exception on error.
/*
 * handle_delegate_ctor:
 *
 *   Emit inlined delegate construction: allocate the delegate of type
 * KLASS and fill in its target, method, per-domain code-slot and
 * invoke_impl fields, mirroring mono_delegate_ctor ().  The delegate
 * trampoline performs the checks that the runtime ctor would do.
 */
3470 static G_GNUC_UNUSED MonoInst*
3471 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
3473 gpointer *trampoline;
3474 MonoInst *obj, *method_ins, *tramp_ins;
3478 obj = handle_alloc (cfg, klass, FALSE, 0);
3482 /* Inline the contents of mono_delegate_ctor */
3484 /* Set target field */
3485 /* Optimize away setting of NULL target */
3486 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
3487 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3489 /* Set method field */
3490 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
3491 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3494 * To avoid looking up the compiled code belonging to the target method
3495 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3496 * store it, and we fill it after the method has been compiled.
3498 if (!cfg->compile_aot && !method->dynamic) {
3499 MonoInst *code_slot_ins;
3502 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Allocate/reuse the per-domain code slot under the domain lock. */
3504 domain = mono_domain_get ();
3505 mono_domain_lock (domain);
3506 if (!domain_jit_info (domain)->method_code_hash)
3507 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3508 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3510 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3511 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3513 mono_domain_unlock (domain);
3515 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3517 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3520 /* Set invoke_impl field */
3521 if (cfg->compile_aot) {
3522 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3524 trampoline = mono_create_delegate_trampoline (klass);
3525 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3527 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3529 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 * Emit a native call to the rank-specific mono_array_new_va icall wrapper
 * for a multi-dimensional newarr/newobj. Marks the method as containing
 * varargs and disables LLVM (vararg calling convention unsupported there).
 */
3535 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3537 MonoJitICallInfo *info;
3539 /* Need to register the icall so it gets an icall wrapper */
3540 info = mono_get_array_new_va_icall (rank);
3542 cfg->flags |= MONO_CFG_HAS_VARARGS;
3544 /* mono_array_new_va () needs a vararg calling convention */
3545 cfg->disable_llvm = TRUE;
3547 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3548 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 * Materialize the GOT address into cfg->got_var at the start of the entry
 * bblock (once per method), and keep the variable alive until bb_exit with
 * a dummy use. No-op if there is no got_var or it was already allocated.
 */
3552 mono_emit_load_got_addr (MonoCompile *cfg)
3554 MonoInst *getaddr, *dummy_use;
3556 if (!cfg->got_var || cfg->got_var_allocated)
3559 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3560 getaddr->dreg = cfg->got_var->dreg;
3562 /* Add it to the start of the first bblock */
/* Prepend manually if the entry bblock already has code, otherwise append. */
3563 if (cfg->bb_entry->code) {
3564 getaddr->next = cfg->bb_entry->code;
3565 cfg->bb_entry->code = getaddr;
3568 MONO_ADD_INS (cfg->bb_entry, getaddr);
3570 cfg->got_var_allocated = TRUE;
3573 * Add a dummy use to keep the got_var alive, since real uses might
3574 * only be generated by the back ends.
3575 * Add it to end_bblock, so the variable's lifetime covers the whole
3577 * It would be better to make the usage of the got var explicit in all
3578 * cases when the backend needs it (i.e. calls, throw etc.), so this
3579 * wouldn't be needed.
3581 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3582 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Cached value of the MONO_INLINELIMIT env var (defaults to INLINE_LENGTH_LIMIT). */
3585 static int inline_limit;
3586 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 * Decide whether METHOD is eligible for inlining into the method being
 * compiled in CFG. Rejects synchronized/noinline/MBR methods, methods over
 * the size limit, methods whose class needs a cctor we cannot run now, and
 * (on soft-float targets) methods taking or returning R4.
 * NOTE(review): several lines (returns, braces) appear elided by extraction;
 * code intentionally left untouched.
 */
3589 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3591 MonoMethodHeaderSummary header;
3593 #ifdef MONO_ARCH_SOFT_FLOAT
3594 MonoMethodSignature *sig = mono_method_signature (method);
/* No inlining under generic sharing, and cap the inline recursion depth. */
3598 if (cfg->generic_sharing_context)
3601 if (cfg->inline_depth > 10)
3604 #ifdef MONO_ARCH_HAVE_LMF_OPS
3605 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3606 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3607 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3612 if (!mono_method_get_header_summary (method, &header))
3615 /*runtime, icall and pinvoke are checked by summary call*/
3616 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3617 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3618 (method->klass->marshalbyref) ||
3622 /* also consider num_locals? */
3623 /* Do the size check early to avoid creating vtables */
3624 if (!inline_limit_inited) {
3625 if (getenv ("MONO_INLINELIMIT"))
3626 inline_limit = atoi (getenv ("MONO_INLINELIMIT"))_
3628 inline_limit = INLINE_LENGTH_LIMIT;
3629 inline_limit_inited = TRUE;
3631 if (header.code_size >= inline_limit)
3635 * if we can initialize the class of the method right away, we do,
3636 * otherwise we don't allow inlining if the class needs initialization,
3637 * since it would mean inserting a call to mono_runtime_class_init()
3638 * inside the inlined code
3640 if (!(cfg->opt & MONO_OPT_SHARED)) {
3641 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3642 if (cfg->run_cctors && method->klass->has_cctor) {
3643 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
3644 if (!method->klass->runtime_info)
3645 /* No vtable created yet */
3647 vtable = mono_class_vtable (cfg->domain, method->klass);
3650 /* This makes so that inline cannot trigger */
3651 /* .cctors: too many apps depend on them */
3652 /* running with a specific order... */
3653 if (! vtable->initialized)
3655 mono_runtime_class_init (vtable);
3657 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3658 if (!method->klass->runtime_info)
3659 /* No vtable created yet */
3661 vtable = mono_class_vtable (cfg->domain, method->klass);
3664 if (!vtable->initialized)
3669 * If we're compiling for shared code
3670 * the cctor will need to be run at aot method load time, for example,
3671 * or at the end of the compilation of the inlining method.
3673 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3678 * CAS - do not inline methods with declarative security
3679 * Note: this has to be before any possible return TRUE;
3681 if (mono_method_has_declsec (method))
3684 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float: R4 arguments/returns cannot be inlined. */
3686 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3688 for (i = 0; i < sig->param_count; ++i)
3689 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 * Decide whether a static field access must be preceded by an explicit
 * class-initialization check for VTABLE's class.
 * NOTE(review): the return statements of the individual branches appear
 * elided by extraction — presumably each early branch returns FALSE and
 * the fall-through returns TRUE; verify against upstream before relying
 * on this.
 */
3697 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
/* Already initialized and we are JITting (not AOT): nothing to do. */
3699 if (vtable->initialized && !cfg->compile_aot)
3702 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3705 if (!mono_class_needs_cctor_run (vtable->klass, method))
3708 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3709 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 * Emit IR computing the address of ARR [INDEX] for a one-dimensional array
 * of element type KLASS. Emits a bounds check when BCHECK is set, uses an
 * x86/amd64 LEA fast path for power-of-two element sizes, otherwise a
 * multiply + add sequence.
 */
3716 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
3720 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3722 mono_class_init (klass);
3723 size = mono_class_array_element_size (klass);
3725 mult_reg = alloc_preg (cfg);
3726 array_reg = arr->dreg;
3727 index_reg = index->dreg;
3729 #if SIZEOF_REGISTER == 8
3730 /* The array reg is 64 bits but the index reg is only 32 */
3731 if (COMPILE_LLVM (cfg)) {
3733 index2_reg = index_reg;
3735 index2_reg = alloc_preg (cfg);
3736 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32-bit target: narrow an I8 index to I4 first. */
3739 if (index->type == STACK_I8) {
3740 index2_reg = alloc_preg (cfg);
3741 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
3743 index2_reg = index_reg;
3748 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3750 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Fast path: LEA with a shift for element sizes 1/2/4/8. */
3751 if (size == 1 || size == 2 || size == 4 || size == 8) {
3752 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3754 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3755 ins->type = STACK_PTR;
/* Generic path: addr = arr + index * size + offsetof (MonoArray, vector). */
3761 add_reg = alloc_preg (cfg);
3763 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3764 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3765 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3766 ins->type = STACK_PTR;
3767 MONO_ADD_INS (cfg->cbb, ins);
3772 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 * Emit IR computing the address of ARR [i, j] for a rank-2 array, including
 * lower-bound adjustment and range checks on both dimensions. Requires a
 * native multiply (OP_PMUL), hence the MONO_ARCH_EMULATE_MUL_DIV guard.
 */
3774 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
3776 int bounds_reg = alloc_preg (cfg);
3777 int add_reg = alloc_preg (cfg);
3778 int mult_reg = alloc_preg (cfg);
3779 int mult2_reg = alloc_preg (cfg);
3780 int low1_reg = alloc_preg (cfg);
3781 int low2_reg = alloc_preg (cfg);
3782 int high1_reg = alloc_preg (cfg);
3783 int high2_reg = alloc_preg (cfg);
3784 int realidx1_reg = alloc_preg (cfg);
3785 int realidx2_reg = alloc_preg (cfg);
3786 int sum_reg = alloc_preg (cfg);
3791 mono_class_init (klass);
3792 size = mono_class_array_element_size (klass);
3794 index1 = index_ins1->dreg;
3795 index2 = index_ins2->dreg;
3797 /* range checking */
3798 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
3799 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: realidx = index - lower_bound; throw if realidx >= length (unsigned). */
3801 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
3802 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3803 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
3804 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
3805 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
3806 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
3807 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: same check against the second MonoArrayBounds entry. */
3809 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
3810 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3811 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
3812 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
3813 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
3814 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
3815 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + ((realidx1 * len2 + realidx2) * size) + offsetof (MonoArray, vector) */
3817 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
3818 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
3819 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
3820 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
3821 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3823 ins->type = STACK_MP;
3825 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 * Dispatch element-address computation for an array accessor CMETHOD:
 * rank 1 uses the inline helper, rank 2 the OP_PMUL helper when available,
 * and everything else falls back to a marshalled Address () call.
 * IS_SET indicates a setter, whose last parameter is the value, not an index.
 */
3832 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
3836 MonoMethod *addr_method;
3839 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3842 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
3844 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3845 /* emit_ldelema_2 depends on OP_LMUL */
3846 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3847 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* General case: call the generated array-address wrapper. */
3851 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3852 addr_method = mono_marshal_get_array_address (rank, element_size);
3853 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
3858 static MonoBreakPolicy
3859 always_insert_breakpoint (MonoMethod *method)
3861 return MONO_BREAK_POLICY_ALWAYS;
3864 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
3867 * mono_set_break_policy:
3868 * policy_callback: the new callback function
3870 * Allow embedders to decide whether to actually obey breakpoint instructions
3871 * (both break IL instructions and Debugger.Break () method calls), for example
3872 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
3873 * untrusted or semi-trusted code.
3875 * @policy_callback will be called every time a break point instruction needs to
3876 * be inserted with the method argument being the method that calls Debugger.Break()
3877 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
3878 * if it wants the breakpoint to not be effective in the given method.
3879 * #MONO_BREAK_POLICY_ALWAYS is the default.
3882 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
3884 if (policy_callback)
3885 break_policy_func = policy_callback;
3887 break_policy_func = always_insert_breakpoint;
3891 should_insert_brekpoint (MonoMethod *method) {
3892 switch (break_policy_func (method)) {
3893 case MONO_BREAK_POLICY_ALWAYS:
3895 case MONO_BREAK_POLICY_NEVER:
3897 case MONO_BREAK_POLICY_ON_DBG:
3898 return mono_debug_using_mono_debugger ();
3900 g_warning ("Incorrect value returned from break policy callback");
3905 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 * Inline an Array Get/SetGenericValueImpl call as a direct element-address
 * computation plus a typed load/store. IS_SET selects the store direction.
 */
3907 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
3909 MonoInst *addr, *store, *load;
3910 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
3912 /* the bounds check is already done by the callers */
3913 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* is_set: *elem = *value_ptr; otherwise: *value_ptr = *elem. */
3915 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
3916 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
3918 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
3919 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/*
 * mini_emit_inst_for_method:
 * Try to replace a call to CMETHOD with an equivalent inline IR sequence
 * (an intrinsic). Recognizes well-known methods on String, Object, Array,
 * RuntimeHelpers, Thread, Monitor, Interlocked, Debugger, Environment and
 * Math, then gives SIMD and the architecture backend a chance.
 * Returns the emitted instruction, or (presumably) NULL when the call is
 * not intrinsified — the final return is outside this extracted chunk.
 * NOTE(review): many structural lines (braces, else branches, returns)
 * appear elided by extraction; code intentionally left untouched.
 */
3925 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
3927 MonoInst *ins = NULL;
/* Cached lazily; RuntimeHelpers has no mono_defaults entry. */
3929 static MonoClass *runtime_helpers_class = NULL;
3930 if (! runtime_helpers_class)
3931 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
3932 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* --- System.String intrinsics --- */
3934 if (cmethod->klass == mono_defaults.string_class) {
3935 if (strcmp (cmethod->name, "get_Chars") == 0) {
3936 int dreg = alloc_ireg (cfg);
3937 int index_reg = alloc_preg (cfg);
3938 int mult_reg = alloc_preg (cfg);
3939 int add_reg = alloc_preg (cfg);
3941 #if SIZEOF_REGISTER == 8
3942 /* The array reg is 64 bits but the index reg is only 32 */
3943 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
3945 index_reg = args [1]->dreg;
3947 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
3949 #if defined(TARGET_X86) || defined(TARGET_AMD64)
3950 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
3951 add_reg = ins->dreg;
3952 /* Avoid a warning */
3954 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3957 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
3958 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3959 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3960 add_reg, G_STRUCT_OFFSET (MonoString, chars));
3962 type_from_op (ins, NULL, NULL);
3964 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3965 int dreg = alloc_ireg (cfg);
3966 /* Decompose later to allow more optimizations */
3967 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
3968 ins->type = STACK_I4;
3969 cfg->cbb->has_array_access = TRUE;
3970 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
3973 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
3974 int mult_reg = alloc_preg (cfg);
3975 int add_reg = alloc_preg (cfg);
3977 /* The corlib functions check for oob already. */
3978 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
3979 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3980 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
3981 return cfg->cbb->last_ins;
/* --- System.Object intrinsics --- */
3984 } else if (cmethod->klass == mono_defaults.object_class) {
3986 if (strcmp (cmethod->name, "GetType") == 0) {
3987 int dreg = alloc_preg (cfg);
3988 int vt_reg = alloc_preg (cfg);
3989 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3990 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
3991 type_from_op (ins, NULL, NULL);
/* Hash based on the object address, so only valid with a non-moving GC. */
3994 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
3995 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
3996 int dreg = alloc_ireg (cfg);
3997 int t1 = alloc_ireg (cfg);
3999 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
4000 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
4001 ins->type = STACK_I4;
4005 } else if (strcmp (cmethod->name, ".ctor") == 0) {
4006 MONO_INST_NEW (cfg, ins, OP_NOP);
4007 MONO_ADD_INS (cfg->cbb, ins);
/* --- System.Array intrinsics --- */
4011 } else if (cmethod->klass == mono_defaults.array_class) {
4012 if (strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
4013 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
4014 if (cmethod->name [0] != 'g')
4017 if (strcmp (cmethod->name, "get_Rank") == 0) {
4018 int dreg = alloc_ireg (cfg);
4019 int vtable_reg = alloc_preg (cfg);
4020 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
4021 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4022 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
4023 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4024 type_from_op (ins, NULL, NULL);
4027 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4028 int dreg = alloc_ireg (cfg);
4030 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4031 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4032 type_from_op (ins, NULL, NULL);
/* --- RuntimeHelpers --- */
4037 } else if (cmethod->klass == runtime_helpers_class) {
4039 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
4040 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
/* --- System.Threading.Thread --- */
4044 } else if (cmethod->klass == mono_defaults.thread_class) {
4045 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
4046 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
4047 MONO_ADD_INS (cfg->cbb, ins);
4049 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
4050 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4051 MONO_ADD_INS (cfg->cbb, ins);
/* --- System.Threading.Monitor: call Enter/Exit trampolines directly --- */
4054 } else if (cmethod->klass == mono_defaults.monitor_class) {
4055 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
4056 if (strcmp (cmethod->name, "Enter") == 0) {
4059 if (COMPILE_LLVM (cfg)) {
4061 * Pass the argument normally, the LLVM backend will handle the
4062 * calling convention problems.
4064 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4066 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
4067 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4068 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4069 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4072 return (MonoInst*)call;
4073 } else if (strcmp (cmethod->name, "Exit") == 0) {
4076 if (COMPILE_LLVM (cfg)) {
4077 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4079 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
4080 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4081 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4082 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4085 return (MonoInst*)call;
4087 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4088 MonoMethod *fast_method = NULL;
4090 /* Avoid infinite recursion */
4091 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
4092 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
4093 strcmp (cfg->method->name, "FastMonitorExit") == 0))
4096 if (strcmp (cmethod->name, "Enter") == 0 ||
4097 strcmp (cmethod->name, "Exit") == 0)
4098 fast_method = mono_monitor_get_fast_path (cmethod);
4102 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
/* --- System.Threading.Interlocked --- */
4104 } else if (cmethod->klass->image == mono_defaults.corlib &&
4105 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
4106 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
4109 #if SIZEOF_REGISTER == 8
4110 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
4111 /* 64 bit reads are already atomic */
4112 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
4113 ins->dreg = mono_alloc_preg (cfg);
4114 ins->inst_basereg = args [0]->dreg;
4115 ins->inst_offset = 0;
4116 MONO_ADD_INS (cfg->cbb, ins);
4120 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
/* Increment/Decrement are atomic add of +1 / -1. */
4121 if (strcmp (cmethod->name, "Increment") == 0) {
4122 MonoInst *ins_iconst;
4125 if (fsig->params [0]->type == MONO_TYPE_I4)
4126 opcode = OP_ATOMIC_ADD_NEW_I4;
4127 #if SIZEOF_REGISTER == 8
4128 else if (fsig->params [0]->type == MONO_TYPE_I8)
4129 opcode = OP_ATOMIC_ADD_NEW_I8;
4132 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4133 ins_iconst->inst_c0 = 1;
4134 ins_iconst->dreg = mono_alloc_ireg (cfg);
4135 MONO_ADD_INS (cfg->cbb, ins_iconst);
4137 MONO_INST_NEW (cfg, ins, opcode);
4138 ins->dreg = mono_alloc_ireg (cfg);
4139 ins->inst_basereg = args [0]->dreg;
4140 ins->inst_offset = 0;
4141 ins->sreg2 = ins_iconst->dreg;
4142 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4143 MONO_ADD_INS (cfg->cbb, ins);
4145 } else if (strcmp (cmethod->name, "Decrement") == 0) {
4146 MonoInst *ins_iconst;
4149 if (fsig->params [0]->type == MONO_TYPE_I4)
4150 opcode = OP_ATOMIC_ADD_NEW_I4;
4151 #if SIZEOF_REGISTER == 8
4152 else if (fsig->params [0]->type == MONO_TYPE_I8)
4153 opcode = OP_ATOMIC_ADD_NEW_I8;
4156 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4157 ins_iconst->inst_c0 = -1;
4158 ins_iconst->dreg = mono_alloc_ireg (cfg);
4159 MONO_ADD_INS (cfg->cbb, ins_iconst);
4161 MONO_INST_NEW (cfg, ins, opcode);
4162 ins->dreg = mono_alloc_ireg (cfg);
4163 ins->inst_basereg = args [0]->dreg;
4164 ins->inst_offset = 0;
4165 ins->sreg2 = ins_iconst->dreg;
4166 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4167 MONO_ADD_INS (cfg->cbb, ins);
4169 } else if (strcmp (cmethod->name, "Add") == 0) {
4172 if (fsig->params [0]->type == MONO_TYPE_I4)
4173 opcode = OP_ATOMIC_ADD_NEW_I4;
4174 #if SIZEOF_REGISTER == 8
4175 else if (fsig->params [0]->type == MONO_TYPE_I8)
4176 opcode = OP_ATOMIC_ADD_NEW_I8;
4180 MONO_INST_NEW (cfg, ins, opcode);
4181 ins->dreg = mono_alloc_ireg (cfg);
4182 ins->inst_basereg = args [0]->dreg;
4183 ins->inst_offset = 0;
4184 ins->sreg2 = args [1]->dreg;
4185 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4186 MONO_ADD_INS (cfg->cbb, ins);
4189 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
4191 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
4192 if (strcmp (cmethod->name, "Exchange") == 0) {
4194 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
4196 if (fsig->params [0]->type == MONO_TYPE_I4)
4197 opcode = OP_ATOMIC_EXCHANGE_I4;
4198 #if SIZEOF_REGISTER == 8
4199 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
4200 (fsig->params [0]->type == MONO_TYPE_I))
4201 opcode = OP_ATOMIC_EXCHANGE_I8;
4203 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
4204 opcode = OP_ATOMIC_EXCHANGE_I4;
4209 MONO_INST_NEW (cfg, ins, opcode);
4210 ins->dreg = mono_alloc_ireg (cfg);
4211 ins->inst_basereg = args [0]->dreg;
4212 ins->inst_offset = 0;
4213 ins->sreg2 = args [1]->dreg;
4214 MONO_ADD_INS (cfg->cbb, ins);
4216 switch (fsig->params [0]->type) {
4218 ins->type = STACK_I4;
4222 ins->type = STACK_I8;
4224 case MONO_TYPE_OBJECT:
4225 ins->type = STACK_OBJ;
4228 g_assert_not_reached ();
/* A reference store needs the GC write barrier when barriers are enabled. */
4231 #if HAVE_WRITE_BARRIERS
4233 MonoInst *dummy_use;
4234 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
4235 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
4236 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [1]);
4240 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4242 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
4243 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
4245 gboolean is_ref = MONO_TYPE_IS_REFERENCE (fsig->params [1]);
4246 if (fsig->params [1]->type == MONO_TYPE_I4)
4248 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
4249 size = sizeof (gpointer);
4250 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
4253 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
4254 ins->dreg = alloc_ireg (cfg);
4255 ins->sreg1 = args [0]->dreg;
4256 ins->sreg2 = args [1]->dreg;
4257 ins->sreg3 = args [2]->dreg;
4258 ins->type = STACK_I4;
4259 MONO_ADD_INS (cfg->cbb, ins);
4260 } else if (size == 8) {
4261 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
4262 ins->dreg = alloc_ireg (cfg);
4263 ins->sreg1 = args [0]->dreg;
4264 ins->sreg2 = args [1]->dreg;
4265 ins->sreg3 = args [2]->dreg;
4266 ins->type = STACK_I8;
4267 MONO_ADD_INS (cfg->cbb, ins);
4269 /* g_assert_not_reached (); */
4271 #if HAVE_WRITE_BARRIERS
4273 MonoInst *dummy_use;
4274 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
4275 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
4276 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [1]);
4280 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
/* --- misc corlib: Debugger.Break, Environment.get_IsRunningOnWindows --- */
4284 } else if (cmethod->klass->image == mono_defaults.corlib) {
4285 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
4286 && strcmp (cmethod->klass->name, "Debugger") == 0) {
4287 if (should_insert_brekpoint (cfg->method))
4288 MONO_INST_NEW (cfg, ins, OP_BREAK);
4290 MONO_INST_NEW (cfg, ins, OP_NOP);
4291 MONO_ADD_INS (cfg->cbb, ins);
4294 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
4295 && strcmp (cmethod->klass->name, "Environment") == 0) {
4297 EMIT_NEW_ICONST (cfg, ins, 1);
4299 EMIT_NEW_ICONST (cfg, ins, 0);
4303 } else if (cmethod->klass == mono_defaults.math_class) {
4305 * There is general branches code for Min/Max, but it does not work for
4307 * http://everything2.com/?node_id=1051618
4311 #ifdef MONO_ARCH_SIMD_INTRINSICS
4312 if (cfg->opt & MONO_OPT_SIMD) {
4313 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/* Finally let the architecture backend try. */
4319 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
4323 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 * Redirect selected runtime calls to faster managed implementations.
 * Currently only replaces String.InternalAllocateStr with the managed
 * allocator (skipped when allocation profiling is active).
 * NOTE(review): the fall-through "not redirected" return is outside this
 * extracted chunk — presumably returns NULL.
 */
4326 inline static MonoInst*
4327 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4328 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4330 if (method->klass == mono_defaults.string_class) {
4331 /* managed string allocation support */
4332 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS)) {
4333 MonoInst *iargs [2];
4334 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4335 MonoMethod *managed_alloc = NULL;
4337 g_assert (vtable); /* Should not fail since it's System.String */
4338 #ifndef MONO_CROSS_COMPILE
4339 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
4343 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4344 iargs [1] = args [0];
4345 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 * When inlining, create a local variable for each argument of the inlined
 * method (including an implicit this) and emit stores of the caller's
 * stack values SP into them, populating cfg->args for the inlined body.
 */
4352 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4354 MonoInst *store, *temp;
4357 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* Slot 0 is the implicit this when hasthis; its type comes from the stack. */
4358 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4361 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4362 * would be different than the MonoInst's used to represent arguments, and
4363 * the ldelema implementation can't deal with that.
4364 * Solution: When ldelema is used on an inline argument, create a var for
4365 * it, emit ldelema on that var, and emit the saving code below in
4366 * inline_method () if needed.
4368 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4369 cfg->args [i] = temp;
4370 /* This uses cfg->args [i] which is set by the preceding line */
4371 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4372 store->cil_code = sp [0]->cil_code;
4377 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4378 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4380 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 * Debug filter: only allow inlining of callees whose full name starts with
 * the MONO_INLINE_CALLED_METHOD_NAME_LIMIT env var prefix. The env value is
 * cached in a static on first use; an empty/unset limit allows everything.
 */
4382 check_inline_called_method_name_limit (MonoMethod *called_method)
4385 static char *limit = NULL;
4387 if (limit == NULL) {
4388 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4390 if (limit_string != NULL)
4391 limit = limit_string;
4393 limit = (char *) "";
4396 if (limit [0] != '\0') {
4397 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* Prefix match: compare only the first strlen (limit) characters. */
4399 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4400 g_free (called_method_name);
4402 //return (strncmp_result <= 0);
4403 return (strncmp_result == 0);
4410 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 * Debug filter mirroring check_inline_called_method_name_limit, but
 * applied to the *caller*: only inline inside methods whose full name
 * starts with the MONO_INLINE_CALLER_METHOD_NAME_LIMIT env var prefix.
 */
4412 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4415 static char *limit = NULL;
4417 if (limit == NULL) {
4418 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4419 if (limit_string != NULL) {
4420 limit = limit_string;
4422 limit = (char *) "";
4426 if (limit [0] != '\0') {
4427 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
/* Prefix match: compare only the first strlen (limit) characters. */
4429 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4430 g_free (caller_method_name);
4432 //return (strncmp_result <= 0);
4433 return (strncmp_result == 0);
4441 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4442 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
4444 MonoInst *ins, *rvar = NULL;
4445 MonoMethodHeader *cheader;
4446 MonoBasicBlock *ebblock, *sbblock;
4448 MonoMethod *prev_inlined_method;
4449 MonoInst **prev_locals, **prev_args;
4450 MonoType **prev_arg_types;
4451 guint prev_real_offset;
4452 GHashTable *prev_cbb_hash;
4453 MonoBasicBlock **prev_cil_offset_to_bb;
4454 MonoBasicBlock *prev_cbb;
4455 unsigned char* prev_cil_start;
4456 guint32 prev_cil_offset_to_bb_len;
4457 MonoMethod *prev_current_method;
4458 MonoGenericContext *prev_generic_context;
4459 gboolean ret_var_set, prev_ret_var_set;
4461 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
4463 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4464 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
4467 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4468 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
4472 if (cfg->verbose_level > 2)
4473 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4475 if (!cmethod->inline_info) {
4476 mono_jit_stats.inlineable_methods++;
4477 cmethod->inline_info = 1;
4480 /* allocate local variables */
4481 cheader = mono_method_get_header (cmethod);
4483 if (cheader == NULL || mono_loader_get_last_error ()) {
4485 mono_metadata_free_mh (cheader);
4486 mono_loader_clear_error ();
4490 /* allocate space to store the return value */
4491 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4492 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
4496 prev_locals = cfg->locals;
4497 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4498 for (i = 0; i < cheader->num_locals; ++i)
4499 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4501 /* allocate start and end blocks */
4502 /* This is needed so if the inline is aborted, we can clean up */
4503 NEW_BBLOCK (cfg, sbblock);
4504 sbblock->real_offset = real_offset;
4506 NEW_BBLOCK (cfg, ebblock);
4507 ebblock->block_num = cfg->num_bblocks++;
4508 ebblock->real_offset = real_offset;
4510 prev_args = cfg->args;
4511 prev_arg_types = cfg->arg_types;
4512 prev_inlined_method = cfg->inlined_method;
4513 cfg->inlined_method = cmethod;
4514 cfg->ret_var_set = FALSE;
4515 cfg->inline_depth ++;
4516 prev_real_offset = cfg->real_offset;
4517 prev_cbb_hash = cfg->cbb_hash;
4518 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4519 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4520 prev_cil_start = cfg->cil_start;
4521 prev_cbb = cfg->cbb;
4522 prev_current_method = cfg->current_method;
4523 prev_generic_context = cfg->generic_context;
4524 prev_ret_var_set = cfg->ret_var_set;
4526 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4528 ret_var_set = cfg->ret_var_set;
4530 cfg->inlined_method = prev_inlined_method;
4531 cfg->real_offset = prev_real_offset;
4532 cfg->cbb_hash = prev_cbb_hash;
4533 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4534 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4535 cfg->cil_start = prev_cil_start;
4536 cfg->locals = prev_locals;
4537 cfg->args = prev_args;
4538 cfg->arg_types = prev_arg_types;
4539 cfg->current_method = prev_current_method;
4540 cfg->generic_context = prev_generic_context;
4541 cfg->ret_var_set = prev_ret_var_set;
4542 cfg->inline_depth --;
4544 if ((costs >= 0 && costs < 60) || inline_allways) {
4545 if (cfg->verbose_level > 2)
4546 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4548 mono_jit_stats.inlined_methods++;
4550 /* always add some code to avoid block split failures */
4551 MONO_INST_NEW (cfg, ins, OP_NOP);
4552 MONO_ADD_INS (prev_cbb, ins);
4554 prev_cbb->next_bb = sbblock;
4555 link_bblock (cfg, prev_cbb, sbblock);
4558 * Get rid of the begin and end bblocks if possible to aid local
4561 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4563 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4564 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4566 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4567 MonoBasicBlock *prev = ebblock->in_bb [0];
4568 mono_merge_basic_blocks (cfg, prev, ebblock);
4570 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4571 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4572 cfg->cbb = prev_cbb;
4580 * If the inlined method contains only a throw, then the ret var is not
4581 * set, so set it to a dummy value.
4584 static double r8_0 = 0.0;
4586 switch (rvar->type) {
4588 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4591 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4596 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4599 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4600 ins->type = STACK_R8;
4601 ins->inst_p0 = (void*)&r8_0;
4602 ins->dreg = rvar->dreg;
4603 MONO_ADD_INS (cfg->cbb, ins);
4606 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4609 g_assert_not_reached ();
4613 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
4616 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
4619 if (cfg->verbose_level > 2)
4620 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4621 cfg->exception_type = MONO_EXCEPTION_NONE;
4622 mono_loader_clear_error ();
4624 /* This gets rid of the newly added bblocks */
4625 cfg->cbb = prev_cbb;
4627 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
4632 * Some of these comments may well be out-of-date.
4633 * Design decisions: we do a single pass over the IL code (and we do bblock
4634 * splitting/merging in the few cases when it's required: a back jump to an IL
4635 * address that was not already seen as bblock starting point).
4636 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4637 * Complex operations are decomposed in simpler ones right away. We need to let the
4638 * arch-specific code peek and poke inside this process somehow (except when the
4639 * optimizations can take advantage of the full semantic info of coarse opcodes).
4640 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4641 * MonoInst->opcode initially is the IL opcode or some simplification of that
4642 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4643 * opcode with value bigger than OP_LAST.
4644 * At this point the IR can be handed over to an interpreter, a dumb code generator
4645 * or to the optimizing code generator that will translate it to SSA form.
4647 * Profiling directed optimizations.
4648 * We may compile by default with few or no optimizations and instrument the code
4649 * or the user may indicate what methods to optimize the most either in a config file
4650 * or through repeated runs where the compiler applies offline the optimizations to
4651 * each method and then decides if it was worth it.
/*
 * Verification helper macros used from inside mono_method_to_ir ().
 * They rely on locals of that function being in scope (sp, stack_start,
 * header, num_args, ip, end) and jump to the UNVERIFIED handler on failure.
 */
4654 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4655 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4656 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
/* unsigned compare catches negative indices as well as too-large ones */
4657 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4658 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4659 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4660 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
/* failed type loads record the class and jump to the load_error handler instead */
4661 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
4663 /* offset from br.s -> br like opcodes */
4664 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *   Returns TRUE if the CIL address IP still belongs to basic block BB,
 *   i.e. either no bblock has been assigned to that offset yet or the
 *   assigned bblock is BB itself.
 */
4667 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4669 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4671 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *   First pass over the IL stream: walk every opcode between START and END
 *   and create basic blocks (via GET_BBLOCK) at each branch target and at
 *   the instruction following a branch/switch. Also marks bblocks that end
 *   in CEE_THROW as out-of-line so they can be moved to cold code.
 */
4675 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4677 unsigned char *ip = start;
4678 unsigned char *target;
4681 MonoBasicBlock *bblock;
4682 const MonoOpcode *opcode;
4685 cli_addr = ip - start;
4686 i = mono_opcode_value ((const guint8 **)&ip, end);
4689 opcode = &mono_opcodes [i];
/* dispatch on the operand encoding to find the instruction length and targets */
4690 switch (opcode->argument) {
4691 case MonoInlineNone:
4694 case MonoInlineString:
4695 case MonoInlineType:
4696 case MonoInlineField:
4697 case MonoInlineMethod:
4700 case MonoShortInlineR:
4707 case MonoShortInlineVar:
4708 case MonoShortInlineI:
/* short branch: 1-byte signed displacement relative to the next instruction */
4711 case MonoShortInlineBrTarget:
4712 target = start + cli_addr + 2 + (signed char)ip [1];
4713 GET_BBLOCK (cfg, bblock, target);
/* the fall-through successor also starts a new bblock */
4716 GET_BBLOCK (cfg, bblock, ip);
/* long branch: 4-byte signed displacement relative to the next instruction */
4718 case MonoInlineBrTarget:
4719 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4720 GET_BBLOCK (cfg, bblock, target);
4723 GET_BBLOCK (cfg, bblock, ip);
4725 case MonoInlineSwitch: {
4726 guint32 n = read32 (ip + 1);
/* switch targets are relative to the end of the whole switch instruction */
4729 cli_addr += 5 + 4 * n;
4730 target = start + cli_addr;
4731 GET_BBLOCK (cfg, bblock, target);
4733 for (j = 0; j < n; ++j) {
4734 target = start + cli_addr + (gint32)read32 (ip);
4735 GET_BBLOCK (cfg, bblock, target);
4745 g_assert_not_reached ();
4748 if (i == CEE_THROW) {
4749 unsigned char *bb_start = ip - 1;
4751 /* Find the start of the bblock containing the throw */
4753 while ((bb_start >= start) && !bblock) {
4754 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* bblocks ending in a throw are cold: mark them for out-of-line placement */
4758 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *   Resolve TOKEN to a MonoMethod in the context of method M. For wrapper
 *   methods the token indexes the wrapper's own data table; otherwise it is
 *   looked up in the method's image. Open constructed types are allowed
 *   (contrast with mini_get_method () below).
 */
4767 static inline MonoMethod *
4768 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4772 if (m->wrapper_type != MONO_WRAPPER_NONE)
4773 return mono_method_get_wrapper_data (m, token);
4775 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *   Like mini_get_method_allow_open (), but when not compiling shared
 *   generic code a method on an open constructed type is rejected
 *   (CFG may be NULL, in which case the open-type check is skipped).
 */
4780 static inline MonoMethod *
4781 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4783 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
4785 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *   Resolve TOKEN to a MonoClass in the context of METHOD: wrapper methods
 *   use their own data table, otherwise the class is loaded from the
 *   method's image and initialized before being returned.
 */
4791 static inline MonoClass*
4792 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4796 if (method->wrapper_type != MONO_WRAPPER_NONE)
4797 klass = mono_method_get_wrapper_data (method, token);
4799 klass = mono_class_get_full (method->klass->image, token, context);
4801 mono_class_init (klass);
/*
 * check_linkdemand:
 *
 * Returns TRUE if the JIT should abort inlining because "callee"
 * is influenced by security attributes (CAS LinkDemand).
 * For ECMA link demands it emits a call that throws a SecurityException at
 * run time; other failures are recorded on CFG as a pending
 * MONO_EXCEPTION_SECURITY_LINKDEMAND.
 */
4810 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
/* only evaluate declarative security when compiling someone other than the caller itself */
4814 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
4818 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
4819 if (result == MONO_JIT_SECURITY_OK)
4822 if (result == MONO_JIT_LINKDEMAND_ECMA) {
4823 /* Generate code to throw a SecurityException before the actual call/link */
4824 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4827 NEW_ICONST (cfg, args [0], 4);
4828 NEW_METHODCONST (cfg, args [1], caller);
4829 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
4830 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
4831 /* don't hide previous results */
4832 cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
4833 cfg->exception_data = result;
/*
 * throw_exception:
 *   Returns (and lazily caches in a static) the managed
 *   SecurityManager.ThrowException (exception) method used to raise
 *   security exceptions from JITted code.
 */
4841 throw_exception (void)
4843 static MonoMethod *method = NULL;
4846 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4847 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *   Emit IR that throws the pre-allocated managed exception EX at run time,
 *   by calling SecurityManager.ThrowException with EX as a pointer constant.
 */
4854 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
4856 MonoMethod *thrower = throw_exception ();
4859 EMIT_NEW_PCONST (cfg, args [0], ex);
4860 mono_emit_method_call (cfg, thrower, args, NULL);
4864 * Return the original method if a wrapper is specified. We can only access
4865 * the custom attributes from the original method.
4868 get_original_method (MonoMethod *method)
/* not a wrapper: the method itself carries its own attributes */
4870 if (method->wrapper_type == MONO_WRAPPER_NONE)
4873 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
4874 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
4877 /* in other cases we need to find the original method */
4878 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *   CoreCLR security check: if CALLER may not access FIELD, emit code that
 *   throws the corresponding security exception at run time.
 */
4882 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
4883 MonoBasicBlock *bblock, unsigned char *ip)
4885 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4886 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
4888 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *   CoreCLR security check: if CALLER may not call CALLEE, emit code that
 *   throws the corresponding security exception at run time.
 */
4892 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4893 MonoBasicBlock *bblock, unsigned char *ip)
4895 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4896 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
4898 emit_throw_exception (cfg, ex);
/*
 * initialize_array_data:
 *
 * Check that the IL instructions at ip are the array initialization
 * sequence and return the pointer to the data and the size.
 * On success *OUT_SIZE and *OUT_FIELD_TOKEN are filled in; for AOT
 * compilation of non-dynamic images the field RVA is returned instead of a
 * direct data pointer (the lookup happens at load time). Returns NULL when
 * the pattern does not match or the element type cannot be handled.
 */
4906 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
/* expected IL shape after the newarr:
4909 * newarr[System.Int32]
4911 * ldtoken field valuetype ...
4912 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
*/
4914 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
4915 guint32 token = read32 (ip + 7);
4916 guint32 field_token = read32 (ip + 2);
4917 guint32 field_index = field_token & 0xffffff;
4919 const char *data_ptr;
4921 MonoMethod *cmethod;
4922 MonoClass *dummy_class;
4923 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
4929 *out_field_token = field_token;
4931 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* only optimize the real RuntimeHelpers.InitializeArray from corlib */
4934 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
4936 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
4937 case MONO_TYPE_BOOLEAN:
4941 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
4942 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
4943 case MONO_TYPE_CHAR:
4953 return NULL; /* stupid ARM FP swapped format */
/* the field's declared size must cover the whole array contents */
4963 if (size > mono_type_size (field->type, &dummy_align))
4966 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
4967 if (!method->klass->image->dynamic) {
4968 field_index = read32 (ip + 2) & 0xffffff;
4969 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
4970 data_ptr = mono_image_rva_map (method->klass->image, rva);
4971 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
4972 /* for aot code we do the lookup on load */
4973 if (aot && data_ptr)
4974 return GUINT_TO_POINTER (rva);
4976 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* dynamic (SRE) images: read the data directly from the field */
4978 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *   Record an InvalidProgramException on CFG, formatting a message that
 *   includes the method name and a disassembly of the offending IL at IP
 *   (or a placeholder when the method body is empty). The header is queued
 *   on cfg->headers_to_free for later release.
 */
4986 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
4988 char *method_fname = mono_method_full_name (method, TRUE);
4990 MonoMethodHeader *header = mono_method_get_header (method);
4992 if (header->code_size == 0)
4993 method_code = g_strdup ("method body is empty.");
4995 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
4996 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
4997 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
4998 g_free (method_fname);
4999 g_free (method_code);
5000 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *   Record a pre-allocated managed exception on CFG. The exception pointer
 *   is registered as a GC root so the object stays alive until the
 *   compilation is finished.
 */
5004 set_exception_object (MonoCompile *cfg, MonoException *exception)
5006 cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
5007 MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
5008 cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 *   Returns TRUE if KLASS is a reference type; in shared generic code the
 *   type is first reduced to its basic type under the sharing context so
 *   type variables are classified correctly.
 */
5012 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5016 if (cfg->generic_sharing_context)
5017 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
5019 type = &klass->byval_arg;
5020 return MONO_TYPE_IS_REFERENCE (type);
/*
 * emit_stloc_ir:
 *   Emit a store of the value on top of the stack (*sp) into local N.
 *   When the store would be a plain reg-reg move and the producing
 *   instruction is the constant on top of the current bblock, the move is
 *   optimized away by retargeting the constant's destination register.
 */
5024 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5027 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5028 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5029 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5030 /* Optimize reg-reg moves away */
5032 * Can't optimize other opcodes, since sp[0] might point to
5033 * the last ins of a decomposed opcode.
5035 sp [0]->dreg = (cfg)->locals [n]->dreg;
5037 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
/*
 * emit_optimized_ldloca_ir:
 *
 * ldloca inhibits many optimizations so try to get rid of it in common
 * cases: recognize the ldloca + initobj pattern and replace it with a
 * direct store of NULL (reference types) or a vzero (value types) into
 * the local, so no address of the local is ever taken.
 */
5045 static inline unsigned char *
5046 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
5055 local = read16 (ip + 2);
/* the initobj must be in range and belong to the current bblock */
5059 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5060 gboolean skip = FALSE;
5062 /* From the INITOBJ case */
5063 token = read32 (ip + 2);
5064 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5065 CHECK_TYPELOAD (klass);
5066 if (generic_class_is_reference_type (cfg, klass)) {
5067 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5068 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5069 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5070 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5071 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/*
 * is_exception_class:
 *   Returns whether CLASS is System.Exception or derives from it, by
 *   walking the parent chain up to mono_defaults.exception_class.
 */
5084 is_exception_class (MonoClass *class)
5087 if (class == mono_defaults.exception_class)
5089 class = class->parent;
5095 * mono_method_to_ir:
5097 * Translate the .net IL into linear IR.
5100 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5101 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5102 guint inline_offset, gboolean is_virtual_call)
5105 MonoInst *ins, **sp, **stack_start;
5106 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5107 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
5108 MonoMethod *cmethod, *method_definition;
5109 MonoInst **arg_array;
5110 MonoMethodHeader *header;
5112 guint32 token, ins_flag;
5114 MonoClass *constrained_call = NULL;
5115 unsigned char *ip, *end, *target, *err_pos;
5116 static double r8_0 = 0.0;
5117 MonoMethodSignature *sig;
5118 MonoGenericContext *generic_context = NULL;
5119 MonoGenericContainer *generic_container = NULL;
5120 MonoType **param_types;
5121 int i, n, start_new_bblock, dreg;
5122 int num_calls = 0, inline_costs = 0;
5123 int breakpoint_id = 0;
5125 MonoBoolean security, pinvoke;
5126 MonoSecurityManager* secman = NULL;
5127 MonoDeclSecurityActions actions;
5128 GSList *class_inits = NULL;
5129 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5131 gboolean init_locals, seq_points, skip_dead_blocks;
5133 /* serialization and xdomain stuff may need access to private fields and methods */
5134 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5135 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5136 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5137 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5138 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5139 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5141 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5143 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5144 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5145 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5146 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5148 image = method->klass->image;
5149 header = mono_method_get_header (method);
5150 generic_container = mono_method_get_generic_container (method);
5151 sig = mono_method_signature (method);
5152 num_args = sig->hasthis + sig->param_count;
5153 ip = (unsigned char*)header->code;
5154 cfg->cil_start = ip;
5155 end = ip + header->code_size;
5156 mono_jit_stats.cil_code_size += header->code_size;
5157 init_locals = header->init_locals;
5159 seq_points = cfg->gen_seq_points && cfg->method == method;
5162 * Methods without init_locals set could cause asserts in various passes
5167 method_definition = method;
5168 while (method_definition->is_inflated) {
5169 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5170 method_definition = imethod->declaring;
5173 /* SkipVerification is not allowed if core-clr is enabled */
5174 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5176 dont_verify_stloc = TRUE;
5179 if (!dont_verify && mini_method_verify (cfg, method_definition))
5180 goto exception_exit;
5182 if (mono_debug_using_mono_debugger ())
5183 cfg->keep_cil_nops = TRUE;
5185 if (sig->is_inflated)
5186 generic_context = mono_method_get_context (method);
5187 else if (generic_container)
5188 generic_context = &generic_container->context;
5189 cfg->generic_context = generic_context;
5191 if (!cfg->generic_sharing_context)
5192 g_assert (!sig->has_type_parameters);
5194 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5195 g_assert (method->is_inflated);
5196 g_assert (mono_method_get_context (method)->method_inst);
5198 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5199 g_assert (sig->generic_param_count);
5201 if (cfg->method == method) {
5202 cfg->real_offset = 0;
5204 cfg->real_offset = inline_offset;
5207 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5208 cfg->cil_offset_to_bb_len = header->code_size;
5210 cfg->current_method = method;
5212 if (cfg->verbose_level > 2)
5213 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5215 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5217 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5218 for (n = 0; n < sig->param_count; ++n)
5219 param_types [n + sig->hasthis] = sig->params [n];
5220 cfg->arg_types = param_types;
5222 dont_inline = g_list_prepend (dont_inline, method);
5223 if (cfg->method == method) {
5225 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5226 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5229 NEW_BBLOCK (cfg, start_bblock);
5230 cfg->bb_entry = start_bblock;
5231 start_bblock->cil_code = NULL;
5232 start_bblock->cil_length = 0;
5235 NEW_BBLOCK (cfg, end_bblock);
5236 cfg->bb_exit = end_bblock;
5237 end_bblock->cil_code = NULL;
5238 end_bblock->cil_length = 0;
5239 g_assert (cfg->num_bblocks == 2);
5241 arg_array = cfg->args;
5243 if (header->num_clauses) {
5244 cfg->spvars = g_hash_table_new (NULL, NULL);
5245 cfg->exvars = g_hash_table_new (NULL, NULL);
5247 /* handle exception clauses */
5248 for (i = 0; i < header->num_clauses; ++i) {
5249 MonoBasicBlock *try_bb;
5250 MonoExceptionClause *clause = &header->clauses [i];
5251 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5252 try_bb->real_offset = clause->try_offset;
5253 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5254 tblock->real_offset = clause->handler_offset;
5255 tblock->flags |= BB_EXCEPTION_HANDLER;
5257 link_bblock (cfg, try_bb, tblock);
5259 if (*(ip + clause->handler_offset) == CEE_POP)
5260 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5262 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5263 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5264 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5265 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5266 MONO_ADD_INS (tblock, ins);
5268 /* todo: is a fault block unsafe to optimize? */
5269 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5270 tblock->flags |= BB_EXCEPTION_UNSAFE;
5274 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5276 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5278 /* catch and filter blocks get the exception object on the stack */
5279 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5280 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5281 MonoInst *dummy_use;
5283 /* mostly like handle_stack_args (), but just sets the input args */
5284 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5285 tblock->in_scount = 1;
5286 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5287 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5290 * Add a dummy use for the exvar so its liveness info will be
5294 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5296 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5297 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5298 tblock->flags |= BB_EXCEPTION_HANDLER;
5299 tblock->real_offset = clause->data.filter_offset;
5300 tblock->in_scount = 1;
5301 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5302 /* The filter block shares the exvar with the handler block */
5303 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5304 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5305 MONO_ADD_INS (tblock, ins);
5309 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5310 clause->data.catch_class &&
5311 cfg->generic_sharing_context &&
5312 mono_class_check_context_used (clause->data.catch_class)) {
5314 * In shared generic code with catch
5315 * clauses containing type variables
5316 * the exception handling code has to
5317 * be able to get to the rgctx.
5318 * Therefore we have to make sure that
5319 * the vtable/mrgctx argument (for
5320 * static or generic methods) or the
5321 * "this" argument (for non-static
5322 * methods) are live.
5324 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5325 mini_method_get_context (method)->method_inst ||
5326 method->klass->valuetype) {
5327 mono_get_vtable_var (cfg);
5329 MonoInst *dummy_use;
5331 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5336 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5337 cfg->cbb = start_bblock;
5338 cfg->args = arg_array;
5339 mono_save_args (cfg, sig, inline_args);
5342 /* FIRST CODE BLOCK */
5343 NEW_BBLOCK (cfg, bblock);
5344 bblock->cil_code = ip;
5348 ADD_BBLOCK (cfg, bblock);
5350 if (cfg->method == method) {
5351 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5352 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5353 MONO_INST_NEW (cfg, ins, OP_BREAK);
5354 MONO_ADD_INS (bblock, ins);
5358 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5359 secman = mono_security_manager_get_methods ();
5361 security = (secman && mono_method_has_declsec (method));
5362 /* at this point having security doesn't mean we have any code to generate */
5363 if (security && (cfg->method == method)) {
5364 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5365 * And we do not want to enter the next section (with allocation) if we
5366 * have nothing to generate */
5367 security = mono_declsec_get_demands (method, &actions);
5370 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5371 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5373 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5374 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5375 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5377 /* unless the method or its class has the [SuppressUnmanagedCodeSecurity] attribute */
5378 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5382 mono_custom_attrs_free (custom);
5385 custom = mono_custom_attrs_from_class (wrapped->klass);
5386 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5390 mono_custom_attrs_free (custom);
5393 /* not a P/Invoke after all */
5398 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5399 /* we use a separate basic block for the initialization code */
5400 NEW_BBLOCK (cfg, init_localsbb);
5401 cfg->bb_init = init_localsbb;
5402 init_localsbb->real_offset = cfg->real_offset;
5403 start_bblock->next_bb = init_localsbb;
5404 init_localsbb->next_bb = bblock;
5405 link_bblock (cfg, start_bblock, init_localsbb);
5406 link_bblock (cfg, init_localsbb, bblock);
5408 cfg->cbb = init_localsbb;
5410 start_bblock->next_bb = bblock;
5411 link_bblock (cfg, start_bblock, bblock);
5414 /* at this point we know, if security is TRUE, that some code needs to be generated */
5415 if (security && (cfg->method == method)) {
5418 mono_jit_stats.cas_demand_generation++;
5420 if (actions.demand.blob) {
5421 /* Add code for SecurityAction.Demand */
5422 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5423 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5424 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5425 mono_emit_method_call (cfg, secman->demand, args, NULL);
5427 if (actions.noncasdemand.blob) {
5428 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5429 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5430 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5431 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5432 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5433 mono_emit_method_call (cfg, secman->demand, args, NULL);
5435 if (actions.demandchoice.blob) {
5436 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5437 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5438 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5439 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5440 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5444 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5446 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5449 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5450 /* check if this is native code, e.g. an icall or a p/invoke */
5451 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5452 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5454 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
5455 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
5457 /* if this is a native call then it can only be JITted from platform code */
5458 if ((icall || pinvk) && method->klass && method->klass->image) {
5459 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
5460 MonoException *ex = icall ? mono_get_exception_security () :
5461 mono_get_exception_method_access ();
5462 emit_throw_exception (cfg, ex);
5469 if (header->code_size == 0)
5472 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5477 if (cfg->method == method)
5478 mono_debug_init_method (cfg, bblock, breakpoint_id);
5480 for (n = 0; n < header->num_locals; ++n) {
5481 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5486 /* We force the vtable variable here for all shared methods
5487 for the possibility that they might show up in a stack
5488 trace where their exact instantiation is needed. */
5489 if (cfg->generic_sharing_context && method == cfg->method) {
5490 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5491 mini_method_get_context (method)->method_inst ||
5492 method->klass->valuetype) {
5493 mono_get_vtable_var (cfg);
5495 /* FIXME: Is there a better way to do this?
5496 We need the variable live for the duration
5497 of the whole method. */
5498 cfg->args [0]->flags |= MONO_INST_INDIRECT;
5502 /* add a check for this != NULL to inlined methods */
5503 if (is_virtual_call) {
5506 NEW_ARGLOAD (cfg, arg_ins, 0);
5507 MONO_ADD_INS (cfg->cbb, arg_ins);
5508 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
5511 skip_dead_blocks = !dont_verify;
5512 if (skip_dead_blocks) {
5513 original_bb = bb = mono_basic_block_split (method, &error);
5514 if (!mono_error_ok (&error)) {
5515 mono_error_cleanup (&error);
5521 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5522 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5525 start_new_bblock = 0;
5528 if (cfg->method == method)
5529 cfg->real_offset = ip - header->code;
5531 cfg->real_offset = inline_offset;
5536 if (start_new_bblock) {
5537 bblock->cil_length = ip - bblock->cil_code;
5538 if (start_new_bblock == 2) {
5539 g_assert (ip == tblock->cil_code);
5541 GET_BBLOCK (cfg, tblock, ip);
5543 bblock->next_bb = tblock;
5546 start_new_bblock = 0;
5547 for (i = 0; i < bblock->in_scount; ++i) {
5548 if (cfg->verbose_level > 3)
5549 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5550 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5554 g_slist_free (class_inits);
5557 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5558 link_bblock (cfg, bblock, tblock);
5559 if (sp != stack_start) {
5560 handle_stack_args (cfg, stack_start, sp - stack_start);
5562 CHECK_UNVERIFIABLE (cfg);
5564 bblock->next_bb = tblock;
5567 for (i = 0; i < bblock->in_scount; ++i) {
5568 if (cfg->verbose_level > 3)
5569 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5570 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5573 g_slist_free (class_inits);
5578 if (skip_dead_blocks) {
5579 int ip_offset = ip - header->code;
5581 if (ip_offset == bb->end)
5585 int op_size = mono_opcode_size (ip, end);
5586 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
5588 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
5590 if (ip_offset + op_size == bb->end) {
5591 MONO_INST_NEW (cfg, ins, OP_NOP);
5592 MONO_ADD_INS (bblock, ins);
5593 start_new_bblock = 1;
5601 * Sequence points are points where the debugger can place a breakpoint.
5602 * Currently, we generate these automatically at points where the IL
5605 if (seq_points && sp == stack_start) {
5606 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
5607 MONO_ADD_INS (cfg->cbb, ins);
5610 bblock->real_offset = cfg->real_offset;
5612 if ((cfg->method == method) && cfg->coverage_info) {
5613 guint32 cil_offset = ip - header->code;
5614 cfg->coverage_info->data [cil_offset].cil_code = ip;
5616 /* TODO: Use an increment here */
5617 #if defined(TARGET_X86)
5618 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5619 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5621 MONO_ADD_INS (cfg->cbb, ins);
5623 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5624 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5628 if (cfg->verbose_level > 3)
5629 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5633 if (cfg->keep_cil_nops)
5634 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
5636 MONO_INST_NEW (cfg, ins, OP_NOP);
5638 MONO_ADD_INS (bblock, ins);
5641 if (should_insert_brekpoint (cfg->method))
5642 MONO_INST_NEW (cfg, ins, OP_BREAK);
5644 MONO_INST_NEW (cfg, ins, OP_NOP);
5646 MONO_ADD_INS (bblock, ins);
5652 CHECK_STACK_OVF (1);
5653 n = (*ip)-CEE_LDARG_0;
5655 EMIT_NEW_ARGLOAD (cfg, ins, n);
5663 CHECK_STACK_OVF (1);
5664 n = (*ip)-CEE_LDLOC_0;
5666 EMIT_NEW_LOCLOAD (cfg, ins, n);
5675 n = (*ip)-CEE_STLOC_0;
5678 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5680 emit_stloc_ir (cfg, sp, header, n);
5687 CHECK_STACK_OVF (1);
5690 EMIT_NEW_ARGLOAD (cfg, ins, n);
5696 CHECK_STACK_OVF (1);
5699 NEW_ARGLOADA (cfg, ins, n);
5700 MONO_ADD_INS (cfg->cbb, ins);
5710 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5712 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5717 CHECK_STACK_OVF (1);
5720 EMIT_NEW_LOCLOAD (cfg, ins, n);
5724 case CEE_LDLOCA_S: {
5725 unsigned char *tmp_ip;
5727 CHECK_STACK_OVF (1);
5728 CHECK_LOCAL (ip [1]);
5730 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
5736 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5745 CHECK_LOCAL (ip [1]);
5746 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5748 emit_stloc_ir (cfg, sp, header, ip [1]);
5753 CHECK_STACK_OVF (1);
5754 EMIT_NEW_PCONST (cfg, ins, NULL);
5755 ins->type = STACK_OBJ;
5760 CHECK_STACK_OVF (1);
5761 EMIT_NEW_ICONST (cfg, ins, -1);
5774 CHECK_STACK_OVF (1);
5775 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
5781 CHECK_STACK_OVF (1);
5783 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
5789 CHECK_STACK_OVF (1);
5790 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
5796 CHECK_STACK_OVF (1);
5797 MONO_INST_NEW (cfg, ins, OP_I8CONST);
5798 ins->type = STACK_I8;
5799 ins->dreg = alloc_dreg (cfg, STACK_I8);
5801 ins->inst_l = (gint64)read64 (ip);
5802 MONO_ADD_INS (bblock, ins);
5808 gboolean use_aotconst = FALSE;
5810 #ifdef TARGET_POWERPC
5811 /* FIXME: Clean this up */
5812 if (cfg->compile_aot)
5813 use_aotconst = TRUE;
5816 /* FIXME: we should really allocate this only late in the compilation process */
5817 f = mono_domain_alloc (cfg->domain, sizeof (float));
5819 CHECK_STACK_OVF (1);
5825 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
5827 dreg = alloc_freg (cfg);
5828 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
5829 ins->type = STACK_R8;
5831 MONO_INST_NEW (cfg, ins, OP_R4CONST);
5832 ins->type = STACK_R8;
5833 ins->dreg = alloc_dreg (cfg, STACK_R8);
5835 MONO_ADD_INS (bblock, ins);
5845 gboolean use_aotconst = FALSE;
5847 #ifdef TARGET_POWERPC
5848 /* FIXME: Clean this up */
5849 if (cfg->compile_aot)
5850 use_aotconst = TRUE;
5853 /* FIXME: we should really allocate this only late in the compilation process */
5854 d = mono_domain_alloc (cfg->domain, sizeof (double));
5856 CHECK_STACK_OVF (1);
5862 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
5864 dreg = alloc_freg (cfg);
5865 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
5866 ins->type = STACK_R8;
5868 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5869 ins->type = STACK_R8;
5870 ins->dreg = alloc_dreg (cfg, STACK_R8);
5872 MONO_ADD_INS (bblock, ins);
5881 MonoInst *temp, *store;
5883 CHECK_STACK_OVF (1);
5887 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
5888 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
5890 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5893 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5906 if (sp [0]->type == STACK_R8)
5907 /* we need to pop the value from the x86 FP stack */
5908 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
5917 if (stack_start != sp)
5919 token = read32 (ip + 1);
5920 /* FIXME: check the signature matches */
5921 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5926 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
5927 GENERIC_SHARING_FAILURE (CEE_JMP);
5929 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5930 CHECK_CFG_EXCEPTION;
5932 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
5934 MonoMethodSignature *fsig = mono_method_signature (cmethod);
5937 /* Handle tail calls similarly to calls */
5938 n = fsig->param_count + fsig->hasthis;
5940 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
5941 call->method = cmethod;
5942 call->tail_call = TRUE;
5943 call->signature = mono_method_signature (cmethod);
5944 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
5945 call->inst.inst_p0 = cmethod;
5946 for (i = 0; i < n; ++i)
5947 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
5949 mono_arch_emit_call (cfg, call);
5950 MONO_ADD_INS (bblock, (MonoInst*)call);
5953 for (i = 0; i < num_args; ++i)
5954 /* Prevent arguments from being optimized away */
5955 arg_array [i]->flags |= MONO_INST_VOLATILE;
5957 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
5958 ins = (MonoInst*)call;
5959 ins->inst_p0 = cmethod;
5960 MONO_ADD_INS (bblock, ins);
5964 start_new_bblock = 1;
5969 case CEE_CALLVIRT: {
5970 MonoInst *addr = NULL;
5971 MonoMethodSignature *fsig = NULL;
5973 int virtual = *ip == CEE_CALLVIRT;
5974 int calli = *ip == CEE_CALLI;
5975 gboolean pass_imt_from_rgctx = FALSE;
5976 MonoInst *imt_arg = NULL;
5977 gboolean pass_vtable = FALSE;
5978 gboolean pass_mrgctx = FALSE;
5979 MonoInst *vtable_arg = NULL;
5980 gboolean check_this = FALSE;
5981 gboolean supported_tail_call = FALSE;
5984 token = read32 (ip + 1);
5991 if (method->wrapper_type != MONO_WRAPPER_NONE)
5992 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
5994 fsig = mono_metadata_parse_signature (image, token);
5996 n = fsig->param_count + fsig->hasthis;
5998 if (method->dynamic && fsig->pinvoke) {
6002 * This is a call through a function pointer using a pinvoke
6003 * signature. Have to create a wrapper and call that instead.
6004 * FIXME: This is very slow, need to create a wrapper at JIT time
6005 * instead based on the signature.
6007 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
6008 EMIT_NEW_PCONST (cfg, args [1], fsig);
6010 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6013 MonoMethod *cil_method;
6015 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6016 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6017 cil_method = cmethod;
6018 } else if (constrained_call) {
6019 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6021 * This is needed since get_method_constrained can't find
6022 * the method in klass representing a type var.
6023 * The type var is guaranteed to be a reference type in this
6026 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6027 cil_method = cmethod;
6028 g_assert (!cmethod->klass->valuetype);
6030 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6033 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6034 cil_method = cmethod;
6039 if (!dont_verify && !cfg->skip_visibility) {
6040 MonoMethod *target_method = cil_method;
6041 if (method->is_inflated) {
6042 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6044 if (!mono_method_can_access_method (method_definition, target_method) &&
6045 !mono_method_can_access_method (method, cil_method))
6046 METHOD_ACCESS_FAILURE;
6049 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6050 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6052 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6053 /* MS.NET seems to silently convert this to a callvirt */
6058 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
6059 * converts to a callvirt.
6061 * tests/bug-515884.il is an example of this behavior
6063 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
6064 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
6065 if (!virtual && cmethod->klass->marshalbyref && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
6069 if (!cmethod->klass->inited)
6070 if (!mono_class_init (cmethod->klass))
6073 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6074 mini_class_is_system_array (cmethod->klass)) {
6075 array_rank = cmethod->klass->rank;
6076 fsig = mono_method_signature (cmethod);
6078 fsig = mono_method_signature (cmethod);
6083 if (fsig->pinvoke) {
6084 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6085 check_for_pending_exc, FALSE);
6086 fsig = mono_method_signature (wrapper);
6087 } else if (constrained_call) {
6088 fsig = mono_method_signature (cmethod);
6090 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6094 mono_save_token_info (cfg, image, token, cil_method);
6096 n = fsig->param_count + fsig->hasthis;
6098 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6099 if (check_linkdemand (cfg, method, cmethod))
6101 CHECK_CFG_EXCEPTION;
6104 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6105 g_assert_not_reached ();
6108 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6111 if (!cfg->generic_sharing_context && cmethod)
6112 g_assert (!mono_method_check_context_used (cmethod));
6116 //g_assert (!virtual || fsig->hasthis);
6120 if (constrained_call) {
6122 * We have the `constrained.' prefix opcode.
6124 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6126 * The type parameter is instantiated as a valuetype,
6127 * but that type doesn't override the method we're
6128 * calling, so we need to box `this'.
6130 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6131 ins->klass = constrained_call;
6132 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
6133 CHECK_CFG_EXCEPTION;
6134 } else if (!constrained_call->valuetype) {
6135 int dreg = alloc_preg (cfg);
6138 * The type parameter is instantiated as a reference
6139 * type. We have a managed pointer on the stack, so
6140 * we need to dereference it here.
6142 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6143 ins->type = STACK_OBJ;
6145 } else if (cmethod->klass->valuetype)
6147 constrained_call = NULL;
6150 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6154 * If the callee is a shared method, then its static cctor
6155 * might not get called after the call was patched.
6157 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6158 emit_generic_class_init (cfg, cmethod->klass);
6159 CHECK_TYPELOAD (cmethod->klass);
6162 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6163 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6164 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6165 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6166 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6169 * Pass vtable iff target method might
6170 * be shared, which means that sharing
6171 * is enabled for its class and its
6172 * context is sharable (and it's not a
6175 if (sharing_enabled && context_sharable &&
6176 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6180 if (cmethod && mini_method_get_context (cmethod) &&
6181 mini_method_get_context (cmethod)->method_inst) {
6182 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6183 MonoGenericContext *context = mini_method_get_context (cmethod);
6184 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6186 g_assert (!pass_vtable);
6188 if (sharing_enabled && context_sharable)
6192 if (cfg->generic_sharing_context && cmethod) {
6193 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6195 context_used = mono_method_check_context_used (cmethod);
6197 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6198 /* Generic method interface
6199 calls are resolved via a
6200 helper function and don't
6202 if (!cmethod_context || !cmethod_context->method_inst)
6203 pass_imt_from_rgctx = TRUE;
6207 * If a shared method calls another
6208 * shared method then the caller must
6209 * have a generic sharing context
6210 * because the magic trampoline
6211 * requires it. FIXME: We shouldn't
6212 * have to force the vtable/mrgctx
6213 * variable here. Instead there
6214 * should be a flag in the cfg to
6215 * request a generic sharing context.
6218 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6219 mono_get_vtable_var (cfg);
6224 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6226 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6228 CHECK_TYPELOAD (cmethod->klass);
6229 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6234 g_assert (!vtable_arg);
6236 if (!cfg->compile_aot) {
6238 * emit_get_rgctx_method () calls mono_class_vtable () so check
6239 * for type load errors before.
6241 mono_class_setup_vtable (cmethod->klass);
6242 CHECK_TYPELOAD (cmethod->klass);
6245 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6247 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6248 MONO_METHOD_IS_FINAL (cmethod)) {
6255 if (pass_imt_from_rgctx) {
6256 g_assert (!pass_vtable);
6259 imt_arg = emit_get_rgctx_method (cfg, context_used,
6260 cmethod, MONO_RGCTX_INFO_METHOD);
6264 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
6266 /* Calling virtual generic methods */
6267 if (cmethod && virtual &&
6268 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6269 !(MONO_METHOD_IS_FINAL (cmethod) &&
6270 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6271 mono_method_signature (cmethod)->generic_param_count) {
6272 MonoInst *this_temp, *this_arg_temp, *store;
6273 MonoInst *iargs [4];
6275 g_assert (mono_method_signature (cmethod)->is_inflated);
6277 /* Prevent inlining of methods that contain indirect calls */
6280 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
6281 /* The llvm vcall trampolines don't support generic virtual calls yet */
6282 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt && !mono_use_llvm) {
6283 g_assert (!imt_arg);
6285 g_assert (cmethod->is_inflated);
6286 imt_arg = emit_get_rgctx_method (cfg, context_used,
6287 cmethod, MONO_RGCTX_INFO_METHOD);
6288 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6292 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6293 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6294 MONO_ADD_INS (bblock, store);
6296 /* FIXME: This should be a managed pointer */
6297 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6299 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6300 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6301 cmethod, MONO_RGCTX_INFO_METHOD);
6302 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6303 addr = mono_emit_jit_icall (cfg,
6304 mono_helper_compile_generic_method, iargs);
6306 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6308 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6311 if (!MONO_TYPE_IS_VOID (fsig->ret))
6312 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6314 CHECK_CFG_EXCEPTION;
6321 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6322 supported_tail_call = cmethod && MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
6324 supported_tail_call = cmethod && mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6328 /* FIXME: runtime generic context pointer for jumps? */
6329 /* FIXME: handle this for generic sharing eventually */
6330 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) && supported_tail_call) {
6333 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6336 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6337 /* Handle tail calls similarly to calls */
6338 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE);
6340 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6341 call->tail_call = TRUE;
6342 call->method = cmethod;
6343 call->signature = mono_method_signature (cmethod);
6346 * We implement tail calls by storing the actual arguments into the
6347 * argument variables, then emitting a CEE_JMP.
6349 for (i = 0; i < n; ++i) {
6350 /* Prevent argument from being register allocated */
6351 arg_array [i]->flags |= MONO_INST_VOLATILE;
6352 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6356 ins = (MonoInst*)call;
6357 ins->inst_p0 = cmethod;
6358 ins->inst_p1 = arg_array [0];
6359 MONO_ADD_INS (bblock, ins);
6360 link_bblock (cfg, bblock, end_bblock);
6361 start_new_bblock = 1;
6363 CHECK_CFG_EXCEPTION;
6365 /* skip CEE_RET as well */
6371 /* Conversion to a JIT intrinsic */
6372 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6373 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6374 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6379 CHECK_CFG_EXCEPTION;
6387 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6388 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6389 mono_method_check_inlining (cfg, cmethod) &&
6390 !g_list_find (dont_inline, cmethod)) {
6392 gboolean allways = FALSE;
6394 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6395 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6396 /* Prevent inlining of methods that call wrappers */
6398 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6402 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6404 cfg->real_offset += 5;
6407 if (!MONO_TYPE_IS_VOID (fsig->ret))
6408 /* *sp is already set by inline_method */
6411 inline_costs += costs;
6417 inline_costs += 10 * num_calls++;
6419 /* Tail recursion elimination */
6420 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6421 gboolean has_vtargs = FALSE;
6424 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6427 /* keep it simple */
6428 for (i = fsig->param_count - 1; i >= 0; i--) {
6429 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6434 for (i = 0; i < n; ++i)
6435 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6436 MONO_INST_NEW (cfg, ins, OP_BR);
6437 MONO_ADD_INS (bblock, ins);
6438 tblock = start_bblock->out_bb [0];
6439 link_bblock (cfg, bblock, tblock);
6440 ins->inst_target_bb = tblock;
6441 start_new_bblock = 1;
6443 /* skip the CEE_RET, too */
6444 if (ip_in_bb (cfg, bblock, ip + 5))
6454 /* Generic sharing */
6455 /* FIXME: only do this for generic methods if
6456 they are not shared! */
6457 if (context_used && !imt_arg && !array_rank &&
6458 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6459 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6460 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6461 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6464 g_assert (cfg->generic_sharing_context && cmethod);
6468 * We are compiling a call to a
6469 * generic method from shared code,
6470 * which means that we have to look up
6471 * the method in the rgctx and do an
6474 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6477 /* Indirect calls */
6479 g_assert (!imt_arg);
6481 if (*ip == CEE_CALL)
6482 g_assert (context_used);
6483 else if (*ip == CEE_CALLI)
6484 g_assert (!vtable_arg);
6486 /* FIXME: what the hell is this??? */
6487 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6488 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6490 /* Prevent inlining of methods with indirect calls */
6494 #ifdef MONO_ARCH_RGCTX_REG
6496 int rgctx_reg = mono_alloc_preg (cfg);
6498 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6499 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6500 call = (MonoCallInst*)ins;
6501 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6502 cfg->uses_rgctx_reg = TRUE;
6503 call->rgctx_reg = TRUE;
6508 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6510 * Instead of emitting an indirect call, emit a direct call
6511 * with the contents of the aotconst as the patch info.
6513 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6515 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6516 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
6519 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6522 if (!MONO_TYPE_IS_VOID (fsig->ret))
6523 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6525 CHECK_CFG_EXCEPTION;
6536 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6537 if (sp [fsig->param_count]->type == STACK_OBJ) {
6538 MonoInst *iargs [2];
6541 iargs [1] = sp [fsig->param_count];
6543 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6546 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6547 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6548 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6549 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6551 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6554 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6555 if (!cmethod->klass->element_class->valuetype && !readonly)
6556 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6557 CHECK_TYPELOAD (cmethod->klass);
6560 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6563 g_assert_not_reached ();
6566 CHECK_CFG_EXCEPTION;
6573 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6575 if (!MONO_TYPE_IS_VOID (fsig->ret))
6576 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6578 CHECK_CFG_EXCEPTION;
6588 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6590 } else if (imt_arg) {
6591 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6593 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6596 if (!MONO_TYPE_IS_VOID (fsig->ret))
6597 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6599 CHECK_CFG_EXCEPTION;
6606 if (cfg->method != method) {
6607 /* return from inlined method */
6609 * If in_count == 0, that means the ret is unreachable due to
6610 * being preceded by a throw. In that case, inline_method () will
6611 * handle setting the return value
6612 * (test case: test_0_inline_throw ()).
6614 if (return_var && cfg->cbb->in_count) {
6618 //g_assert (returnvar != -1);
6619 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6620 cfg->ret_var_set = TRUE;
6624 MonoType *ret_type = mono_method_signature (method)->ret;
6628 * Place a seq point here too even though the IL stack is not
6629 * empty, so a step over on
6632 * will work correctly.
6634 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
6635 MONO_ADD_INS (cfg->cbb, ins);
6638 g_assert (!return_var);
6641 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6644 if (!cfg->vret_addr) {
6647 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6649 EMIT_NEW_RETLOADA (cfg, ret_addr);
6651 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6652 ins->klass = mono_class_from_mono_type (ret_type);
6655 #ifdef MONO_ARCH_SOFT_FLOAT
6656 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6657 MonoInst *iargs [1];
6661 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6662 mono_arch_emit_setret (cfg, method, conv);
6664 mono_arch_emit_setret (cfg, method, *sp);
6667 mono_arch_emit_setret (cfg, method, *sp);
6672 if (sp != stack_start)
6674 MONO_INST_NEW (cfg, ins, OP_BR);
6676 ins->inst_target_bb = end_bblock;
6677 MONO_ADD_INS (bblock, ins);
6678 link_bblock (cfg, bblock, end_bblock);
6679 start_new_bblock = 1;
6683 MONO_INST_NEW (cfg, ins, OP_BR);
6685 target = ip + 1 + (signed char)(*ip);
6687 GET_BBLOCK (cfg, tblock, target);
6688 link_bblock (cfg, bblock, tblock);
6689 ins->inst_target_bb = tblock;
6690 if (sp != stack_start) {
6691 handle_stack_args (cfg, stack_start, sp - stack_start);
6693 CHECK_UNVERIFIABLE (cfg);
6695 MONO_ADD_INS (bblock, ins);
6696 start_new_bblock = 1;
6697 inline_costs += BRANCH_COST;
6711 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6713 target = ip + 1 + *(signed char*)ip;
6719 inline_costs += BRANCH_COST;
6723 MONO_INST_NEW (cfg, ins, OP_BR);
6726 target = ip + 4 + (gint32)read32(ip);
6728 GET_BBLOCK (cfg, tblock, target);
6729 link_bblock (cfg, bblock, tblock);
6730 ins->inst_target_bb = tblock;
6731 if (sp != stack_start) {
6732 handle_stack_args (cfg, stack_start, sp - stack_start);
6734 CHECK_UNVERIFIABLE (cfg);
6737 MONO_ADD_INS (bblock, ins);
6739 start_new_bblock = 1;
6740 inline_costs += BRANCH_COST;
6747 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6748 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6749 guint32 opsize = is_short ? 1 : 4;
6751 CHECK_OPSIZE (opsize);
6753 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6756 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6761 GET_BBLOCK (cfg, tblock, target);
6762 link_bblock (cfg, bblock, tblock);
6763 GET_BBLOCK (cfg, tblock, ip);
6764 link_bblock (cfg, bblock, tblock);
6766 if (sp != stack_start) {
6767 handle_stack_args (cfg, stack_start, sp - stack_start);
6768 CHECK_UNVERIFIABLE (cfg);
6771 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6772 cmp->sreg1 = sp [0]->dreg;
6773 type_from_op (cmp, sp [0], NULL);
6776 #if SIZEOF_REGISTER == 4
6777 if (cmp->opcode == OP_LCOMPARE_IMM) {
6778 /* Convert it to OP_LCOMPARE */
6779 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6780 ins->type = STACK_I8;
6781 ins->dreg = alloc_dreg (cfg, STACK_I8);
6783 MONO_ADD_INS (bblock, ins);
6784 cmp->opcode = OP_LCOMPARE;
6785 cmp->sreg2 = ins->dreg;
6788 MONO_ADD_INS (bblock, cmp);
6790 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6791 type_from_op (ins, sp [0], NULL);
6792 MONO_ADD_INS (bblock, ins);
6793 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
6794 GET_BBLOCK (cfg, tblock, target);
6795 ins->inst_true_bb = tblock;
6796 GET_BBLOCK (cfg, tblock, ip);
6797 ins->inst_false_bb = tblock;
6798 start_new_bblock = 2;
6801 inline_costs += BRANCH_COST;
6816 MONO_INST_NEW (cfg, ins, *ip);
6818 target = ip + 4 + (gint32)read32(ip);
6824 inline_costs += BRANCH_COST;
6828 MonoBasicBlock **targets;
6829 MonoBasicBlock *default_bblock;
6830 MonoJumpInfoBBTable *table;
6831 int offset_reg = alloc_preg (cfg);
6832 int target_reg = alloc_preg (cfg);
6833 int table_reg = alloc_preg (cfg);
6834 int sum_reg = alloc_preg (cfg);
6835 gboolean use_op_switch;
6839 n = read32 (ip + 1);
6842 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
6846 CHECK_OPSIZE (n * sizeof (guint32));
6847 target = ip + n * sizeof (guint32);
6849 GET_BBLOCK (cfg, default_bblock, target);
6851 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
6852 for (i = 0; i < n; ++i) {
6853 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
6854 targets [i] = tblock;
6858 if (sp != stack_start) {
6860 * Link the current bb with the targets as well, so handle_stack_args
6861 * will set their in_stack correctly.
6863 link_bblock (cfg, bblock, default_bblock);
6864 for (i = 0; i < n; ++i)
6865 link_bblock (cfg, bblock, targets [i]);
6867 handle_stack_args (cfg, stack_start, sp - stack_start);
6869 CHECK_UNVERIFIABLE (cfg);
6872 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
6873 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
6876 for (i = 0; i < n; ++i)
6877 link_bblock (cfg, bblock, targets [i]);
6879 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
6880 table->table = targets;
6881 table->table_size = n;
6883 use_op_switch = FALSE;
6885 /* ARM implements SWITCH statements differently */
6886 /* FIXME: Make it use the generic implementation */
6887 if (!cfg->compile_aot)
6888 use_op_switch = TRUE;
6891 if (COMPILE_LLVM (cfg))
6892 use_op_switch = TRUE;
6894 cfg->cbb->has_jump_table = 1;
6896 if (use_op_switch) {
6897 MONO_INST_NEW (cfg, ins, OP_SWITCH);
6898 ins->sreg1 = src1->dreg;
6899 ins->inst_p0 = table;
6900 ins->inst_many_bb = targets;
6901 ins->klass = GUINT_TO_POINTER (n);
6902 MONO_ADD_INS (cfg->cbb, ins);
6904 if (sizeof (gpointer) == 8)
6905 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
6907 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
6909 #if SIZEOF_REGISTER == 8
6910 /* The upper word might not be zero, and we add it to a 64 bit address later */
6911 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
6914 if (cfg->compile_aot) {
6915 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
6917 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
6918 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
6919 ins->inst_p0 = table;
6920 ins->dreg = table_reg;
6921 MONO_ADD_INS (cfg->cbb, ins);
6924 /* FIXME: Use load_memindex */
6925 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
6926 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
6927 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
6929 start_new_bblock = 1;
6930 inline_costs += (BRANCH_COST * 2);
6950 dreg = alloc_freg (cfg);
6953 dreg = alloc_lreg (cfg);
6956 dreg = alloc_preg (cfg);
6959 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
6960 ins->type = ldind_type [*ip - CEE_LDIND_I1];
6961 ins->flags |= ins_flag;
6963 MONO_ADD_INS (bblock, ins);
6978 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
6979 ins->flags |= ins_flag;
6981 MONO_ADD_INS (bblock, ins);
6983 #if HAVE_WRITE_BARRIERS
6984 if (*ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0))) {
6985 MonoInst *dummy_use;
6986 /* insert call to write barrier */
6987 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
6988 mono_emit_method_call (cfg, write_barrier, sp, NULL);
6989 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
7000 MONO_INST_NEW (cfg, ins, (*ip));
7002 ins->sreg1 = sp [0]->dreg;
7003 ins->sreg2 = sp [1]->dreg;
7004 type_from_op (ins, sp [0], sp [1]);
7006 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7008 /* Use the immediate opcodes if possible */
7009 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
7010 int imm_opcode = mono_op_to_op_imm (ins->opcode);
7011 if (imm_opcode != -1) {
7012 ins->opcode = imm_opcode;
7013 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7016 sp [1]->opcode = OP_NOP;
7020 MONO_ADD_INS ((cfg)->cbb, (ins));
7022 *sp++ = mono_decompose_opcode (cfg, ins);
7039 MONO_INST_NEW (cfg, ins, (*ip));
7041 ins->sreg1 = sp [0]->dreg;
7042 ins->sreg2 = sp [1]->dreg;
7043 type_from_op (ins, sp [0], sp [1]);
7045 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7046 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7048 /* FIXME: Pass opcode to is_inst_imm */
7050 /* Use the immediate opcodes if possible */
7051 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7054 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7055 if (imm_opcode != -1) {
7056 ins->opcode = imm_opcode;
7057 if (sp [1]->opcode == OP_I8CONST) {
7058 #if SIZEOF_REGISTER == 8
7059 ins->inst_imm = sp [1]->inst_l;
7061 ins->inst_ls_word = sp [1]->inst_ls_word;
7062 ins->inst_ms_word = sp [1]->inst_ms_word;
7066 ins->inst_imm = (gssize)(sp [1]->inst_c0);
7069 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7070 if (sp [1]->next == NULL)
7071 sp [1]->opcode = OP_NOP;
7074 MONO_ADD_INS ((cfg)->cbb, (ins));
7076 *sp++ = mono_decompose_opcode (cfg, ins);
7089 case CEE_CONV_OVF_I8:
7090 case CEE_CONV_OVF_U8:
7094 /* Special case this earlier so we have long constants in the IR */
7095 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7096 int data = sp [-1]->inst_c0;
7097 sp [-1]->opcode = OP_I8CONST;
7098 sp [-1]->type = STACK_I8;
7099 #if SIZEOF_REGISTER == 8
7100 if ((*ip) == CEE_CONV_U8)
7101 sp [-1]->inst_c0 = (guint32)data;
7103 sp [-1]->inst_c0 = data;
7105 sp [-1]->inst_ls_word = data;
7106 if ((*ip) == CEE_CONV_U8)
7107 sp [-1]->inst_ms_word = 0;
7109 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7111 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7118 case CEE_CONV_OVF_I4:
7119 case CEE_CONV_OVF_I1:
7120 case CEE_CONV_OVF_I2:
7121 case CEE_CONV_OVF_I:
7122 case CEE_CONV_OVF_U:
7125 if (sp [-1]->type == STACK_R8) {
7126 ADD_UNOP (CEE_CONV_OVF_I8);
7133 case CEE_CONV_OVF_U1:
7134 case CEE_CONV_OVF_U2:
7135 case CEE_CONV_OVF_U4:
7138 if (sp [-1]->type == STACK_R8) {
7139 ADD_UNOP (CEE_CONV_OVF_U8);
7146 case CEE_CONV_OVF_I1_UN:
7147 case CEE_CONV_OVF_I2_UN:
7148 case CEE_CONV_OVF_I4_UN:
7149 case CEE_CONV_OVF_I8_UN:
7150 case CEE_CONV_OVF_U1_UN:
7151 case CEE_CONV_OVF_U2_UN:
7152 case CEE_CONV_OVF_U4_UN:
7153 case CEE_CONV_OVF_U8_UN:
7154 case CEE_CONV_OVF_I_UN:
7155 case CEE_CONV_OVF_U_UN:
7162 CHECK_CFG_EXCEPTION;
7166 case CEE_ADD_OVF_UN:
7168 case CEE_MUL_OVF_UN:
7170 case CEE_SUB_OVF_UN:
7178 token = read32 (ip + 1);
7179 klass = mini_get_class (method, token, generic_context);
7180 CHECK_TYPELOAD (klass);
7182 if (generic_class_is_reference_type (cfg, klass)) {
7183 MonoInst *store, *load;
7184 int dreg = alloc_preg (cfg);
7186 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7187 load->flags |= ins_flag;
7188 MONO_ADD_INS (cfg->cbb, load);
7190 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7191 store->flags |= ins_flag;
7192 MONO_ADD_INS (cfg->cbb, store);
7194 #if HAVE_WRITE_BARRIERS
7195 if (cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER) {
7196 MonoInst *dummy_use;
7197 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7198 mono_emit_method_call (cfg, write_barrier, sp, NULL);
7199 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
7203 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7215 token = read32 (ip + 1);
7216 klass = mini_get_class (method, token, generic_context);
7217 CHECK_TYPELOAD (klass);
7219 /* Optimize the common ldobj+stloc combination */
7229 loc_index = ip [5] - CEE_STLOC_0;
7236 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7237 CHECK_LOCAL (loc_index);
7239 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7240 ins->dreg = cfg->locals [loc_index]->dreg;
7246 /* Optimize the ldobj+stobj combination */
7247 /* The reference case ends up being a load+store anyway */
7248 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7253 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7260 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7269 CHECK_STACK_OVF (1);
7271 n = read32 (ip + 1);
7273 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7274 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7275 ins->type = STACK_OBJ;
7278 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7279 MonoInst *iargs [1];
7281 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7282 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7284 if (cfg->opt & MONO_OPT_SHARED) {
7285 MonoInst *iargs [3];
7287 if (cfg->compile_aot) {
7288 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7290 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7291 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7292 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7293 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7294 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7296 if (bblock->out_of_line) {
7297 MonoInst *iargs [2];
7299 if (image == mono_defaults.corlib) {
7301 * Avoid relocations in AOT and save some space by using a
7302 * version of helper_ldstr specialized to mscorlib.
7304 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7305 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7307 /* Avoid creating the string object */
7308 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7309 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7310 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7314 if (cfg->compile_aot) {
7315 NEW_LDSTRCONST (cfg, ins, image, n);
7317 MONO_ADD_INS (bblock, ins);
7320 NEW_PCONST (cfg, ins, NULL);
7321 ins->type = STACK_OBJ;
7322 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7324 MONO_ADD_INS (bblock, ins);
7333 MonoInst *iargs [2];
7334 MonoMethodSignature *fsig;
7337 MonoInst *vtable_arg = NULL;
7340 token = read32 (ip + 1);
7341 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7344 fsig = mono_method_get_signature (cmethod, image, token);
7348 mono_save_token_info (cfg, image, token, cmethod);
7350 if (!mono_class_init (cmethod->klass))
7353 if (cfg->generic_sharing_context)
7354 context_used = mono_method_check_context_used (cmethod);
7356 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7357 if (check_linkdemand (cfg, method, cmethod))
7359 CHECK_CFG_EXCEPTION;
7360 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7361 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7364 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7365 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7366 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7367 mono_class_vtable (cfg->domain, cmethod->klass);
7368 CHECK_TYPELOAD (cmethod->klass);
7370 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7371 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7374 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7375 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7377 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7379 CHECK_TYPELOAD (cmethod->klass);
7380 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7385 n = fsig->param_count;
7389 * Generate smaller code for the common newobj <exception> instruction in
7390 * argument checking code.
7392 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7393 is_exception_class (cmethod->klass) && n <= 2 &&
7394 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7395 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7396 MonoInst *iargs [3];
7398 g_assert (!vtable_arg);
7402 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7405 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7409 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7414 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7417 g_assert_not_reached ();
7425 /* move the args to allow room for 'this' in the first position */
7431 /* check_call_signature () requires sp[0] to be set */
7432 this_ins.type = STACK_OBJ;
7434 if (check_call_signature (cfg, fsig, sp))
7439 if (mini_class_is_system_array (cmethod->klass)) {
7440 g_assert (!vtable_arg);
7442 *sp = emit_get_rgctx_method (cfg, context_used,
7443 cmethod, MONO_RGCTX_INFO_METHOD);
7445 /* Avoid varargs in the common case */
7446 if (fsig->param_count == 1)
7447 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7448 else if (fsig->param_count == 2)
7449 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7450 else if (fsig->param_count == 3)
7451 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
7453 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7454 } else if (cmethod->string_ctor) {
7455 g_assert (!context_used);
7456 g_assert (!vtable_arg);
7457 /* we simply pass a null pointer */
7458 EMIT_NEW_PCONST (cfg, *sp, NULL);
7459 /* now call the string ctor */
7460 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7462 MonoInst* callvirt_this_arg = NULL;
7464 if (cmethod->klass->valuetype) {
7465 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7466 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7467 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7472 * The code generated by mini_emit_virtual_call () expects
7473 * iargs [0] to be a boxed instance, but luckily the vcall
7474 * will be transformed into a normal call there.
7476 } else if (context_used) {
7477 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
7480 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7482 CHECK_TYPELOAD (cmethod->klass);
7485 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7486 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7487 * As a workaround, we call class cctors before allocating objects.
7489 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7490 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7491 if (cfg->verbose_level > 2)
7492 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7493 class_inits = g_slist_prepend (class_inits, vtable);
7496 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
7499 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
7502 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7504 /* Now call the actual ctor */
7505 /* Avoid virtual calls to ctors if possible */
7506 if (cmethod->klass->marshalbyref)
7507 callvirt_this_arg = sp [0];
7509 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7510 mono_method_check_inlining (cfg, cmethod) &&
7511 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7512 !g_list_find (dont_inline, cmethod)) {
7515 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7516 cfg->real_offset += 5;
7519 inline_costs += costs - 5;
7522 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7524 } else if (context_used &&
7525 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7526 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7527 MonoInst *cmethod_addr;
7529 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7530 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7532 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7535 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7536 callvirt_this_arg, NULL, vtable_arg);
7540 if (alloc == NULL) {
7542 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7543 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7557 token = read32 (ip + 1);
7558 klass = mini_get_class (method, token, generic_context);
7559 CHECK_TYPELOAD (klass);
7560 if (sp [0]->type != STACK_OBJ)
7563 if (cfg->generic_sharing_context)
7564 context_used = mono_class_check_context_used (klass);
7566 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7573 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7575 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7579 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7580 MonoMethod *mono_castclass;
7581 MonoInst *iargs [1];
7584 mono_castclass = mono_marshal_get_castclass (klass);
7587 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7588 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7589 g_assert (costs > 0);
7592 cfg->real_offset += 5;
7597 inline_costs += costs;
7600 ins = handle_castclass (cfg, klass, *sp, context_used);
7601 CHECK_CFG_EXCEPTION;
7611 token = read32 (ip + 1);
7612 klass = mini_get_class (method, token, generic_context);
7613 CHECK_TYPELOAD (klass);
7614 if (sp [0]->type != STACK_OBJ)
7617 if (cfg->generic_sharing_context)
7618 context_used = mono_class_check_context_used (klass);
7620 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7627 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7629 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7633 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7634 MonoMethod *mono_isinst;
7635 MonoInst *iargs [1];
7638 mono_isinst = mono_marshal_get_isinst (klass);
7641 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7642 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7643 g_assert (costs > 0);
7646 cfg->real_offset += 5;
7651 inline_costs += costs;
7654 ins = handle_isinst (cfg, klass, *sp, context_used);
7655 CHECK_CFG_EXCEPTION;
7662 case CEE_UNBOX_ANY: {
7666 token = read32 (ip + 1);
7667 klass = mini_get_class (method, token, generic_context);
7668 CHECK_TYPELOAD (klass);
7670 mono_save_token_info (cfg, image, token, klass);
7672 if (cfg->generic_sharing_context)
7673 context_used = mono_class_check_context_used (klass);
7675 if (generic_class_is_reference_type (cfg, klass)) {
7676 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
7678 MonoInst *iargs [2];
7683 iargs [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7684 ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
7688 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7689 MonoMethod *mono_castclass;
7690 MonoInst *iargs [1];
7693 mono_castclass = mono_marshal_get_castclass (klass);
7696 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7697 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7699 g_assert (costs > 0);
7702 cfg->real_offset += 5;
7706 inline_costs += costs;
7708 ins = handle_castclass (cfg, klass, *sp, 0);
7709 CHECK_CFG_EXCEPTION;
7717 if (mono_class_is_nullable (klass)) {
7718 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
7725 ins = handle_unbox (cfg, klass, sp, context_used);
7731 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7744 token = read32 (ip + 1);
7745 klass = mini_get_class (method, token, generic_context);
7746 CHECK_TYPELOAD (klass);
7748 mono_save_token_info (cfg, image, token, klass);
7750 if (cfg->generic_sharing_context)
7751 context_used = mono_class_check_context_used (klass);
7753 if (generic_class_is_reference_type (cfg, klass)) {
7759 if (klass == mono_defaults.void_class)
7761 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7763 /* frequent check in generic code: box (struct), brtrue */
7764 if (!mono_class_is_nullable (klass) &&
7765 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
7766 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7768 MONO_INST_NEW (cfg, ins, OP_BR);
7769 if (*ip == CEE_BRTRUE_S) {
7772 target = ip + 1 + (signed char)(*ip);
7777 target = ip + 4 + (gint)(read32 (ip));
7780 GET_BBLOCK (cfg, tblock, target);
7781 link_bblock (cfg, bblock, tblock);
7782 ins->inst_target_bb = tblock;
7783 GET_BBLOCK (cfg, tblock, ip);
7785 * This leads to some inconsistency, since the two bblocks are
7786 * not really connected, but it is needed for handling stack
7787 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
7788 * FIXME: This should only be needed if sp != stack_start, but that
7789 * doesn't work for some reason (test failure in mcs/tests on x86).
7791 link_bblock (cfg, bblock, tblock);
7792 if (sp != stack_start) {
7793 handle_stack_args (cfg, stack_start, sp - stack_start);
7795 CHECK_UNVERIFIABLE (cfg);
7797 MONO_ADD_INS (bblock, ins);
7798 start_new_bblock = 1;
7802 *sp++ = handle_box (cfg, val, klass, context_used);
7804 CHECK_CFG_EXCEPTION;
7813 token = read32 (ip + 1);
7814 klass = mini_get_class (method, token, generic_context);
7815 CHECK_TYPELOAD (klass);
7817 mono_save_token_info (cfg, image, token, klass);
7819 if (cfg->generic_sharing_context)
7820 context_used = mono_class_check_context_used (klass);
7822 if (mono_class_is_nullable (klass)) {
7825 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
7826 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
7830 ins = handle_unbox (cfg, klass, sp, context_used);
7840 MonoClassField *field;
7844 if (*ip == CEE_STFLD) {
7851 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
7853 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
7856 token = read32 (ip + 1);
7857 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7858 field = mono_method_get_wrapper_data (method, token);
7859 klass = field->parent;
7862 field = mono_field_from_token (image, token, &klass, generic_context);
7866 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7867 FIELD_ACCESS_FAILURE;
7868 mono_class_init (klass);
7870 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
7871 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
7872 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
7873 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
7876 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
7877 if (*ip == CEE_STFLD) {
7878 if (target_type_is_incompatible (cfg, field->type, sp [1]))
7880 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7881 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
7882 MonoInst *iargs [5];
7885 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7886 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7887 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
7891 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
7892 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
7893 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7894 g_assert (costs > 0);
7896 cfg->real_offset += 5;
7899 inline_costs += costs;
7901 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
7906 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
7908 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
7910 #if HAVE_WRITE_BARRIERS
7911 if (mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
7912 /* insert call to write barrier */
7913 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7914 MonoInst *iargs [2], *dummy_use;
7917 dreg = alloc_preg (cfg);
7918 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7920 mono_emit_method_call (cfg, write_barrier, iargs, NULL);
7922 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
7926 store->flags |= ins_flag;
7933 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7934 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
7935 MonoInst *iargs [4];
7938 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7939 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7940 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
7941 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
7942 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
7943 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7945 g_assert (costs > 0);
7947 cfg->real_offset += 5;
7951 inline_costs += costs;
7953 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
7957 if (sp [0]->type == STACK_VTYPE) {
7960 /* Have to compute the address of the variable */
7962 var = get_vreg_to_inst (cfg, sp [0]->dreg);
7964 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
7966 g_assert (var->klass == klass);
7968 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
7972 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
7974 if (*ip == CEE_LDFLDA) {
7975 dreg = alloc_preg (cfg);
7977 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7978 ins->klass = mono_class_from_mono_type (field->type);
7979 ins->type = STACK_MP;
7984 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
7985 load->flags |= ins_flag;
7986 load->flags |= MONO_INST_FAULT;
7997 MonoClassField *field;
7998 gpointer addr = NULL;
7999 gboolean is_special_static;
8002 token = read32 (ip + 1);
8004 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8005 field = mono_method_get_wrapper_data (method, token);
8006 klass = field->parent;
8009 field = mono_field_from_token (image, token, &klass, generic_context);
8012 mono_class_init (klass);
8013 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8014 FIELD_ACCESS_FAILURE;
8016 /* if the class is Critical then transparent code cannot access it's fields */
8017 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8018 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8021 * We can only support shared generic static
8022 * field access on architectures where the
8023 * trampoline code has been extended to handle
8024 * the generic class init.
8026 #ifndef MONO_ARCH_VTABLE_REG
8027 GENERIC_SHARING_FAILURE (*ip);
8030 if (cfg->generic_sharing_context)
8031 context_used = mono_class_check_context_used (klass);
8033 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
8035 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8036 * to be called here.
8038 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8039 mono_class_vtable (cfg->domain, klass);
8040 CHECK_TYPELOAD (klass);
8042 mono_domain_lock (cfg->domain);
8043 if (cfg->domain->special_static_fields)
8044 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8045 mono_domain_unlock (cfg->domain);
8047 is_special_static = mono_class_field_is_special_static (field);
8049 /* Generate IR to compute the field address */
8050 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
8052 * Fast access to TLS data
8053 * Inline version of get_thread_static_data () in
8057 int idx, static_data_reg, array_reg, dreg;
8058 MonoInst *thread_ins;
8060 // offset &= 0x7fffffff;
8061 // idx = (offset >> 24) - 1;
8062 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
8064 thread_ins = mono_get_thread_intrinsic (cfg);
8065 MONO_ADD_INS (cfg->cbb, thread_ins);
8066 static_data_reg = alloc_ireg (cfg);
8067 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
8069 if (cfg->compile_aot) {
8070 int offset_reg, offset2_reg, idx_reg;
8072 /* For TLS variables, this will return the TLS offset */
8073 EMIT_NEW_SFLDACONST (cfg, ins, field);
8074 offset_reg = ins->dreg;
8075 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
8076 idx_reg = alloc_ireg (cfg);
8077 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
8078 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
8079 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
8080 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
8081 array_reg = alloc_ireg (cfg);
8082 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
8083 offset2_reg = alloc_ireg (cfg);
8084 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
8085 dreg = alloc_ireg (cfg);
8086 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
8088 offset = (gsize)addr & 0x7fffffff;
8089 idx = (offset >> 24) - 1;
8091 array_reg = alloc_ireg (cfg);
8092 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
8093 dreg = alloc_ireg (cfg);
8094 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
8096 } else if ((cfg->opt & MONO_OPT_SHARED) ||
8097 (cfg->compile_aot && is_special_static) ||
8098 (context_used && is_special_static)) {
8099 MonoInst *iargs [2];
8101 g_assert (field->parent);
8102 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8104 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8105 field, MONO_RGCTX_INFO_CLASS_FIELD);
8107 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8109 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8110 } else if (context_used) {
8111 MonoInst *static_data;
8114 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8115 method->klass->name_space, method->klass->name, method->name,
8116 depth, field->offset);
8119 if (mono_class_needs_cctor_run (klass, method)) {
8123 vtable = emit_get_rgctx_klass (cfg, context_used,
8124 klass, MONO_RGCTX_INFO_VTABLE);
8126 // FIXME: This doesn't work since it tries to pass the argument
8127 // in the normal way, instead of using MONO_ARCH_VTABLE_REG
8129 * The vtable pointer is always passed in a register regardless of
8130 * the calling convention, so assign it manually, and make a call
8131 * using a signature without parameters.
8133 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable);
8134 #ifdef MONO_ARCH_VTABLE_REG
8135 mono_call_inst_add_outarg_reg (cfg, call, vtable->dreg, MONO_ARCH_VTABLE_REG, FALSE);
8136 cfg->uses_vtable_reg = TRUE;
8143 * The pointer we're computing here is
8145 * super_info.static_data + field->offset
8147 static_data = emit_get_rgctx_klass (cfg, context_used,
8148 klass, MONO_RGCTX_INFO_STATIC_DATA);
8150 if (field->offset == 0) {
8153 int addr_reg = mono_alloc_preg (cfg);
8154 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8156 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8157 MonoInst *iargs [2];
8159 g_assert (field->parent);
8160 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8161 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8162 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8164 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8166 CHECK_TYPELOAD (klass);
8168 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8169 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8170 if (cfg->verbose_level > 2)
8171 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8172 class_inits = g_slist_prepend (class_inits, vtable);
8174 if (cfg->run_cctors) {
8176 /* This makes so that inline cannot trigger */
8177 /* .cctors: too many apps depend on them */
8178 /* running with a specific order... */
8179 if (! vtable->initialized)
8181 ex = mono_runtime_class_init_full (vtable, FALSE);
8183 set_exception_object (cfg, ex);
8184 goto exception_exit;
8188 addr = (char*)vtable->data + field->offset;
8190 if (cfg->compile_aot)
8191 EMIT_NEW_SFLDACONST (cfg, ins, field);
8193 EMIT_NEW_PCONST (cfg, ins, addr);
8195 MonoInst *iargs [1];
8196 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8197 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8201 /* Generate IR to do the actual load/store operation */
8203 if (*ip == CEE_LDSFLDA) {
8204 ins->klass = mono_class_from_mono_type (field->type);
8205 ins->type = STACK_PTR;
8207 } else if (*ip == CEE_STSFLD) {
8212 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
8213 store->flags |= ins_flag;
8215 gboolean is_const = FALSE;
8216 MonoVTable *vtable = NULL;
8218 if (!context_used) {
8219 vtable = mono_class_vtable (cfg->domain, klass);
8220 CHECK_TYPELOAD (klass);
8222 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8223 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8224 gpointer addr = (char*)vtable->data + field->offset;
8225 int ro_type = field->type->type;
8226 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
8227 ro_type = mono_class_enum_basetype (field->type->data.klass)->type;
8229 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8232 case MONO_TYPE_BOOLEAN:
8234 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8238 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8241 case MONO_TYPE_CHAR:
8243 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8247 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8252 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8256 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8262 case MONO_TYPE_FNPTR:
8263 #ifndef HAVE_MOVING_COLLECTOR
8264 case MONO_TYPE_STRING:
8265 case MONO_TYPE_OBJECT:
8266 case MONO_TYPE_CLASS:
8267 case MONO_TYPE_SZARRAY:
8268 case MONO_TYPE_ARRAY:
8270 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8271 type_to_eval_stack_type ((cfg), field->type, *sp);
8276 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8281 case MONO_TYPE_VALUETYPE:
8291 CHECK_STACK_OVF (1);
8293 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8294 load->flags |= ins_flag;
8307 token = read32 (ip + 1);
8308 klass = mini_get_class (method, token, generic_context);
8309 CHECK_TYPELOAD (klass);
8310 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8311 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8312 #if HAVE_WRITE_BARRIERS
8313 if (cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
8314 generic_class_is_reference_type (cfg, klass)) {
8315 MonoInst *dummy_use;
8316 /* insert call to write barrier */
8317 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
8318 mono_emit_method_call (cfg, write_barrier, sp, NULL);
8319 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
8332 const char *data_ptr;
8334 guint32 field_token;
8340 token = read32 (ip + 1);
8342 klass = mini_get_class (method, token, generic_context);
8343 CHECK_TYPELOAD (klass);
8345 if (cfg->generic_sharing_context)
8346 context_used = mono_class_check_context_used (klass);
8348 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
8349 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
8350 ins->sreg1 = sp [0]->dreg;
8351 ins->type = STACK_I4;
8352 ins->dreg = alloc_ireg (cfg);
8353 MONO_ADD_INS (cfg->cbb, ins);
8354 *sp = mono_decompose_opcode (cfg, ins);
8359 MonoClass *array_class = mono_array_class_get (klass, 1);
8360 /* FIXME: we cannot get a managed
8361 allocator because we can't get the
8362 open generic class's vtable. We
8363 have the same problem in
8364 handle_alloc(). This
8365 needs to be solved so that we can
8366 have managed allocs of shared
8369 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
8370 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
8372 MonoMethod *managed_alloc = NULL;
8374 /* FIXME: Decompose later to help abcrem */
8377 args [0] = emit_get_rgctx_klass (cfg, context_used,
8378 array_class, MONO_RGCTX_INFO_VTABLE);
8383 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
8385 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8387 if (cfg->opt & MONO_OPT_SHARED) {
8388 /* Decompose now to avoid problems with references to the domainvar */
8389 MonoInst *iargs [3];
8391 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8392 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8395 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8397 /* Decompose later since it is needed by abcrem */
8398 MonoClass *array_type = mono_array_class_get (klass, 1);
8399 mono_class_vtable (cfg->domain, array_type);
8400 CHECK_TYPELOAD (array_type);
8402 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8403 ins->dreg = alloc_preg (cfg);
8404 ins->sreg1 = sp [0]->dreg;
8405 ins->inst_newa_class = klass;
8406 ins->type = STACK_OBJ;
8408 MONO_ADD_INS (cfg->cbb, ins);
8409 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8410 cfg->cbb->has_array_access = TRUE;
8412 /* Needed so mono_emit_load_get_addr () gets called */
8413 mono_get_got_var (cfg);
8423 * we inline/optimize the initialization sequence if possible.
8424 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8425 * for small sizes open code the memcpy
8426 * ensure the rva field is big enough
8428 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8429 MonoMethod *memcpy_method = get_memcpy_method ();
8430 MonoInst *iargs [3];
8431 int add_reg = alloc_preg (cfg);
8433 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8434 if (cfg->compile_aot) {
8435 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8437 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8439 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8440 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8449 if (sp [0]->type != STACK_OBJ)
8452 dreg = alloc_preg (cfg);
8453 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8454 ins->dreg = alloc_preg (cfg);
8455 ins->sreg1 = sp [0]->dreg;
8456 ins->type = STACK_I4;
8457 MONO_ADD_INS (cfg->cbb, ins);
8458 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8459 cfg->cbb->has_array_access = TRUE;
8467 if (sp [0]->type != STACK_OBJ)
8470 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8472 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8473 CHECK_TYPELOAD (klass);
8474 /* we need to make sure that this array is exactly the type it needs
8475 * to be for correctness. the wrappers are lax with their usage
8476 * so we need to ignore them here
8478 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
8479 MonoClass *array_class = mono_array_class_get (klass, 1);
8480 mini_emit_check_array_type (cfg, sp [0], array_class);
8481 CHECK_TYPELOAD (array_class);
8485 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8500 case CEE_LDELEM_REF: {
8506 if (*ip == CEE_LDELEM) {
8508 token = read32 (ip + 1);
8509 klass = mini_get_class (method, token, generic_context);
8510 CHECK_TYPELOAD (klass);
8511 mono_class_init (klass);
8514 klass = array_access_to_klass (*ip);
8516 if (sp [0]->type != STACK_OBJ)
8519 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8521 if (sp [1]->opcode == OP_ICONST) {
8522 int array_reg = sp [0]->dreg;
8523 int index_reg = sp [1]->dreg;
8524 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8526 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8527 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8529 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8530 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8533 if (*ip == CEE_LDELEM)
8546 case CEE_STELEM_REF:
8553 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8555 if (*ip == CEE_STELEM) {
8557 token = read32 (ip + 1);
8558 klass = mini_get_class (method, token, generic_context);
8559 CHECK_TYPELOAD (klass);
8560 mono_class_init (klass);
8563 klass = array_access_to_klass (*ip);
8565 if (sp [0]->type != STACK_OBJ)
8568 /* storing a NULL doesn't need any of the complex checks in stelemref */
8569 if (generic_class_is_reference_type (cfg, klass) &&
8570 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8571 MonoMethod* helper = mono_marshal_get_stelemref ();
8572 MonoInst *iargs [3];
8574 if (sp [0]->type != STACK_OBJ)
8576 if (sp [2]->type != STACK_OBJ)
8583 mono_emit_method_call (cfg, helper, iargs, NULL);
8585 if (sp [1]->opcode == OP_ICONST) {
8586 int array_reg = sp [0]->dreg;
8587 int index_reg = sp [1]->dreg;
8588 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8590 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8591 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8593 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
8594 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8598 if (*ip == CEE_STELEM)
8605 case CEE_CKFINITE: {
8609 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8610 ins->sreg1 = sp [0]->dreg;
8611 ins->dreg = alloc_freg (cfg);
8612 ins->type = STACK_R8;
8613 MONO_ADD_INS (bblock, ins);
8615 *sp++ = mono_decompose_opcode (cfg, ins);
8620 case CEE_REFANYVAL: {
8621 MonoInst *src_var, *src;
8623 int klass_reg = alloc_preg (cfg);
8624 int dreg = alloc_preg (cfg);
8627 MONO_INST_NEW (cfg, ins, *ip);
8630 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8631 CHECK_TYPELOAD (klass);
8632 mono_class_init (klass);
8634 if (cfg->generic_sharing_context)
8635 context_used = mono_class_check_context_used (klass);
8638 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8640 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8641 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8642 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8645 MonoInst *klass_ins;
8647 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8648 klass, MONO_RGCTX_INFO_KLASS);
8651 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8652 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8654 mini_emit_class_check (cfg, klass_reg, klass);
8656 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8657 ins->type = STACK_MP;
8662 case CEE_MKREFANY: {
8663 MonoInst *loc, *addr;
8666 MONO_INST_NEW (cfg, ins, *ip);
8669 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8670 CHECK_TYPELOAD (klass);
8671 mono_class_init (klass);
8673 if (cfg->generic_sharing_context)
8674 context_used = mono_class_check_context_used (klass);
8676 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8677 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8680 MonoInst *const_ins;
8681 int type_reg = alloc_preg (cfg);
8683 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8684 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8685 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8686 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8687 } else if (cfg->compile_aot) {
8688 int const_reg = alloc_preg (cfg);
8689 int type_reg = alloc_preg (cfg);
8691 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8692 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8693 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8694 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8696 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8697 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8699 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8701 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8702 ins->type = STACK_VTYPE;
8703 ins->klass = mono_defaults.typed_reference_class;
8710 MonoClass *handle_class;
8712 CHECK_STACK_OVF (1);
8715 n = read32 (ip + 1);
8717 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
8718 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8719 handle = mono_method_get_wrapper_data (method, n);
8720 handle_class = mono_method_get_wrapper_data (method, n + 1);
8721 if (handle_class == mono_defaults.typehandle_class)
8722 handle = &((MonoClass*)handle)->byval_arg;
8725 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8729 mono_class_init (handle_class);
8730 if (cfg->generic_sharing_context) {
8731 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
8732 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
8733 /* This case handles ldtoken
8734 of an open type, like for
8737 } else if (handle_class == mono_defaults.typehandle_class) {
8738 /* If we get a MONO_TYPE_CLASS
8739 then we need to provide the
8741 instantiation of it. */
8742 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8745 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8746 } else if (handle_class == mono_defaults.fieldhandle_class)
8747 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8748 else if (handle_class == mono_defaults.methodhandle_class)
8749 context_used = mono_method_check_context_used (handle);
8751 g_assert_not_reached ();
8754 if ((cfg->opt & MONO_OPT_SHARED) &&
8755 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
8756 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
8757 MonoInst *addr, *vtvar, *iargs [3];
8758 int method_context_used;
8760 if (cfg->generic_sharing_context)
8761 method_context_used = mono_method_check_context_used (method);
8763 method_context_used = 0;
8765 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8767 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8768 EMIT_NEW_ICONST (cfg, iargs [1], n);
8769 if (method_context_used) {
8770 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
8771 method, MONO_RGCTX_INFO_METHOD);
8772 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8774 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
8775 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
8777 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8779 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8781 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8783 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
8784 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
8785 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
8786 (cmethod->klass == mono_defaults.monotype_class->parent) &&
8787 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
8788 MonoClass *tclass = mono_class_from_mono_type (handle);
8790 mono_class_init (tclass);
8792 ins = emit_get_rgctx_klass (cfg, context_used,
8793 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
8794 } else if (cfg->compile_aot) {
8795 if (method->wrapper_type) {
8796 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
8797 /* Special case for static synchronized wrappers */
8798 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
8800 /* FIXME: n is not a normal token */
8801 cfg->disable_aot = TRUE;
8802 EMIT_NEW_PCONST (cfg, ins, NULL);
8805 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
8808 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
8810 ins->type = STACK_OBJ;
8811 ins->klass = cmethod->klass;
8814 MonoInst *addr, *vtvar;
8816 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8819 if (handle_class == mono_defaults.typehandle_class) {
8820 ins = emit_get_rgctx_klass (cfg, context_used,
8821 mono_class_from_mono_type (handle),
8822 MONO_RGCTX_INFO_TYPE);
8823 } else if (handle_class == mono_defaults.methodhandle_class) {
8824 ins = emit_get_rgctx_method (cfg, context_used,
8825 handle, MONO_RGCTX_INFO_METHOD);
8826 } else if (handle_class == mono_defaults.fieldhandle_class) {
8827 ins = emit_get_rgctx_field (cfg, context_used,
8828 handle, MONO_RGCTX_INFO_CLASS_FIELD);
8830 g_assert_not_reached ();
8832 } else if (cfg->compile_aot) {
8833 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
8835 EMIT_NEW_PCONST (cfg, ins, handle);
8837 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8838 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8839 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8849 MONO_INST_NEW (cfg, ins, OP_THROW);
8851 ins->sreg1 = sp [0]->dreg;
8853 bblock->out_of_line = TRUE;
8854 MONO_ADD_INS (bblock, ins);
8855 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
8856 MONO_ADD_INS (bblock, ins);
8859 link_bblock (cfg, bblock, end_bblock);
8860 start_new_bblock = 1;
8862 case CEE_ENDFINALLY:
8863 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
8864 MONO_ADD_INS (bblock, ins);
8866 start_new_bblock = 1;
8869 * Control will leave the method so empty the stack, otherwise
8870 * the next basic block will start with a nonempty stack.
8872 while (sp != stack_start) {
8880 if (*ip == CEE_LEAVE) {
8882 target = ip + 5 + (gint32)read32(ip + 1);
8885 target = ip + 2 + (signed char)(ip [1]);
8888 /* empty the stack */
8889 while (sp != stack_start) {
8894 * If this leave statement is in a catch block, check for a
8895 * pending exception, and rethrow it if necessary.
8896 * We avoid doing this in runtime invoke wrappers, since those are called
8897 * by native code which expects the wrapper to catch all exceptions.
8899 for (i = 0; i < header->num_clauses; ++i) {
8900 MonoExceptionClause *clause = &header->clauses [i];
8903 * Use <= in the final comparison to handle clauses with multiple
8904 * leave statements, like in bug #78024.
8905 * The ordering of the exception clauses guarantees that we find the
8908 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
8910 MonoBasicBlock *dont_throw;
8915 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
8918 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
8920 NEW_BBLOCK (cfg, dont_throw);
8923 * Currently, we always rethrow the abort exception, despite the
8924 * fact that this is not correct. See thread6.cs for an example.
8925 * But propagating the abort exception is more important than
8926 * getting the semantics right.
8928 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
8929 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
8930 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
8932 MONO_START_BB (cfg, dont_throw);
8937 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
8939 MonoExceptionClause *clause;
8941 for (tmp = handlers; tmp; tmp = tmp->next) {
8943 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
8945 link_bblock (cfg, bblock, tblock);
8946 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
8947 ins->inst_target_bb = tblock;
8948 ins->inst_eh_block = clause;
8949 MONO_ADD_INS (bblock, ins);
8950 bblock->has_call_handler = 1;
8951 if (COMPILE_LLVM (cfg)) {
8952 MonoBasicBlock *target_bb;
8955 * Link the finally bblock with the target, since it will
8956 * conceptually branch there.
8957 * FIXME: Have to link the bblock containing the endfinally.
8959 GET_BBLOCK (cfg, target_bb, target);
8960 link_bblock (cfg, tblock, target_bb);
8963 g_list_free (handlers);
8966 MONO_INST_NEW (cfg, ins, OP_BR);
8967 MONO_ADD_INS (bblock, ins);
8968 GET_BBLOCK (cfg, tblock, target);
8969 link_bblock (cfg, bblock, tblock);
8970 ins->inst_target_bb = tblock;
8971 start_new_bblock = 1;
8973 if (*ip == CEE_LEAVE)
8982 * Mono specific opcodes
8984 case MONO_CUSTOM_PREFIX: {
8986 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
8990 case CEE_MONO_ICALL: {
8992 MonoJitICallInfo *info;
8994 token = read32 (ip + 2);
8995 func = mono_method_get_wrapper_data (method, token);
8996 info = mono_find_jit_icall_by_addr (func);
8999 CHECK_STACK (info->sig->param_count);
9000 sp -= info->sig->param_count;
9002 ins = mono_emit_jit_icall (cfg, info->func, sp);
9003 if (!MONO_TYPE_IS_VOID (info->sig->ret))
9007 inline_costs += 10 * num_calls++;
9011 case CEE_MONO_LDPTR: {
9014 CHECK_STACK_OVF (1);
9016 token = read32 (ip + 2);
9018 ptr = mono_method_get_wrapper_data (method, token);
9019 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
9020 MonoJitICallInfo *callinfo;
9021 const char *icall_name;
9023 icall_name = method->name + strlen ("__icall_wrapper_");
9024 g_assert (icall_name);
9025 callinfo = mono_find_jit_icall_by_name (icall_name);
9026 g_assert (callinfo);
9028 if (ptr == callinfo->func) {
9029 /* Will be transformed into an AOTCONST later */
9030 EMIT_NEW_PCONST (cfg, ins, ptr);
9036 /* FIXME: Generalize this */
9037 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
9038 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
9043 EMIT_NEW_PCONST (cfg, ins, ptr);
9046 inline_costs += 10 * num_calls++;
9047 /* Can't embed random pointers into AOT code */
9048 cfg->disable_aot = 1;
9051 case CEE_MONO_ICALL_ADDR: {
9052 MonoMethod *cmethod;
9055 CHECK_STACK_OVF (1);
9057 token = read32 (ip + 2);
9059 cmethod = mono_method_get_wrapper_data (method, token);
9061 if (cfg->compile_aot) {
9062 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
9064 ptr = mono_lookup_internal_call (cmethod);
9066 EMIT_NEW_PCONST (cfg, ins, ptr);
9072 case CEE_MONO_VTADDR: {
9073 MonoInst *src_var, *src;
9079 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9080 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
9085 case CEE_MONO_NEWOBJ: {
9086 MonoInst *iargs [2];
9088 CHECK_STACK_OVF (1);
9090 token = read32 (ip + 2);
9091 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9092 mono_class_init (klass);
9093 NEW_DOMAINCONST (cfg, iargs [0]);
9094 MONO_ADD_INS (cfg->cbb, iargs [0]);
9095 NEW_CLASSCONST (cfg, iargs [1], klass);
9096 MONO_ADD_INS (cfg->cbb, iargs [1]);
9097 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9099 inline_costs += 10 * num_calls++;
9102 case CEE_MONO_OBJADDR:
9105 MONO_INST_NEW (cfg, ins, OP_MOVE);
9106 ins->dreg = alloc_preg (cfg);
9107 ins->sreg1 = sp [0]->dreg;
9108 ins->type = STACK_MP;
9109 MONO_ADD_INS (cfg->cbb, ins);
9113 case CEE_MONO_LDNATIVEOBJ:
9115 * Similar to LDOBJ, but instead load the unmanaged
9116 * representation of the vtype to the stack.
9121 token = read32 (ip + 2);
9122 klass = mono_method_get_wrapper_data (method, token);
9123 g_assert (klass->valuetype);
9124 mono_class_init (klass);
9127 MonoInst *src, *dest, *temp;
9130 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9131 temp->backend.is_pinvoke = 1;
9132 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9133 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9135 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9136 dest->type = STACK_VTYPE;
9137 dest->klass = klass;
9143 case CEE_MONO_RETOBJ: {
9145 * Same as RET, but return the native representation of a vtype
9148 g_assert (cfg->ret);
9149 g_assert (mono_method_signature (method)->pinvoke);
9154 token = read32 (ip + 2);
9155 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9157 if (!cfg->vret_addr) {
9158 g_assert (cfg->ret_var_is_local);
9160 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9162 EMIT_NEW_RETLOADA (cfg, ins);
9164 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9166 if (sp != stack_start)
9169 MONO_INST_NEW (cfg, ins, OP_BR);
9170 ins->inst_target_bb = end_bblock;
9171 MONO_ADD_INS (bblock, ins);
9172 link_bblock (cfg, bblock, end_bblock);
9173 start_new_bblock = 1;
9177 case CEE_MONO_CISINST:
9178 case CEE_MONO_CCASTCLASS: {
9183 token = read32 (ip + 2);
9184 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9185 if (ip [1] == CEE_MONO_CISINST)
9186 ins = handle_cisinst (cfg, klass, sp [0]);
9188 ins = handle_ccastclass (cfg, klass, sp [0]);
9194 case CEE_MONO_SAVE_LMF:
9195 case CEE_MONO_RESTORE_LMF:
9196 #ifdef MONO_ARCH_HAVE_LMF_OPS
9197 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9198 MONO_ADD_INS (bblock, ins);
9199 cfg->need_lmf_area = TRUE;
9203 case CEE_MONO_CLASSCONST:
9204 CHECK_STACK_OVF (1);
9206 token = read32 (ip + 2);
9207 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9210 inline_costs += 10 * num_calls++;
9212 case CEE_MONO_NOT_TAKEN:
9213 bblock->out_of_line = TRUE;
9217 CHECK_STACK_OVF (1);
9219 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9220 ins->dreg = alloc_preg (cfg);
9221 ins->inst_offset = (gint32)read32 (ip + 2);
9222 ins->type = STACK_PTR;
9223 MONO_ADD_INS (bblock, ins);
9227 case CEE_MONO_DYN_CALL: {
9230 /* It would be easier to call a trampoline, but that would put an
9231 * extra frame on the stack, confusing exception handling. So
9232 * implement it inline using an opcode for now.
9235 if (!cfg->dyn_call_var) {
9236 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9237 /* prevent it from being register allocated */
9238 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
9241 /* Has to use a call inst since the local regalloc expects it */
9242 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
9243 ins = (MonoInst*)call;
9245 ins->sreg1 = sp [0]->dreg;
9246 ins->sreg2 = sp [1]->dreg;
9247 MONO_ADD_INS (bblock, ins);
9249 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
9250 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
9254 inline_costs += 10 * num_calls++;
9259 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9269 /* somewhat similar to LDTOKEN */
9270 MonoInst *addr, *vtvar;
9271 CHECK_STACK_OVF (1);
9272 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9274 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9275 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9277 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9278 ins->type = STACK_VTYPE;
9279 ins->klass = mono_defaults.argumenthandle_class;
9292 * The following transforms:
9293 * CEE_CEQ into OP_CEQ
9294 * CEE_CGT into OP_CGT
9295 * CEE_CGT_UN into OP_CGT_UN
9296 * CEE_CLT into OP_CLT
9297 * CEE_CLT_UN into OP_CLT_UN
9299 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9301 MONO_INST_NEW (cfg, ins, cmp->opcode);
9303 cmp->sreg1 = sp [0]->dreg;
9304 cmp->sreg2 = sp [1]->dreg;
9305 type_from_op (cmp, sp [0], sp [1]);
9307 if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9308 cmp->opcode = OP_LCOMPARE;
9309 else if (sp [0]->type == STACK_R8)
9310 cmp->opcode = OP_FCOMPARE;
9312 cmp->opcode = OP_ICOMPARE;
9313 MONO_ADD_INS (bblock, cmp);
9314 ins->type = STACK_I4;
9315 ins->dreg = alloc_dreg (cfg, ins->type);
9316 type_from_op (ins, sp [0], sp [1]);
9318 if (cmp->opcode == OP_FCOMPARE) {
9320 * The backends expect the fceq opcodes to do the
9323 cmp->opcode = OP_NOP;
9324 ins->sreg1 = cmp->sreg1;
9325 ins->sreg2 = cmp->sreg2;
9327 MONO_ADD_INS (bblock, ins);
9334 MonoMethod *cil_method;
9335 gboolean needs_static_rgctx_invoke;
9337 CHECK_STACK_OVF (1);
9339 n = read32 (ip + 2);
9340 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9343 mono_class_init (cmethod->klass);
9345 mono_save_token_info (cfg, image, n, cmethod);
9347 if (cfg->generic_sharing_context)
9348 context_used = mono_method_check_context_used (cmethod);
9350 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9352 cil_method = cmethod;
9353 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9354 METHOD_ACCESS_FAILURE;
9356 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9357 if (check_linkdemand (cfg, method, cmethod))
9359 CHECK_CFG_EXCEPTION;
9360 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9361 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9365 * Optimize the common case of ldftn+delegate creation
9367 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9368 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9369 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9371 int invoke_context_used = 0;
9373 invoke = mono_get_delegate_invoke (ctor_method->klass);
9374 if (!invoke || !mono_method_signature (invoke))
9377 if (cfg->generic_sharing_context)
9378 invoke_context_used = mono_method_check_context_used (invoke);
9380 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
9381 /* FIXME: SGEN support */
9382 if (invoke_context_used == 0) {
9383 MonoInst *target_ins;
9386 if (cfg->verbose_level > 3)
9387 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9388 target_ins = sp [-1];
9390 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
9391 CHECK_CFG_EXCEPTION;
9400 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9401 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9405 inline_costs += 10 * num_calls++;
9408 case CEE_LDVIRTFTN: {
9413 n = read32 (ip + 2);
9414 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9417 mono_class_init (cmethod->klass);
9419 if (cfg->generic_sharing_context)
9420 context_used = mono_method_check_context_used (cmethod);
9422 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9423 if (check_linkdemand (cfg, method, cmethod))
9425 CHECK_CFG_EXCEPTION;
9426 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9427 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9433 args [1] = emit_get_rgctx_method (cfg, context_used,
9434 cmethod, MONO_RGCTX_INFO_METHOD);
9437 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9439 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9442 inline_costs += 10 * num_calls++;
9446 CHECK_STACK_OVF (1);
9448 n = read16 (ip + 2);
9450 EMIT_NEW_ARGLOAD (cfg, ins, n);
9455 CHECK_STACK_OVF (1);
9457 n = read16 (ip + 2);
9459 NEW_ARGLOADA (cfg, ins, n);
9460 MONO_ADD_INS (cfg->cbb, ins);
9468 n = read16 (ip + 2);
9470 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9472 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9476 CHECK_STACK_OVF (1);
9478 n = read16 (ip + 2);
9480 EMIT_NEW_LOCLOAD (cfg, ins, n);
9485 unsigned char *tmp_ip;
9486 CHECK_STACK_OVF (1);
9488 n = read16 (ip + 2);
9491 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9497 EMIT_NEW_LOCLOADA (cfg, ins, n);
9506 n = read16 (ip + 2);
9508 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9510 emit_stloc_ir (cfg, sp, header, n);
9517 if (sp != stack_start)
9519 if (cfg->method != method)
9521 * Inlining this into a loop in a parent could lead to
9522 * stack overflows which is different behavior than the
9523 * non-inlined case, thus disable inlining in this case.
9525 goto inline_failure;
9527 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9528 ins->dreg = alloc_preg (cfg);
9529 ins->sreg1 = sp [0]->dreg;
9530 ins->type = STACK_PTR;
9531 MONO_ADD_INS (cfg->cbb, ins);
9533 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9535 ins->flags |= MONO_INST_INIT;
9540 case CEE_ENDFILTER: {
9541 MonoExceptionClause *clause, *nearest;
9542 int cc, nearest_num;
9546 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9548 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9549 ins->sreg1 = (*sp)->dreg;
9550 MONO_ADD_INS (bblock, ins);
9551 start_new_bblock = 1;
9556 for (cc = 0; cc < header->num_clauses; ++cc) {
9557 clause = &header->clauses [cc];
9558 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9559 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9560 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9566 if ((ip - header->code) != nearest->handler_offset)
9571 case CEE_UNALIGNED_:
9572 ins_flag |= MONO_INST_UNALIGNED;
9573 /* FIXME: record alignment? we can assume 1 for now */
9578 ins_flag |= MONO_INST_VOLATILE;
9582 ins_flag |= MONO_INST_TAILCALL;
9583 cfg->flags |= MONO_CFG_HAS_TAIL;
9584 /* Can't inline tail calls at this time */
9585 inline_costs += 100000;
9592 token = read32 (ip + 2);
9593 klass = mini_get_class (method, token, generic_context);
9594 CHECK_TYPELOAD (klass);
9595 if (generic_class_is_reference_type (cfg, klass))
9596 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9598 mini_emit_initobj (cfg, *sp, NULL, klass);
9602 case CEE_CONSTRAINED_:
9604 token = read32 (ip + 2);
9605 if (method->wrapper_type != MONO_WRAPPER_NONE)
9606 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
9608 constrained_call = mono_class_get_full (image, token, generic_context);
9609 CHECK_TYPELOAD (constrained_call);
9614 MonoInst *iargs [3];
9618 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9619 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9620 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9621 /* emit_memset only works when val == 0 */
9622 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9627 if (ip [1] == CEE_CPBLK) {
9628 MonoMethod *memcpy_method = get_memcpy_method ();
9629 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9631 MonoMethod *memset_method = get_memset_method ();
9632 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9642 ins_flag |= MONO_INST_NOTYPECHECK;
9644 ins_flag |= MONO_INST_NORANGECHECK;
9645 /* we ignore the no-nullcheck for now since we
9646 * really do it explicitly only when doing callvirt->call
9652 int handler_offset = -1;
9654 for (i = 0; i < header->num_clauses; ++i) {
9655 MonoExceptionClause *clause = &header->clauses [i];
9656 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9657 handler_offset = clause->handler_offset;
9662 bblock->flags |= BB_EXCEPTION_UNSAFE;
9664 g_assert (handler_offset != -1);
9666 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9667 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9668 ins->sreg1 = load->dreg;
9669 MONO_ADD_INS (bblock, ins);
9671 link_bblock (cfg, bblock, end_bblock);
9672 start_new_bblock = 1;
9680 CHECK_STACK_OVF (1);
9682 token = read32 (ip + 2);
9683 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic) {
9684 MonoType *type = mono_type_create_from_typespec (image, token);
9685 token = mono_type_size (type, &ialign);
9687 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9688 CHECK_TYPELOAD (klass);
9689 mono_class_init (klass);
9690 token = mono_class_value_size (klass, &align);
9692 EMIT_NEW_ICONST (cfg, ins, token);
9697 case CEE_REFANYTYPE: {
9698 MonoInst *src_var, *src;
9704 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9706 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9707 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9708 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9726 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
9736 g_warning ("opcode 0x%02x not handled", *ip);
9740 if (start_new_bblock != 1)
9743 bblock->cil_length = ip - bblock->cil_code;
9744 bblock->next_bb = end_bblock;
9746 if (cfg->method == method && cfg->domainvar) {
9748 MonoInst *get_domain;
9750 cfg->cbb = init_localsbb;
9752 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9753 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9756 get_domain->dreg = alloc_preg (cfg);
9757 MONO_ADD_INS (cfg->cbb, get_domain);
9759 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9760 MONO_ADD_INS (cfg->cbb, store);
9763 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
9764 if (cfg->compile_aot)
9765 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
9766 mono_get_got_var (cfg);
9769 if (cfg->method == method && cfg->got_var)
9770 mono_emit_load_got_addr (cfg);
9775 cfg->cbb = init_localsbb;
9777 for (i = 0; i < header->num_locals; ++i) {
9778 MonoType *ptype = header->locals [i];
9779 int t = ptype->type;
9780 dreg = cfg->locals [i]->dreg;
9782 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
9783 t = mono_class_enum_basetype (ptype->data.klass)->type;
9785 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9786 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
9787 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
9788 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
9789 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
9790 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
9791 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9792 ins->type = STACK_R8;
9793 ins->inst_p0 = (void*)&r8_0;
9794 ins->dreg = alloc_dreg (cfg, STACK_R8);
9795 MONO_ADD_INS (init_localsbb, ins);
9796 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
9797 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
9798 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
9799 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
9801 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9806 if (cfg->init_ref_vars && cfg->method == method) {
9807 /* Emit initialization for ref vars */
9808 // FIXME: Avoid duplication initialization for IL locals.
9809 for (i = 0; i < cfg->num_varinfo; ++i) {
9810 MonoInst *ins = cfg->varinfo [i];
9812 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
9813 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
9817 /* Add a sequence point for method entry/exit events */
9819 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
9820 MONO_ADD_INS (init_localsbb, ins);
9821 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
9822 MONO_ADD_INS (cfg->bb_exit, ins);
9827 if (cfg->method == method) {
9829 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9830 bb->region = mono_find_block_region (cfg, bb->real_offset);
9832 mono_create_spvar_for_region (cfg, bb->region);
9833 if (cfg->verbose_level > 2)
9834 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
9838 g_slist_free (class_inits);
9839 dont_inline = g_list_remove (dont_inline, method);
9841 if (inline_costs < 0) {
9844 /* Method is too large */
9845 mname = mono_method_full_name (method, TRUE);
9846 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
9847 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
9849 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
9850 mono_basic_block_free (original_bb);
9854 if ((cfg->verbose_level > 2) && (cfg->method == method))
9855 mono_print_code (cfg, "AFTER METHOD-TO-IR");
9857 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
9858 mono_basic_block_free (original_bb);
9859 return inline_costs;
9862 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
9869 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
9873 set_exception_type_from_invalid_il (cfg, method, ip);
9877 g_slist_free (class_inits);
9878 mono_basic_block_free (original_bb);
9879 dont_inline = g_list_remove (dont_inline, method);
9880 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a register-source membase store opcode to its immediate-source
 * counterpart, used when the stored value is known to be a constant.
 * Aborts via g_assert_not_reached () for store opcodes with no
 * immediate variant (e.g. FP/vtype stores).
 */
9885 store_membase_reg_to_store_membase_imm (int opcode)
9888 case OP_STORE_MEMBASE_REG:
9889 return OP_STORE_MEMBASE_IMM;
9890 case OP_STOREI1_MEMBASE_REG:
9891 return OP_STOREI1_MEMBASE_IMM;
9892 case OP_STOREI2_MEMBASE_REG:
9893 return OP_STOREI2_MEMBASE_IMM;
9894 case OP_STOREI4_MEMBASE_REG:
9895 return OP_STOREI4_MEMBASE_IMM;
9896 case OP_STOREI8_MEMBASE_REG:
9897 return OP_STOREI8_MEMBASE_IMM;
/* no immediate form exists for any other store opcode */
9899 g_assert_not_reached ();
9905 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 *
 *   Map a two-register IR opcode to the variant taking an immediate second
 * operand (ALU ops, compares, stores, and some arch-specific opcodes).
 * Used by the constant-folding/immediate-operand lowering passes.
 * NOTE(review): the fall-through return value for unhandled opcodes is not
 * visible in this view — presumably -1; confirm against the full file.
 */
9908 mono_op_to_op_imm (int opcode)
9918 return OP_IDIV_UN_IMM;
9922 return OP_IREM_UN_IMM;
9936 return OP_ISHR_UN_IMM;
9953 return OP_LSHR_UN_IMM;
9956 return OP_COMPARE_IMM;
9958 return OP_ICOMPARE_IMM;
9960 return OP_LCOMPARE_IMM;
9962 case OP_STORE_MEMBASE_REG:
9963 return OP_STORE_MEMBASE_IMM;
9964 case OP_STOREI1_MEMBASE_REG:
9965 return OP_STOREI1_MEMBASE_IMM;
9966 case OP_STOREI2_MEMBASE_REG:
9967 return OP_STOREI2_MEMBASE_IMM;
9968 case OP_STOREI4_MEMBASE_REG:
9969 return OP_STOREI4_MEMBASE_IMM;
/* arch-specific immediate forms, only meaningful on x86/amd64 */
9971 #if defined(TARGET_X86) || defined (TARGET_AMD64)
9973 return OP_X86_PUSH_IMM;
9974 case OP_X86_COMPARE_MEMBASE_REG:
9975 return OP_X86_COMPARE_MEMBASE_IMM;
9977 #if defined(TARGET_AMD64)
9978 case OP_AMD64_ICOMPARE_MEMBASE_REG:
9979 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9981 case OP_VOIDCALL_REG:
9990 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL CEE_LDIND_* indirect-load opcode to the corresponding
 * OP_LOAD*_MEMBASE IR opcode. CEE_LDIND_I and CEE_LDIND_REF both map to
 * the native-word OP_LOAD_MEMBASE. Aborts on any other opcode.
 */
9997 ldind_to_load_membase (int opcode)
10001 return OP_LOADI1_MEMBASE;
10003 return OP_LOADU1_MEMBASE;
10005 return OP_LOADI2_MEMBASE;
10007 return OP_LOADU2_MEMBASE;
10009 return OP_LOADI4_MEMBASE;
10011 return OP_LOADU4_MEMBASE;
10013 return OP_LOAD_MEMBASE;
10014 case CEE_LDIND_REF:
10015 return OP_LOAD_MEMBASE;
10017 return OP_LOADI8_MEMBASE;
10019 return OP_LOADR4_MEMBASE;
10021 return OP_LOADR8_MEMBASE;
10023 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL CEE_STIND_* indirect-store opcode to the corresponding
 * OP_STORE*_MEMBASE_REG IR opcode (CEE_STIND_REF maps to the native-word
 * OP_STORE_MEMBASE_REG). Aborts on any other opcode.
 */
10030 stind_to_store_membase (int opcode)
10034 return OP_STOREI1_MEMBASE_REG;
10036 return OP_STOREI2_MEMBASE_REG;
10038 return OP_STOREI4_MEMBASE_REG;
10040 case CEE_STIND_REF:
10041 return OP_STORE_MEMBASE_REG;
10043 return OP_STOREI8_MEMBASE_REG;
10045 return OP_STORER4_MEMBASE_REG;
10047 return OP_STORER8_MEMBASE_REG;
10049 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a base+offset load opcode to the absolute-address *_MEM variant,
 * on architectures which support direct-address loads (currently only
 * x86/amd64). 8-byte loads are only converted on 64-bit registers.
 * Returns the mapped opcode, or (presumably -1, not visible here) when
 * no conversion applies.
 */
10056 mono_load_membase_to_load_mem (int opcode)
10058 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
10059 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10061 case OP_LOAD_MEMBASE:
10062 return OP_LOAD_MEM;
10063 case OP_LOADU1_MEMBASE:
10064 return OP_LOADU1_MEM;
10065 case OP_LOADU2_MEMBASE:
10066 return OP_LOADU2_MEM;
10067 case OP_LOADI4_MEMBASE:
10068 return OP_LOADI4_MEM;
10069 case OP_LOADU4_MEMBASE:
10070 return OP_LOADU4_MEM;
10071 #if SIZEOF_REGISTER == 8
10072 case OP_LOADI8_MEMBASE:
10073 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU opcode whose result is immediately stored back through
 * STORE_OPCODE to the same memory location it was loaded from, return an
 * arch-specific read-modify-write *_MEMBASE opcode that fuses the
 * load+op+store into one instruction (x86/amd64 only). The store opcode
 * must be a plain integer/word-size store for the fusion to be valid.
 * Returns -1 (not visible in this view — confirm) when no fusion exists.
 */
10082 op_to_op_dest_membase (int store_opcode, int opcode)
10084 #if defined(TARGET_X86)
/* only word-sized stores can be fused on x86 */
10085 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
10090 return OP_X86_ADD_MEMBASE_REG;
10092 return OP_X86_SUB_MEMBASE_REG;
10094 return OP_X86_AND_MEMBASE_REG;
10096 return OP_X86_OR_MEMBASE_REG;
10098 return OP_X86_XOR_MEMBASE_REG;
10101 return OP_X86_ADD_MEMBASE_IMM;
10104 return OP_X86_SUB_MEMBASE_IMM;
10107 return OP_X86_AND_MEMBASE_IMM;
10110 return OP_X86_OR_MEMBASE_IMM;
10113 return OP_X86_XOR_MEMBASE_IMM;
10119 #if defined(TARGET_AMD64)
/* amd64 additionally allows 8-byte stores to be fused */
10120 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
10125 return OP_X86_ADD_MEMBASE_REG;
10127 return OP_X86_SUB_MEMBASE_REG;
10129 return OP_X86_AND_MEMBASE_REG;
10131 return OP_X86_OR_MEMBASE_REG;
10133 return OP_X86_XOR_MEMBASE_REG;
10135 return OP_X86_ADD_MEMBASE_IMM;
10137 return OP_X86_SUB_MEMBASE_IMM;
10139 return OP_X86_AND_MEMBASE_IMM;
10141 return OP_X86_OR_MEMBASE_IMM;
10143 return OP_X86_XOR_MEMBASE_IMM;
/* 64-bit (long) variants */
10145 return OP_AMD64_ADD_MEMBASE_REG;
10147 return OP_AMD64_SUB_MEMBASE_REG;
10149 return OP_AMD64_AND_MEMBASE_REG;
10151 return OP_AMD64_OR_MEMBASE_REG;
10153 return OP_AMD64_XOR_MEMBASE_REG;
10156 return OP_AMD64_ADD_MEMBASE_IMM;
10159 return OP_AMD64_SUB_MEMBASE_IMM;
10162 return OP_AMD64_AND_MEMBASE_IMM;
10165 return OP_AMD64_OR_MEMBASE_IMM;
10168 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a condition-materializing opcode with the byte store that
 * follows it, producing an x86/amd64 SETcc-to-memory opcode. Only valid
 * when the store is a 1-byte store (SETcc writes a single byte).
 */
10178 op_to_op_store_membase (int store_opcode, int opcode)
10180 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10183 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10184 return OP_X86_SETEQ_MEMBASE;
10186 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10187 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Fold a membase load feeding the FIRST source operand of OPCODE into
 * the instruction itself, yielding an arch-specific *_MEMBASE form
 * (x86/amd64 only). LOAD_OPCODE is the load that produced the operand;
 * the fold is only legal for word-sized loads since the fused form reads
 * the operand width directly from memory.
 */
10195 op_to_op_src1_membase (int load_opcode, int opcode)
10198 /* FIXME: This has sign extension issues */
10200 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10201 return OP_X86_COMPARE_MEMBASE8_IMM;
10204 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10209 return OP_X86_PUSH_MEMBASE;
10210 case OP_COMPARE_IMM:
10211 case OP_ICOMPARE_IMM:
10212 return OP_X86_COMPARE_MEMBASE_IMM;
10215 return OP_X86_COMPARE_MEMBASE_REG;
10219 #ifdef TARGET_AMD64
10220 /* FIXME: This has sign extension issues */
10222 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10223 return OP_X86_COMPARE_MEMBASE8_IMM;
10228 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10229 return OP_X86_PUSH_MEMBASE;
/* the immediate-compare folds below are disabled pending 32-bit-imm checks */
10231 /* FIXME: This only works for 32 bit immediates
10232 case OP_COMPARE_IMM:
10233 case OP_LCOMPARE_IMM:
10234 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10235 return OP_AMD64_COMPARE_MEMBASE_IMM;
10237 case OP_ICOMPARE_IMM:
10238 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10239 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10243 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10244 return OP_AMD64_COMPARE_MEMBASE_REG;
10247 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10248 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Fold a membase load feeding the SECOND source operand of OPCODE into
 * the instruction itself, yielding an arch-specific REG_MEMBASE form
 * (x86/amd64 only). On amd64 the 32-bit (I*) and 64-bit (AMD64_*)
 * variants are selected by the width of LOAD_OPCODE.
 */
10257 op_to_op_src2_membase (int load_opcode, int opcode)
/* x86: only word-sized loads can be folded */
10260 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10266 return OP_X86_COMPARE_REG_MEMBASE;
10268 return OP_X86_ADD_REG_MEMBASE;
10270 return OP_X86_SUB_REG_MEMBASE;
10272 return OP_X86_AND_REG_MEMBASE;
10274 return OP_X86_OR_REG_MEMBASE;
10276 return OP_X86_XOR_REG_MEMBASE;
10280 #ifdef TARGET_AMD64
10283 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10284 return OP_AMD64_ICOMPARE_REG_MEMBASE;
10288 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10289 return OP_AMD64_COMPARE_REG_MEMBASE;
10292 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10293 return OP_X86_ADD_REG_MEMBASE;
10295 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10296 return OP_X86_SUB_REG_MEMBASE;
10298 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10299 return OP_X86_AND_REG_MEMBASE;
10301 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10302 return OP_X86_OR_REG_MEMBASE;
10304 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10305 return OP_X86_XOR_REG_MEMBASE;
10307 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10308 return OP_AMD64_ADD_REG_MEMBASE;
10310 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10311 return OP_AMD64_SUB_REG_MEMBASE;
10313 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10314 return OP_AMD64_AND_REG_MEMBASE;
10316 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10317 return OP_AMD64_OR_REG_MEMBASE;
10319 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10320 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuses the conversion (the excluded
 * cases are in lines not visible here) for opcodes which are emulated in
 * software on this target — long shifts on 32-bit without native long
 * shift ops, and mul/div when MONO_ARCH_EMULATE_MUL_DIV/_DIV is set —
 * since the emulation helpers take register operands.
 */
10328 mono_op_to_op_imm_noemul (int opcode)
10331 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
10337 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10345 return mono_op_to_op_imm (opcode);
10349 #ifndef DISABLE_JIT
10352 * mono_handle_global_vregs:
10354 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/*
 * mono_handle_global_vregs: (see the header comment above)
 *
 *   Pass over all bblocks recording, per vreg, the single bblock it is
 * used in (block_num + 1, since 0 is a valid block number) or -1 when it
 * is used in more than one bblock; multi-bblock vregs get a MonoInst
 * variable created for them ("made global"). Afterwards, variables used
 * in only one bblock are converted back to local vregs, and the
 * varinfo/vars tables are compacted to drop dead entries.
 */
10358 mono_handle_global_vregs (MonoCompile *cfg)
10360 gint32 *vreg_to_bb;
10361 MonoBasicBlock *bb;
/*
 * NOTE(review): element size is sizeof (gint32*) but the array holds
 * gint32, and '+ 1' binds after the multiply — likely intended
 * sizeof (gint32) * (cfg->next_vreg + 1). Over-allocates on 64-bit but
 * under-allocates by 3 bytes on 32-bit; verify against upstream.
 */
10364 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
10366 #ifdef MONO_ARCH_SIMD_INTRINSICS
10367 if (cfg->uses_simd_intrinsics)
10368 mono_simd_simplify_indirection (cfg);
10371 /* Find local vregs used in more than one bb */
10372 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10373 MonoInst *ins = bb->code;
10374 int block_num = bb->block_num;
10376 if (cfg->verbose_level > 2)
10377 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
10380 for (; ins; ins = ins->next) {
10381 const char *spec = INS_INFO (ins->opcode);
10382 int regtype = 0, regindex;
10385 if (G_UNLIKELY (cfg->verbose_level > 2))
10386 mono_print_ins (ins);
/* by this point all CIL opcodes must have been lowered to machine IR */
10388 g_assert (ins->opcode >= MONO_CEE_LAST);
/* visit dreg (index 0) then sreg1..sreg3; ' ' in the spec means unused */
10390 for (regindex = 0; regindex < 4; regindex ++) {
10393 if (regindex == 0) {
10394 regtype = spec [MONO_INST_DEST];
10395 if (regtype == ' ')
10398 } else if (regindex == 1) {
10399 regtype = spec [MONO_INST_SRC1];
10400 if (regtype == ' ')
10403 } else if (regindex == 2) {
10404 regtype = spec [MONO_INST_SRC2];
10405 if (regtype == ' ')
10408 } else if (regindex == 3) {
10409 regtype = spec [MONO_INST_SRC3];
10410 if (regtype == ' ')
10415 #if SIZEOF_REGISTER == 4
10416 /* In the LLVM case, the long opcodes are not decomposed */
10417 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
10419 * Since some instructions reference the original long vreg,
10420 * and some reference the two component vregs, it is quite hard
10421 * to determine when it needs to be global. So be conservative.
10423 if (!get_vreg_to_inst (cfg, vreg)) {
10424 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10426 if (cfg->verbose_level > 2)
10427 printf ("LONG VREG R%d made global.\n", vreg);
10431 * Make the component vregs volatile since the optimizations can
10432 * get confused otherwise.
10434 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
10435 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
10439 g_assert (vreg != -1);
10441 prev_bb = vreg_to_bb [vreg];
10442 if (prev_bb == 0) {
10443 /* 0 is a valid block num */
10444 vreg_to_bb [vreg] = block_num + 1;
10445 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* hard registers are never made global */
10446 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
10449 if (!get_vreg_to_inst (cfg, vreg)) {
10450 if (G_UNLIKELY (cfg->verbose_level > 2))
10451 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* variable type chosen per regtype: int, long, double, or vtype */
10455 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
10458 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10461 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
10464 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
10467 g_assert_not_reached ();
10471 /* Flag as having been used in more than one bb */
10472 vreg_to_bb [vreg] = -1;
10478 /* If a variable is used in only one bblock, convert it into a local vreg */
10479 for (i = 0; i < cfg->num_varinfo; i++) {
10480 MonoInst *var = cfg->varinfo [i];
10481 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
10483 switch (var->type) {
10489 #if SIZEOF_REGISTER == 8
10492 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
10493 /* Enabling this screws up the fp stack on x86 */
10496 /* Arguments are implicitly global */
10497 /* Putting R4 vars into registers doesn't work currently */
10498 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
10500 * Make that the variable's liveness interval doesn't contain a call, since
10501 * that would cause the lvreg to be spilled, making the whole optimization
10504 /* This is too slow for JIT compilation */
/*
 * NOTE(review): in this view vreg_to_bb entries are dereferenced as
 * basic blocks below, which conflicts with the gint32* declaration —
 * likely this region is compiled out (#if 0) in lines not visible
 * here; confirm against the full file.
 */
10506 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
10508 int def_index, call_index, ins_index;
10509 gboolean spilled = FALSE;
10514 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
10515 const char *spec = INS_INFO (ins->opcode);
10517 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
10518 def_index = ins_index;
/*
 * NOTE(review): the second operand of this || duplicates the
 * SRC1/sreg1 test; it almost certainly should check
 * SRC2/sreg2, so uses via the second source register are
 * missed here. Verify and fix in a code change.
 */
10520 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
10521 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
10522 if (call_index > def_index) {
10528 if (MONO_IS_CALL (ins))
10529 call_index = ins_index;
10539 if (G_UNLIKELY (cfg->verbose_level > 2))
10540 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
10541 var->flags |= MONO_INST_IS_DEAD;
10542 cfg->vreg_to_inst [var->dreg] = NULL;
10549 * Compress the varinfo and vars tables so the liveness computation is faster and
10550 * takes up less space.
10553 for (i = 0; i < cfg->num_varinfo; ++i) {
10554 MonoInst *var = cfg->varinfo [i];
10555 if (pos < i && cfg->locals_start == i)
10556 cfg->locals_start = pos;
10557 if (!(var->flags & MONO_INST_IS_DEAD)) {
/* keep live entries, sliding them down to index 'pos' */
10559 cfg->varinfo [pos] = cfg->varinfo [i];
10560 cfg->varinfo [pos]->inst_c0 = pos;
10561 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
10562 cfg->vars [pos].idx = pos;
10563 #if SIZEOF_REGISTER == 4
10564 if (cfg->varinfo [pos]->type == STACK_I8) {
10565 /* Modify the two component vars too */
10568 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
10569 var1->inst_c0 = pos;
10570 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
10571 var1->inst_c0 = pos;
10578 cfg->num_varinfo = pos;
10579 if (cfg->locals_start > cfg->num_varinfo)
10580 cfg->locals_start = cfg->num_varinfo;
10584 * mono_spill_global_vars:
10586 * Generate spill code for variables which are not allocated to registers,
10587 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10588 * code is generated which could be optimized by the local optimization passes.
10591 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
10593 MonoBasicBlock *bb;
10595 int orig_next_vreg;
10596 guint32 *vreg_to_lvreg;
10598 guint32 i, lvregs_len;
10599 gboolean dest_has_lvreg = FALSE;
10600 guint32 stacktypes [128];
10601 MonoInst **live_range_start, **live_range_end;
10602 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
10604 *need_local_opts = FALSE;
10606 memset (spec2, 0, sizeof (spec2));
10608 /* FIXME: Move this function to mini.c */
10609 stacktypes ['i'] = STACK_PTR;
10610 stacktypes ['l'] = STACK_I8;
10611 stacktypes ['f'] = STACK_R8;
10612 #ifdef MONO_ARCH_SIMD_INTRINSICS
10613 stacktypes ['x'] = STACK_VTYPE;
10616 #if SIZEOF_REGISTER == 4
10617 /* Create MonoInsts for longs */
10618 for (i = 0; i < cfg->num_varinfo; i++) {
10619 MonoInst *ins = cfg->varinfo [i];
10621 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
10622 switch (ins->type) {
10627 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
10630 g_assert (ins->opcode == OP_REGOFFSET);
10632 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10634 tree->opcode = OP_REGOFFSET;
10635 tree->inst_basereg = ins->inst_basereg;
10636 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10638 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10640 tree->opcode = OP_REGOFFSET;
10641 tree->inst_basereg = ins->inst_basereg;
10642 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10652 /* FIXME: widening and truncation */
10655 * As an optimization, when a variable allocated to the stack is first loaded into
10656 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10657 * the variable again.
10659 orig_next_vreg = cfg->next_vreg;
10660 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
10661 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10665 * These arrays contain the first and last instructions accessing a given
10667 * Since we emit bblocks in the same order we process them here, and we
10668 * don't split live ranges, these will precisely describe the live range of
10669 * the variable, i.e. the instruction range where a valid value can be found
10670 * in the variables location.
10671 * The live range is computed using the liveness info computed by the liveness pass.
10672 * We can't use vmv->range, since that is an abstract live range, and we need
10673 * one which is instruction precise.
10674 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
10676 /* FIXME: Only do this if debugging info is requested */
10677 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
10678 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
10679 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10680 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10682 /* Add spill loads/stores */
10683 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10686 if (cfg->verbose_level > 2)
10687 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
10689 /* Clear vreg_to_lvreg array */
10690 for (i = 0; i < lvregs_len; i++)
10691 vreg_to_lvreg [lvregs [i]] = 0;
10695 MONO_BB_FOR_EACH_INS (bb, ins) {
10696 const char *spec = INS_INFO (ins->opcode);
10697 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
10698 gboolean store, no_lvreg;
10699 int sregs [MONO_MAX_SRC_REGS];
10701 if (G_UNLIKELY (cfg->verbose_level > 2))
10702 mono_print_ins (ins);
10704 if (ins->opcode == OP_NOP)
10708 * We handle LDADDR here as well, since it can only be decomposed
10709 * when variable addresses are known.
10711 if (ins->opcode == OP_LDADDR) {
10712 MonoInst *var = ins->inst_p0;
10714 if (var->opcode == OP_VTARG_ADDR) {
10715 /* Happens on SPARC/S390 where vtypes are passed by reference */
10716 MonoInst *vtaddr = var->inst_left;
10717 if (vtaddr->opcode == OP_REGVAR) {
10718 ins->opcode = OP_MOVE;
10719 ins->sreg1 = vtaddr->dreg;
10721 else if (var->inst_left->opcode == OP_REGOFFSET) {
10722 ins->opcode = OP_LOAD_MEMBASE;
10723 ins->inst_basereg = vtaddr->inst_basereg;
10724 ins->inst_offset = vtaddr->inst_offset;
10728 g_assert (var->opcode == OP_REGOFFSET);
10730 ins->opcode = OP_ADD_IMM;
10731 ins->sreg1 = var->inst_basereg;
10732 ins->inst_imm = var->inst_offset;
10735 *need_local_opts = TRUE;
10736 spec = INS_INFO (ins->opcode);
10739 if (ins->opcode < MONO_CEE_LAST) {
10740 mono_print_ins (ins);
10741 g_assert_not_reached ();
10745 * Store opcodes have destbasereg in the dreg, but in reality, it is an
10749 if (MONO_IS_STORE_MEMBASE (ins)) {
10750 tmp_reg = ins->dreg;
10751 ins->dreg = ins->sreg2;
10752 ins->sreg2 = tmp_reg;
10755 spec2 [MONO_INST_DEST] = ' ';
10756 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10757 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10758 spec2 [MONO_INST_SRC3] = ' ';
10760 } else if (MONO_IS_STORE_MEMINDEX (ins))
10761 g_assert_not_reached ();
10766 if (G_UNLIKELY (cfg->verbose_level > 2)) {
10767 printf ("\t %.3s %d", spec, ins->dreg);
10768 num_sregs = mono_inst_get_src_registers (ins, sregs);
10769 for (srcindex = 0; srcindex < 3; ++srcindex)
10770 printf (" %d", sregs [srcindex]);
10777 regtype = spec [MONO_INST_DEST];
10778 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
10781 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
10782 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
10783 MonoInst *store_ins;
10785 MonoInst *def_ins = ins;
10786 int dreg = ins->dreg; /* The original vreg */
10788 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
10790 if (var->opcode == OP_REGVAR) {
10791 ins->dreg = var->dreg;
10792 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
10794 * Instead of emitting a load+store, use a _membase opcode.
10796 g_assert (var->opcode == OP_REGOFFSET);
10797 if (ins->opcode == OP_MOVE) {
10801 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
10802 ins->inst_basereg = var->inst_basereg;
10803 ins->inst_offset = var->inst_offset;
10806 spec = INS_INFO (ins->opcode);
10810 g_assert (var->opcode == OP_REGOFFSET);
10812 prev_dreg = ins->dreg;
10814 /* Invalidate any previous lvreg for this vreg */
10815 vreg_to_lvreg [ins->dreg] = 0;
10819 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
10821 store_opcode = OP_STOREI8_MEMBASE_REG;
10824 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
10826 if (regtype == 'l') {
10827 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
10828 mono_bblock_insert_after_ins (bb, ins, store_ins);
10829 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
10830 mono_bblock_insert_after_ins (bb, ins, store_ins);
10831 def_ins = store_ins;
10834 g_assert (store_opcode != OP_STOREV_MEMBASE);
10836 /* Try to fuse the store into the instruction itself */
10837 /* FIXME: Add more instructions */
10838 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
10839 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
10840 ins->inst_imm = ins->inst_c0;
10841 ins->inst_destbasereg = var->inst_basereg;
10842 ins->inst_offset = var->inst_offset;
10843 spec = INS_INFO (ins->opcode);
10844 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
10845 ins->opcode = store_opcode;
10846 ins->inst_destbasereg = var->inst_basereg;
10847 ins->inst_offset = var->inst_offset;
10851 tmp_reg = ins->dreg;
10852 ins->dreg = ins->sreg2;
10853 ins->sreg2 = tmp_reg;
10856 spec2 [MONO_INST_DEST] = ' ';
10857 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10858 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10859 spec2 [MONO_INST_SRC3] = ' ';
10861 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
10862 // FIXME: The backends expect the base reg to be in inst_basereg
10863 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
10865 ins->inst_basereg = var->inst_basereg;
10866 ins->inst_offset = var->inst_offset;
10867 spec = INS_INFO (ins->opcode);
10869 /* printf ("INS: "); mono_print_ins (ins); */
10870 /* Create a store instruction */
10871 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
10873 /* Insert it after the instruction */
10874 mono_bblock_insert_after_ins (bb, ins, store_ins);
10876 def_ins = store_ins;
10879 * We can't assign ins->dreg to var->dreg here, since the
10880 * sregs could use it. So set a flag, and do it after
10883 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
10884 dest_has_lvreg = TRUE;
10889 if (def_ins && !live_range_start [dreg]) {
10890 live_range_start [dreg] = def_ins;
10891 live_range_start_bb [dreg] = bb;
10898 num_sregs = mono_inst_get_src_registers (ins, sregs);
10899 for (srcindex = 0; srcindex < 3; ++srcindex) {
10900 regtype = spec [MONO_INST_SRC1 + srcindex];
10901 sreg = sregs [srcindex];
10903 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
10904 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
10905 MonoInst *var = get_vreg_to_inst (cfg, sreg);
10906 MonoInst *use_ins = ins;
10907 MonoInst *load_ins;
10908 guint32 load_opcode;
10910 if (var->opcode == OP_REGVAR) {
10911 sregs [srcindex] = var->dreg;
10912 //mono_inst_set_src_registers (ins, sregs);
10913 live_range_end [sreg] = use_ins;
10914 live_range_end_bb [sreg] = bb;
10918 g_assert (var->opcode == OP_REGOFFSET);
10920 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
10922 g_assert (load_opcode != OP_LOADV_MEMBASE);
10924 if (vreg_to_lvreg [sreg]) {
10925 g_assert (vreg_to_lvreg [sreg] != -1);
10927 /* The variable is already loaded to an lvreg */
10928 if (G_UNLIKELY (cfg->verbose_level > 2))
10929 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
10930 sregs [srcindex] = vreg_to_lvreg [sreg];
10931 //mono_inst_set_src_registers (ins, sregs);
10935 /* Try to fuse the load into the instruction */
10936 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
10937 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
10938 sregs [0] = var->inst_basereg;
10939 //mono_inst_set_src_registers (ins, sregs);
10940 ins->inst_offset = var->inst_offset;
10941 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
10942 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
10943 sregs [1] = var->inst_basereg;
10944 //mono_inst_set_src_registers (ins, sregs);
10945 ins->inst_offset = var->inst_offset;
10947 if (MONO_IS_REAL_MOVE (ins)) {
10948 ins->opcode = OP_NOP;
10951 //printf ("%d ", srcindex); mono_print_ins (ins);
10953 sreg = alloc_dreg (cfg, stacktypes [regtype]);
10955 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
10956 if (var->dreg == prev_dreg) {
10958 * sreg refers to the value loaded by the load
10959 * emitted below, but we need to use ins->dreg
10960 * since it refers to the store emitted earlier.
10964 g_assert (sreg != -1);
10965 vreg_to_lvreg [var->dreg] = sreg;
10966 g_assert (lvregs_len < 1024);
10967 lvregs [lvregs_len ++] = var->dreg;
10971 sregs [srcindex] = sreg;
10972 //mono_inst_set_src_registers (ins, sregs);
10974 if (regtype == 'l') {
10975 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
10976 mono_bblock_insert_before_ins (bb, ins, load_ins);
10977 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
10978 mono_bblock_insert_before_ins (bb, ins, load_ins);
10979 use_ins = load_ins;
10982 #if SIZEOF_REGISTER == 4
10983 g_assert (load_opcode != OP_LOADI8_MEMBASE);
10985 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
10986 mono_bblock_insert_before_ins (bb, ins, load_ins);
10987 use_ins = load_ins;
10991 if (var->dreg < orig_next_vreg) {
10992 live_range_end [var->dreg] = use_ins;
10993 live_range_end_bb [var->dreg] = bb;
10997 mono_inst_set_src_registers (ins, sregs);
10999 if (dest_has_lvreg) {
11000 g_assert (ins->dreg != -1);
11001 vreg_to_lvreg [prev_dreg] = ins->dreg;
11002 g_assert (lvregs_len < 1024);
11003 lvregs [lvregs_len ++] = prev_dreg;
11004 dest_has_lvreg = FALSE;
11008 tmp_reg = ins->dreg;
11009 ins->dreg = ins->sreg2;
11010 ins->sreg2 = tmp_reg;
11013 if (MONO_IS_CALL (ins)) {
11014 /* Clear vreg_to_lvreg array */
11015 for (i = 0; i < lvregs_len; i++)
11016 vreg_to_lvreg [lvregs [i]] = 0;
11018 } else if (ins->opcode == OP_NOP) {
11020 MONO_INST_NULLIFY_SREGS (ins);
11023 if (cfg->verbose_level > 2)
11024 mono_print_ins_index (1, ins);
11027 /* Extend the live range based on the liveness info */
11028 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
11029 for (i = 0; i < cfg->num_varinfo; i ++) {
11030 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
11032 if (vreg_is_volatile (cfg, vi->vreg))
11033 /* The liveness info is incomplete */
11036 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
11037 /* Live from at least the first ins of this bb */
11038 live_range_start [vi->vreg] = bb->code;
11039 live_range_start_bb [vi->vreg] = bb;
11042 if (mono_bitset_test_fast (bb->live_out_set, i)) {
11043 /* Live at least until the last ins of this bb */
11044 live_range_end [vi->vreg] = bb->last_ins;
11045 live_range_end_bb [vi->vreg] = bb;
11051 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
11053 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
11054 * by storing the current native offset into MonoMethodVar->live_range_start/end.
11056 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
11057 for (i = 0; i < cfg->num_varinfo; ++i) {
11058 int vreg = MONO_VARINFO (cfg, i)->vreg;
11061 if (live_range_start [vreg]) {
11062 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
11064 ins->inst_c1 = vreg;
11065 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
11067 if (live_range_end [vreg]) {
11068 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
11070 ins->inst_c1 = vreg;
11071 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
11072 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
11074 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
11080 g_free (live_range_start);
11081 g_free (live_range_end);
11082 g_free (live_range_start_bb);
11083 g_free (live_range_end_bb);
11088 * - use 'iadd' instead of 'int_add'
11089 * - handling ovf opcodes: decompose in method_to_ir.
11090 * - unify iregs/fregs
11091 * -> partly done, the missing parts are:
11092 * - a more complete unification would involve unifying the hregs as well, so
11093 * code wouldn't need if (fp) all over the place. but that would mean the hregs
11094 * would no longer map to the machine hregs, so the code generators would need to
11095 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
11096 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
11097 * fp/non-fp branches speeds it up by about 15%.
11098 * - use sext/zext opcodes instead of shifts
11100 * - get rid of TEMPLOADs if possible and use vregs instead
11101 * - clean up usage of OP_P/OP_ opcodes
11102 * - cleanup usage of DUMMY_USE
11103 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
11105 * - set the stack type and allocate a dreg in the EMIT_NEW macros
11106 * - get rid of all the <foo>2 stuff when the new JIT is ready.
11107 * - make sure handle_stack_args () is called before the branch is emitted
11108 * - when the new IR is done, get rid of all unused stuff
11109 * - COMPARE/BEQ as separate instructions or unify them ?
11110 * - keeping them separate allows specialized compare instructions like
11111 * compare_imm, compare_membase
11112 * - most back ends unify fp compare+branch, fp compare+ceq
11113 * - integrate mono_save_args into inline_method
11114 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
11115 * - handle long shift opts on 32 bit platforms somehow: they require
11116 * 3 sregs (2 for arg1 and 1 for arg2)
11117 * - make byref a 'normal' type.
11118 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
11119 * variable if needed.
11120 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11121 * like inline_method.
11122 * - remove inlining restrictions
11123 * - fix LNEG and enable cfold of INEG
11124 * - generalize x86 optimizations like ldelema as a peephole optimization
11125 * - add store_mem_imm for amd64
11126 * - optimize the loading of the interruption flag in the managed->native wrappers
11127 * - avoid special handling of OP_NOP in passes
11128 * - move code inserting instructions into one function/macro.
11129 * - try a coalescing phase after liveness analysis
11130 * - add float -> vreg conversion + local optimizations on !x86
11131 * - figure out how to handle decomposed branches during optimizations, ie.
11132 * compare+branch, op_jump_table+op_br etc.
11133 * - promote RuntimeXHandles to vregs
11134 * - vtype cleanups:
11135 * - add a NEW_VARLOADA_VREG macro
11136 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11137 * accessing vtype fields.
11138 * - get rid of I8CONST on 64 bit platforms
11139 * - dealing with the increase in code size due to branches created during opcode
11141 * - use extended basic blocks
11142 * - all parts of the JIT
11143 * - handle_global_vregs () && local regalloc
11144 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11145 * - sources of increase in code size:
11148 * - isinst and castclass
11149 * - lvregs not allocated to global registers even if used multiple times
11150 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11152 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11153 * - add all micro optimizations from the old JIT
11154 * - put tree optimizations into the deadce pass
11155 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11156 * specific function.
11157 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11158 * fcompare + branchCC.
11159 * - create a helper function for allocating a stack slot, taking into account
11160 * MONO_CFG_HAS_SPILLUP.
11162 * - merge the ia64 switch changes.
11163 * - optimize mono_regstate2_alloc_int/float.
11164 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11165 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11166 * parts of the tree could be separated by other instructions, killing the tree
11167 * arguments, or stores killing loads etc. Also, should we fold loads into other
11168 * instructions if the result of the load is used multiple times ?
11169 * - make the REM_IMM optimization in mini-x86.c arch-independent.
11170 * - LAST MERGE: 108395.
11171 * - when returning vtypes in registers, generate IR and append it to the end of the
11172 * last bb instead of doing it in the epilog.
11173 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
11181 - When to decompose opcodes:
11182 - earlier: this makes some optimizations hard to implement, since the low level IR
11183 no longer contains the necessary information. But it is easier to do.
11184 - later: harder to implement, enables more optimizations.
11185 - Branches inside bblocks:
11186 - created when decomposing complex opcodes.
11187 - branches to another bblock: harmless, but not tracked by the branch
11188 optimizations, so need to branch to a label at the start of the bblock.
11189 - branches to inside the same bblock: very problematic, trips up the local
11190 reg allocator. Can be fixed by splitting the current bblock, but that is a
11191 complex operation, since some local vregs can become global vregs etc.
11192 - Local/global vregs:
11193 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11194 local register allocator.
11195 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11196 structure, created by mono_create_var (). Assigned to hregs or the stack by
11197 the global register allocator.
11198 - When to do optimizations like alu->alu_imm:
11199 - earlier -> saves work later on since the IR will be smaller/simpler
11200 - later -> can work on more instructions
11201 - Handling of valuetypes:
11202 - When a vtype is pushed on the stack, a new temporary is created, an
11203 instruction computing its address (LDADDR) is emitted and pushed on
11204 the stack. Need to optimize cases when the vtype is used immediately as in
11205 argument passing, stloc etc.
11206 - Instead of the to_end stuff in the old JIT, simply call the function handling
11207 the values on the stack before emitting the last instruction of the bb.
11210 #endif /* DISABLE_JIT */