2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
30 #include <mono/utils/memcheck.h>
32 #include <mono/metadata/assembly.h>
33 #include <mono/metadata/loader.h>
34 #include <mono/metadata/tabledefs.h>
35 #include <mono/metadata/class.h>
36 #include <mono/metadata/object.h>
37 #include <mono/metadata/exception.h>
38 #include <mono/metadata/opcodes.h>
39 #include <mono/metadata/mono-endian.h>
40 #include <mono/metadata/tokentype.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/marshal.h>
43 #include <mono/metadata/debug-helpers.h>
44 #include <mono/metadata/mono-debug.h>
45 #include <mono/metadata/gc-internal.h>
46 #include <mono/metadata/security-manager.h>
47 #include <mono/metadata/threads-types.h>
48 #include <mono/metadata/security-core-clr.h>
49 #include <mono/metadata/monitor.h>
50 #include <mono/metadata/profiler-private.h>
51 #include <mono/metadata/profiler.h>
52 #include <mono/utils/mono-compiler.h>
53 #include <mono/metadata/mono-basic-block.h>
60 #include "jit-icalls.h"
62 #include "debugger-agent.h"
64 #define BRANCH_COST 100
65 #define INLINE_LENGTH_LIMIT 20
66 #define INLINE_FAILURE do {\
67 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
70 #define CHECK_CFG_EXCEPTION do {\
71 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
74 #define METHOD_ACCESS_FAILURE do { \
75 char *method_fname = mono_method_full_name (method, TRUE); \
76 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
77 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
78 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
79 g_free (method_fname); \
80 g_free (cil_method_fname); \
81 goto exception_exit; \
83 #define FIELD_ACCESS_FAILURE do { \
84 char *method_fname = mono_method_full_name (method, TRUE); \
85 char *field_fname = mono_field_full_name (field); \
86 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
87 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
88 g_free (method_fname); \
89 g_free (field_fname); \
90 goto exception_exit; \
92 #define GENERIC_SHARING_FAILURE(opcode) do { \
93 if (cfg->generic_sharing_context) { \
94 if (cfg->verbose_level > 2) \
95 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
96 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
97 goto exception_exit; \
101 /* Determine whenever 'ins' represents a load of the 'this' argument */
102 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
104 static int ldind_to_load_membase (int opcode);
105 static int stind_to_store_membase (int opcode);
107 int mono_op_to_op_imm (int opcode);
108 int mono_op_to_op_imm_noemul (int opcode);
110 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
111 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
112 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
114 /* helper methods signature */
115 extern MonoMethodSignature *helper_sig_class_init_trampoline;
116 extern MonoMethodSignature *helper_sig_domain_get;
117 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
118 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
119 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
120 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
123 * Instruction metadata
131 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
132 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
138 #if SIZEOF_REGISTER == 8
143 /* keep in sync with the enum in mini.h */
146 #include "mini-ops.h"
151 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
152 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
154 * This should contain the index of the last sreg + 1. This is not the same
155 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
157 const gint8 ins_sreg_counts[] = {
158 #include "mini-ops.h"
163 #define MONO_INIT_VARINFO(vi,id) do { \
164 (vi)->range.first_use.pos.bid = 0xffff; \
170 mono_inst_set_src_registers (MonoInst *ins, int *regs)
172 ins->sreg1 = regs [0];
173 ins->sreg2 = regs [1];
174 ins->sreg3 = regs [2];
178 mono_alloc_ireg (MonoCompile *cfg)
180 return alloc_ireg (cfg);
184 mono_alloc_freg (MonoCompile *cfg)
186 return alloc_freg (cfg);
190 mono_alloc_preg (MonoCompile *cfg)
192 return alloc_preg (cfg);
196 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
198 return alloc_dreg (cfg, stack_type);
202 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
208 switch (type->type) {
211 case MONO_TYPE_BOOLEAN:
223 case MONO_TYPE_FNPTR:
225 case MONO_TYPE_CLASS:
226 case MONO_TYPE_STRING:
227 case MONO_TYPE_OBJECT:
228 case MONO_TYPE_SZARRAY:
229 case MONO_TYPE_ARRAY:
233 #if SIZEOF_REGISTER == 8
242 case MONO_TYPE_VALUETYPE:
243 if (type->data.klass->enumtype) {
244 type = mono_class_enum_basetype (type->data.klass);
247 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
250 case MONO_TYPE_TYPEDBYREF:
252 case MONO_TYPE_GENERICINST:
253 type = &type->data.generic_class->container_class->byval_arg;
257 g_assert (cfg->generic_sharing_context);
260 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
266 mono_print_bb (MonoBasicBlock *bb, const char *msg)
271 printf ("\n%s %d: [IN: ", msg, bb->block_num);
272 for (i = 0; i < bb->in_count; ++i)
273 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
275 for (i = 0; i < bb->out_count; ++i)
276 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
278 for (tree = bb->code; tree; tree = tree->next)
279 mono_print_ins_index (-1, tree);
283 * Can't put this at the beginning, since other files reference stuff from this
288 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
290 #define GET_BBLOCK(cfg,tblock,ip) do { \
291 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
293 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
294 NEW_BBLOCK (cfg, (tblock)); \
295 (tblock)->cil_code = (ip); \
296 ADD_BBLOCK (cfg, (tblock)); \
300 #if defined(TARGET_X86) || defined(TARGET_AMD64)
301 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
302 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
303 (dest)->dreg = alloc_preg ((cfg)); \
304 (dest)->sreg1 = (sr1); \
305 (dest)->sreg2 = (sr2); \
306 (dest)->inst_imm = (imm); \
307 (dest)->backend.shift_amount = (shift); \
308 MONO_ADD_INS ((cfg)->cbb, (dest)); \
312 #if SIZEOF_REGISTER == 8
313 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
314 /* FIXME: Need to add many more cases */ \
315 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
317 int dr = alloc_preg (cfg); \
318 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
319 (ins)->sreg2 = widen->dreg; \
323 #define ADD_WIDEN_OP(ins, arg1, arg2)
326 #define ADD_BINOP(op) do { \
327 MONO_INST_NEW (cfg, ins, (op)); \
329 ins->sreg1 = sp [0]->dreg; \
330 ins->sreg2 = sp [1]->dreg; \
331 type_from_op (ins, sp [0], sp [1]); \
333 /* Have to insert a widening op */ \
334 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
335 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
336 MONO_ADD_INS ((cfg)->cbb, (ins)); \
337 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
340 #define ADD_UNOP(op) do { \
341 MONO_INST_NEW (cfg, ins, (op)); \
343 ins->sreg1 = sp [0]->dreg; \
344 type_from_op (ins, sp [0], NULL); \
346 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
347 MONO_ADD_INS ((cfg)->cbb, (ins)); \
348 *sp++ = mono_decompose_opcode (cfg, ins); \
351 #define ADD_BINCOND(next_block) do { \
354 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
355 cmp->sreg1 = sp [0]->dreg; \
356 cmp->sreg2 = sp [1]->dreg; \
357 type_from_op (cmp, sp [0], sp [1]); \
359 type_from_op (ins, sp [0], sp [1]); \
360 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
361 GET_BBLOCK (cfg, tblock, target); \
362 link_bblock (cfg, bblock, tblock); \
363 ins->inst_true_bb = tblock; \
364 if ((next_block)) { \
365 link_bblock (cfg, bblock, (next_block)); \
366 ins->inst_false_bb = (next_block); \
367 start_new_bblock = 1; \
369 GET_BBLOCK (cfg, tblock, ip); \
370 link_bblock (cfg, bblock, tblock); \
371 ins->inst_false_bb = tblock; \
372 start_new_bblock = 2; \
374 if (sp != stack_start) { \
375 handle_stack_args (cfg, stack_start, sp - stack_start); \
376 CHECK_UNVERIFIABLE (cfg); \
378 MONO_ADD_INS (bblock, cmp); \
379 MONO_ADD_INS (bblock, ins); \
383 * link_bblock: Links two basic blocks
385 * links two basic blocks in the control flow graph, the 'from'
386 * argument is the starting block and the 'to' argument is the block
387 * the control flow ends to after 'from'.
390 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
392 MonoBasicBlock **newa;
396 if (from->cil_code) {
398 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
400 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
403 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
405 printf ("edge from entry to exit\n");
410 for (i = 0; i < from->out_count; ++i) {
411 if (to == from->out_bb [i]) {
417 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
418 for (i = 0; i < from->out_count; ++i) {
419 newa [i] = from->out_bb [i];
427 for (i = 0; i < to->in_count; ++i) {
428 if (from == to->in_bb [i]) {
434 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
435 for (i = 0; i < to->in_count; ++i) {
436 newa [i] = to->in_bb [i];
445 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
447 link_bblock (cfg, from, to);
451 * mono_find_block_region:
453 * We mark each basic block with a region ID. We use that to avoid BB
454 * optimizations when blocks are in different regions.
457 * A region token that encodes where this region is, and information
458 * about the clause owner for this block.
460 * The region encodes the try/catch/filter clause that owns this block
461 * as well as the type. -1 is a special value that represents a block
462 * that is in none of try/catch/filter.
465 mono_find_block_region (MonoCompile *cfg, int offset)
467 MonoMethodHeader *header = cfg->header;
468 MonoExceptionClause *clause;
471 for (i = 0; i < header->num_clauses; ++i) {
472 clause = &header->clauses [i];
473 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
474 (offset < (clause->handler_offset)))
475 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
477 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
478 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
479 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
480 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
481 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
483 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
486 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
487 return ((i + 1) << 8) | clause->flags;
494 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
496 MonoMethodHeader *header = cfg->header;
497 MonoExceptionClause *clause;
501 for (i = 0; i < header->num_clauses; ++i) {
502 clause = &header->clauses [i];
503 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
504 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
505 if (clause->flags == type)
506 res = g_list_append (res, clause);
513 mono_create_spvar_for_region (MonoCompile *cfg, int region)
517 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
521 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
522 /* prevent it from being register allocated */
523 var->flags |= MONO_INST_INDIRECT;
525 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
529 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
531 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
535 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
539 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
543 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
544 /* prevent it from being register allocated */
545 var->flags |= MONO_INST_INDIRECT;
547 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
553 * Returns the type used in the eval stack when @type is loaded.
554 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
557 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
561 inst->klass = klass = mono_class_from_mono_type (type);
563 inst->type = STACK_MP;
568 switch (type->type) {
570 inst->type = STACK_INV;
574 case MONO_TYPE_BOOLEAN:
580 inst->type = STACK_I4;
585 case MONO_TYPE_FNPTR:
586 inst->type = STACK_PTR;
588 case MONO_TYPE_CLASS:
589 case MONO_TYPE_STRING:
590 case MONO_TYPE_OBJECT:
591 case MONO_TYPE_SZARRAY:
592 case MONO_TYPE_ARRAY:
593 inst->type = STACK_OBJ;
597 inst->type = STACK_I8;
601 inst->type = STACK_R8;
603 case MONO_TYPE_VALUETYPE:
604 if (type->data.klass->enumtype) {
605 type = mono_class_enum_basetype (type->data.klass);
609 inst->type = STACK_VTYPE;
612 case MONO_TYPE_TYPEDBYREF:
613 inst->klass = mono_defaults.typed_reference_class;
614 inst->type = STACK_VTYPE;
616 case MONO_TYPE_GENERICINST:
617 type = &type->data.generic_class->container_class->byval_arg;
620 case MONO_TYPE_MVAR :
621 /* FIXME: all the arguments must be references for now,
622 * later look inside cfg and see if the arg num is
625 g_assert (cfg->generic_sharing_context);
626 inst->type = STACK_OBJ;
629 g_error ("unknown type 0x%02x in eval stack type", type->type);
634 * The following tables are used to quickly validate the IL code in type_from_op ().
637 bin_num_table [STACK_MAX] [STACK_MAX] = {
638 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
639 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
640 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
641 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
642 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
643 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
644 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
645 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
650 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
653 /* reduce the size of this table */
655 bin_int_table [STACK_MAX] [STACK_MAX] = {
656 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
657 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
658 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
659 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
660 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
661 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
662 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
663 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
667 bin_comp_table [STACK_MAX] [STACK_MAX] = {
668 /* Inv i L p F & O vt */
670 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
671 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
672 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
673 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
674 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
675 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
676 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
679 /* reduce the size of this table */
681 shift_table [STACK_MAX] [STACK_MAX] = {
682 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
683 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
684 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
685 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
686 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
687 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
688 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
689 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
693 * Tables to map from the non-specific opcode to the matching
694 * type-specific opcode.
696 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
698 binops_op_map [STACK_MAX] = {
699 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
702 /* handles from CEE_NEG to CEE_CONV_U8 */
704 unops_op_map [STACK_MAX] = {
705 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
708 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
710 ovfops_op_map [STACK_MAX] = {
711 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
714 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
716 ovf2ops_op_map [STACK_MAX] = {
717 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
720 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
722 ovf3ops_op_map [STACK_MAX] = {
723 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
726 /* handles from CEE_BEQ to CEE_BLT_UN */
728 beqops_op_map [STACK_MAX] = {
729 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
732 /* handles from CEE_CEQ to CEE_CLT_UN */
734 ceqops_op_map [STACK_MAX] = {
735 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
739 * Sets ins->type (the type on the eval stack) according to the
740 * type of the opcode and the arguments to it.
741 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
743 * FIXME: this function sets ins->type unconditionally in some cases, but
744 * it should set it to invalid for some types (a conv.x on an object)
747 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
749 switch (ins->opcode) {
756 /* FIXME: check unverifiable args for STACK_MP */
757 ins->type = bin_num_table [src1->type] [src2->type];
758 ins->opcode += binops_op_map [ins->type];
765 ins->type = bin_int_table [src1->type] [src2->type];
766 ins->opcode += binops_op_map [ins->type];
771 ins->type = shift_table [src1->type] [src2->type];
772 ins->opcode += binops_op_map [ins->type];
777 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
778 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
779 ins->opcode = OP_LCOMPARE;
780 else if (src1->type == STACK_R8)
781 ins->opcode = OP_FCOMPARE;
783 ins->opcode = OP_ICOMPARE;
785 case OP_ICOMPARE_IMM:
786 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
787 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
788 ins->opcode = OP_LCOMPARE_IMM;
800 ins->opcode += beqops_op_map [src1->type];
803 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
804 ins->opcode += ceqops_op_map [src1->type];
810 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
811 ins->opcode += ceqops_op_map [src1->type];
815 ins->type = neg_table [src1->type];
816 ins->opcode += unops_op_map [ins->type];
819 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
820 ins->type = src1->type;
822 ins->type = STACK_INV;
823 ins->opcode += unops_op_map [ins->type];
829 ins->type = STACK_I4;
830 ins->opcode += unops_op_map [src1->type];
833 ins->type = STACK_R8;
834 switch (src1->type) {
837 ins->opcode = OP_ICONV_TO_R_UN;
840 ins->opcode = OP_LCONV_TO_R_UN;
844 case CEE_CONV_OVF_I1:
845 case CEE_CONV_OVF_U1:
846 case CEE_CONV_OVF_I2:
847 case CEE_CONV_OVF_U2:
848 case CEE_CONV_OVF_I4:
849 case CEE_CONV_OVF_U4:
850 ins->type = STACK_I4;
851 ins->opcode += ovf3ops_op_map [src1->type];
853 case CEE_CONV_OVF_I_UN:
854 case CEE_CONV_OVF_U_UN:
855 ins->type = STACK_PTR;
856 ins->opcode += ovf2ops_op_map [src1->type];
858 case CEE_CONV_OVF_I1_UN:
859 case CEE_CONV_OVF_I2_UN:
860 case CEE_CONV_OVF_I4_UN:
861 case CEE_CONV_OVF_U1_UN:
862 case CEE_CONV_OVF_U2_UN:
863 case CEE_CONV_OVF_U4_UN:
864 ins->type = STACK_I4;
865 ins->opcode += ovf2ops_op_map [src1->type];
868 ins->type = STACK_PTR;
869 switch (src1->type) {
871 ins->opcode = OP_ICONV_TO_U;
875 #if SIZEOF_REGISTER == 8
876 ins->opcode = OP_LCONV_TO_U;
878 ins->opcode = OP_MOVE;
882 ins->opcode = OP_LCONV_TO_U;
885 ins->opcode = OP_FCONV_TO_U;
891 ins->type = STACK_I8;
892 ins->opcode += unops_op_map [src1->type];
894 case CEE_CONV_OVF_I8:
895 case CEE_CONV_OVF_U8:
896 ins->type = STACK_I8;
897 ins->opcode += ovf3ops_op_map [src1->type];
899 case CEE_CONV_OVF_U8_UN:
900 case CEE_CONV_OVF_I8_UN:
901 ins->type = STACK_I8;
902 ins->opcode += ovf2ops_op_map [src1->type];
906 ins->type = STACK_R8;
907 ins->opcode += unops_op_map [src1->type];
910 ins->type = STACK_R8;
914 ins->type = STACK_I4;
915 ins->opcode += ovfops_op_map [src1->type];
920 ins->type = STACK_PTR;
921 ins->opcode += ovfops_op_map [src1->type];
929 ins->type = bin_num_table [src1->type] [src2->type];
930 ins->opcode += ovfops_op_map [src1->type];
931 if (ins->type == STACK_R8)
932 ins->type = STACK_INV;
934 case OP_LOAD_MEMBASE:
935 ins->type = STACK_PTR;
937 case OP_LOADI1_MEMBASE:
938 case OP_LOADU1_MEMBASE:
939 case OP_LOADI2_MEMBASE:
940 case OP_LOADU2_MEMBASE:
941 case OP_LOADI4_MEMBASE:
942 case OP_LOADU4_MEMBASE:
943 ins->type = STACK_PTR;
945 case OP_LOADI8_MEMBASE:
946 ins->type = STACK_I8;
948 case OP_LOADR4_MEMBASE:
949 case OP_LOADR8_MEMBASE:
950 ins->type = STACK_R8;
953 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
957 if (ins->type == STACK_MP)
958 ins->klass = mono_defaults.object_class;
963 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
969 param_table [STACK_MAX] [STACK_MAX] = {
974 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
978 switch (args->type) {
988 for (i = 0; i < sig->param_count; ++i) {
989 switch (args [i].type) {
993 if (!sig->params [i]->byref)
997 if (sig->params [i]->byref)
999 switch (sig->params [i]->type) {
1000 case MONO_TYPE_CLASS:
1001 case MONO_TYPE_STRING:
1002 case MONO_TYPE_OBJECT:
1003 case MONO_TYPE_SZARRAY:
1004 case MONO_TYPE_ARRAY:
1011 if (sig->params [i]->byref)
1013 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1022 /*if (!param_table [args [i].type] [sig->params [i]->type])
1030 * When we need a pointer to the current domain many times in a method, we
1031 * call mono_domain_get() once and we store the result in a local variable.
1032 * This function returns the variable that represents the MonoDomain*.
1034 inline static MonoInst *
1035 mono_get_domainvar (MonoCompile *cfg)
1037 if (!cfg->domainvar)
1038 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1039 return cfg->domainvar;
1043 * The got_var contains the address of the Global Offset Table when AOT
1047 mono_get_got_var (MonoCompile *cfg)
1049 #ifdef MONO_ARCH_NEED_GOT_VAR
1050 if (!cfg->compile_aot)
1052 if (!cfg->got_var) {
1053 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1055 return cfg->got_var;
1062 mono_get_vtable_var (MonoCompile *cfg)
1064 g_assert (cfg->generic_sharing_context);
1066 if (!cfg->rgctx_var) {
1067 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1068 /* force the var to be stack allocated */
1069 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1072 return cfg->rgctx_var;
1076 type_from_stack_type (MonoInst *ins) {
1077 switch (ins->type) {
1078 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1079 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1080 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1081 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1083 return &ins->klass->this_arg;
1084 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1085 case STACK_VTYPE: return &ins->klass->byval_arg;
1087 g_error ("stack type %d to monotype not handled\n", ins->type);
1092 static G_GNUC_UNUSED int
1093 type_to_stack_type (MonoType *t)
1095 t = mono_type_get_underlying_type (t);
1099 case MONO_TYPE_BOOLEAN:
1102 case MONO_TYPE_CHAR:
1109 case MONO_TYPE_FNPTR:
1111 case MONO_TYPE_CLASS:
1112 case MONO_TYPE_STRING:
1113 case MONO_TYPE_OBJECT:
1114 case MONO_TYPE_SZARRAY:
1115 case MONO_TYPE_ARRAY:
1123 case MONO_TYPE_VALUETYPE:
1124 case MONO_TYPE_TYPEDBYREF:
1126 case MONO_TYPE_GENERICINST:
1127 if (mono_type_generic_inst_is_valuetype (t))
1133 g_assert_not_reached ();
1140 array_access_to_klass (int opcode)
1144 return mono_defaults.byte_class;
1146 return mono_defaults.uint16_class;
1149 return mono_defaults.int_class;
1152 return mono_defaults.sbyte_class;
1155 return mono_defaults.int16_class;
1158 return mono_defaults.int32_class;
1160 return mono_defaults.uint32_class;
1163 return mono_defaults.int64_class;
1166 return mono_defaults.single_class;
1169 return mono_defaults.double_class;
1170 case CEE_LDELEM_REF:
1171 case CEE_STELEM_REF:
1172 return mono_defaults.object_class;
1174 g_assert_not_reached ();
1180 * We try to share variables when possible
1183 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1188 /* inlining can result in deeper stacks */
1189 if (slot >= cfg->header->max_stack)
1190 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1192 pos = ins->type - 1 + slot * STACK_MAX;
1194 switch (ins->type) {
1201 if ((vnum = cfg->intvars [pos]))
1202 return cfg->varinfo [vnum];
1203 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1204 cfg->intvars [pos] = res->inst_c0;
1207 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1213 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1216 * Don't use this if a generic_context is set, since that means AOT can't
1217 * look up the method using just the image+token.
1218 * table == 0 means this is a reference made from a wrapper.
1220 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1221 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1222 jump_info_token->image = image;
1223 jump_info_token->token = token;
1224 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1229 * This function is called to handle items that are left on the evaluation stack
1230 * at basic block boundaries. What happens is that we save the values to local variables
1231 * and we reload them later when first entering the target basic block (with the
1232 * handle_loaded_temps () function).
1233 * A single joint point will use the same variables (stored in the array bb->out_stack or
1234 * bb->in_stack, if the basic block is before or after the joint point).
1236 * This function needs to be called _before_ emitting the last instruction of
1237 * the bb (i.e. before emitting a branch).
1238 * If the stack merge fails at a join point, cfg->unverifiable is set.
1241 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1244 MonoBasicBlock *bb = cfg->cbb;
1245 MonoBasicBlock *outb;
1246 MonoInst *inst, **locals;
1251 if (cfg->verbose_level > 3)
1252 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
1253 if (!bb->out_scount) {
1254 bb->out_scount = count;
1255 //printf ("bblock %d has out:", bb->block_num);
1257 for (i = 0; i < bb->out_count; ++i) {
1258 outb = bb->out_bb [i];
1259 /* exception handlers are linked, but they should not be considered for stack args */
1260 if (outb->flags & BB_EXCEPTION_HANDLER)
1262 //printf (" %d", outb->block_num);
1263 if (outb->in_stack) {
1265 bb->out_stack = outb->in_stack;
1271 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1272 for (i = 0; i < count; ++i) {
1274 * try to reuse temps already allocated for this purpouse, if they occupy the same
1275 * stack slot and if they are of the same type.
1276 * This won't cause conflicts since if 'local' is used to
1277 * store one of the values in the in_stack of a bblock, then
1278 * the same variable will be used for the same outgoing stack
1280 * This doesn't work when inlining methods, since the bblocks
1281 * in the inlined methods do not inherit their in_stack from
1282 * the bblock they are inlined to. See bug #58863 for an
1285 if (cfg->inlined_method)
1286 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1288 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
1293 for (i = 0; i < bb->out_count; ++i) {
1294 outb = bb->out_bb [i];
1295 /* exception handlers are linked, but they should not be considered for stack args */
1296 if (outb->flags & BB_EXCEPTION_HANDLER)
1298 if (outb->in_scount) {
1299 if (outb->in_scount != bb->out_scount) {
1300 cfg->unverifiable = TRUE;
1303 continue; /* check they are the same locals */
1305 outb->in_scount = count;
1306 outb->in_stack = bb->out_stack;
1309 locals = bb->out_stack;
1311 for (i = 0; i < count; ++i) {
1312 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1313 inst->cil_code = sp [i]->cil_code;
1314 sp [i] = locals [i];
1315 if (cfg->verbose_level > 3)
1316 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1320 * It is possible that the out bblocks already have in_stack assigned, and
1321 * the in_stacks differ. In this case, we will store to all the different
1328 /* Find a bblock which has a different in_stack */
1330 while (bindex < bb->out_count) {
1331 outb = bb->out_bb [bindex];
1332 /* exception handlers are linked, but they should not be considered for stack args */
1333 if (outb->flags & BB_EXCEPTION_HANDLER) {
1337 if (outb->in_stack != locals) {
1338 for (i = 0; i < count; ++i) {
1339 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1340 inst->cil_code = sp [i]->cil_code;
1341 sp [i] = locals [i];
1342 if (cfg->verbose_level > 3)
1343 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1345 locals = outb->in_stack;
1354 /* Emit code which loads interface_offsets [klass->interface_id]
1355 * The array is stored in memory before vtable.
1358 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1360 if (cfg->compile_aot) {
1361 int ioffset_reg = alloc_preg (cfg);
1362 int iid_reg = alloc_preg (cfg);
1364 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1365 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1366 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
1369 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
1374 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1376 int ibitmap_reg = alloc_preg (cfg);
1377 #ifdef COMPRESSED_INTERFACE_BITMAP
1379 MonoInst *res, *ins;
1380 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1381 MONO_ADD_INS (cfg->cbb, ins);
1383 if (cfg->compile_aot)
1384 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1386 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1387 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1388 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1390 int ibitmap_byte_reg = alloc_preg (cfg);
1392 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1394 if (cfg->compile_aot) {
1395 int iid_reg = alloc_preg (cfg);
1396 int shifted_iid_reg = alloc_preg (cfg);
1397 int ibitmap_byte_address_reg = alloc_preg (cfg);
1398 int masked_iid_reg = alloc_preg (cfg);
1399 int iid_one_bit_reg = alloc_preg (cfg);
1400 int iid_bit_reg = alloc_preg (cfg);
1401 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1402 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1403 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1404 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1405 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1406 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1407 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1408 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
1410 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1411 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1417 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1418 * stored in "klass_reg" implements the interface "klass".
1421 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1423 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1427 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1428 * stored in "vtable_reg" implements the interface "klass".
1431 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1433 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1437 * Emit code which checks whenever the interface id of @klass is smaller than
1438 * than the value given by max_iid_reg.
1441 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1442 MonoBasicBlock *false_target)
1444 if (cfg->compile_aot) {
1445 int iid_reg = alloc_preg (cfg);
1446 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1447 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1450 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1452 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1454 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1457 /* Same as above, but obtains max_iid from a vtable */
1459 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1460 MonoBasicBlock *false_target)
1462 int max_iid_reg = alloc_preg (cfg);
1464 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1465 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1468 /* Same as above, but obtains max_iid from a klass */
1470 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1471 MonoBasicBlock *false_target)
1473 int max_iid_reg = alloc_preg (cfg);
1475 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1476 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1480 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1482 int idepth_reg = alloc_preg (cfg);
1483 int stypes_reg = alloc_preg (cfg);
1484 int stype = alloc_preg (cfg);
1486 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1487 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1488 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1489 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1491 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1492 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1494 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1495 } else if (cfg->compile_aot) {
1496 int const_reg = alloc_preg (cfg);
1497 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1498 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1500 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1502 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
1506 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1508 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
1512 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1514 int intf_reg = alloc_preg (cfg);
1516 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1517 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1518 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1520 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1522 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1526 * Variant of the above that takes a register to the class, not the vtable.
1529 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1531 int intf_bit_reg = alloc_preg (cfg);
1533 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1534 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1535 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1537 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1539 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1543 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1546 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1547 } else if (cfg->compile_aot) {
1548 int const_reg = alloc_preg (cfg);
1549 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1550 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1552 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1554 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1558 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1560 return mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
1564 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1566 if (cfg->compile_aot) {
1567 int const_reg = alloc_preg (cfg);
1568 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1569 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1571 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1573 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1577 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
1580 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1583 int rank_reg = alloc_preg (cfg);
1584 int eclass_reg = alloc_preg (cfg);
1586 g_assert (!klass_inst);
1587 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1588 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1589 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1590 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1591 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
1592 if (klass->cast_class == mono_defaults.object_class) {
1593 int parent_reg = alloc_preg (cfg);
1594 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1595 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1596 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1597 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1598 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1599 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1600 } else if (klass->cast_class == mono_defaults.enum_class) {
1601 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1602 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1603 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1605 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1606 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1609 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1610 /* Check that the object is a vector too */
1611 int bounds_reg = alloc_preg (cfg);
1612 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1613 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1614 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1617 int idepth_reg = alloc_preg (cfg);
1618 int stypes_reg = alloc_preg (cfg);
1619 int stype = alloc_preg (cfg);
1621 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1622 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1623 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1624 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1626 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1627 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1628 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
1633 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1635 return mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
1639 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1643 g_assert (val == 0);
1648 if ((size <= 4) && (size <= align)) {
1651 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1654 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1657 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1659 #if SIZEOF_REGISTER == 8
1661 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
1667 val_reg = alloc_preg (cfg);
1669 if (SIZEOF_REGISTER == 8)
1670 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1672 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1675 /* This could be optimized further if neccesary */
1677 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1684 #if !NO_UNALIGNED_ACCESS
1685 if (SIZEOF_REGISTER == 8) {
1687 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1692 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1700 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1705 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1710 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1716 #endif /* DISABLE_JIT */
1719 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1726 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1727 g_assert (size < 10000);
1730 /* This could be optimized further if neccesary */
1732 cur_reg = alloc_preg (cfg);
1733 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1734 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1741 #if !NO_UNALIGNED_ACCESS
1742 if (SIZEOF_REGISTER == 8) {
1744 cur_reg = alloc_preg (cfg);
1745 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1746 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1755 cur_reg = alloc_preg (cfg);
1756 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1757 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1763 cur_reg = alloc_preg (cfg);
1764 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1765 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1771 cur_reg = alloc_preg (cfg);
1772 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1773 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1783 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
1786 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1789 type = mini_get_basic_type_from_generic (gsctx, type);
1790 switch (type->type) {
1791 case MONO_TYPE_VOID:
1792 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1795 case MONO_TYPE_BOOLEAN:
1798 case MONO_TYPE_CHAR:
1801 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1805 case MONO_TYPE_FNPTR:
1806 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1807 case MONO_TYPE_CLASS:
1808 case MONO_TYPE_STRING:
1809 case MONO_TYPE_OBJECT:
1810 case MONO_TYPE_SZARRAY:
1811 case MONO_TYPE_ARRAY:
1812 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1815 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1818 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1819 case MONO_TYPE_VALUETYPE:
1820 if (type->data.klass->enumtype) {
1821 type = mono_class_enum_basetype (type->data.klass);
1824 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1825 case MONO_TYPE_TYPEDBYREF:
1826 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1827 case MONO_TYPE_GENERICINST:
1828 type = &type->data.generic_class->container_class->byval_arg;
1831 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1837 * target_type_is_incompatible:
1838 * @cfg: MonoCompile context
1840 * Check that the item @arg on the evaluation stack can be stored
1841 * in the target type (can be a local, or field, etc).
1842 * The cfg arg can be used to check if we need verification or just
1845 * Returns: non-0 value if arg can't be stored on a target.
1848 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1850 MonoType *simple_type;
1853 if (target->byref) {
1854 /* FIXME: check that the pointed to types match */
1855 if (arg->type == STACK_MP)
1856 return arg->klass != mono_class_from_mono_type (target);
1857 if (arg->type == STACK_PTR)
1862 simple_type = mono_type_get_underlying_type (target);
1863 switch (simple_type->type) {
1864 case MONO_TYPE_VOID:
1868 case MONO_TYPE_BOOLEAN:
1871 case MONO_TYPE_CHAR:
1874 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1878 /* STACK_MP is needed when setting pinned locals */
1879 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1884 case MONO_TYPE_FNPTR:
1885 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1888 case MONO_TYPE_CLASS:
1889 case MONO_TYPE_STRING:
1890 case MONO_TYPE_OBJECT:
1891 case MONO_TYPE_SZARRAY:
1892 case MONO_TYPE_ARRAY:
1893 if (arg->type != STACK_OBJ)
1895 /* FIXME: check type compatibility */
1899 if (arg->type != STACK_I8)
1904 if (arg->type != STACK_R8)
1907 case MONO_TYPE_VALUETYPE:
1908 if (arg->type != STACK_VTYPE)
1910 klass = mono_class_from_mono_type (simple_type);
1911 if (klass != arg->klass)
1914 case MONO_TYPE_TYPEDBYREF:
1915 if (arg->type != STACK_VTYPE)
1917 klass = mono_class_from_mono_type (simple_type);
1918 if (klass != arg->klass)
1921 case MONO_TYPE_GENERICINST:
1922 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1923 if (arg->type != STACK_VTYPE)
1925 klass = mono_class_from_mono_type (simple_type);
1926 if (klass != arg->klass)
1930 if (arg->type != STACK_OBJ)
1932 /* FIXME: check type compatibility */
1936 case MONO_TYPE_MVAR:
1937 /* FIXME: all the arguments must be references for now,
1938 * later look inside cfg and see if the arg num is
1939 * really a reference
1941 g_assert (cfg->generic_sharing_context);
1942 if (arg->type != STACK_OBJ)
1946 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1952 * Prepare arguments for passing to a function call.
1953 * Return a non-zero value if the arguments can't be passed to the given
1955 * The type checks are not yet complete and some conversions may need
1956 * casts on 32 or 64 bit architectures.
1958 * FIXME: implement this using target_type_is_incompatible ()
1961 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1963 MonoType *simple_type;
1967 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1971 for (i = 0; i < sig->param_count; ++i) {
1972 if (sig->params [i]->byref) {
1973 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1977 simple_type = sig->params [i];
1978 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1980 switch (simple_type->type) {
1981 case MONO_TYPE_VOID:
1986 case MONO_TYPE_BOOLEAN:
1989 case MONO_TYPE_CHAR:
1992 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
1998 case MONO_TYPE_FNPTR:
1999 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2002 case MONO_TYPE_CLASS:
2003 case MONO_TYPE_STRING:
2004 case MONO_TYPE_OBJECT:
2005 case MONO_TYPE_SZARRAY:
2006 case MONO_TYPE_ARRAY:
2007 if (args [i]->type != STACK_OBJ)
2012 if (args [i]->type != STACK_I8)
2017 if (args [i]->type != STACK_R8)
2020 case MONO_TYPE_VALUETYPE:
2021 if (simple_type->data.klass->enumtype) {
2022 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2025 if (args [i]->type != STACK_VTYPE)
2028 case MONO_TYPE_TYPEDBYREF:
2029 if (args [i]->type != STACK_VTYPE)
2032 case MONO_TYPE_GENERICINST:
2033 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2037 g_error ("unknown type 0x%02x in check_call_signature",
2045 callvirt_to_call (int opcode)
2050 case OP_VOIDCALLVIRT:
2059 g_assert_not_reached ();
2066 callvirt_to_call_membase (int opcode)
2070 return OP_CALL_MEMBASE;
2071 case OP_VOIDCALLVIRT:
2072 return OP_VOIDCALL_MEMBASE;
2074 return OP_FCALL_MEMBASE;
2076 return OP_LCALL_MEMBASE;
2078 return OP_VCALL_MEMBASE;
2080 g_assert_not_reached ();
2086 #ifdef MONO_ARCH_HAVE_IMT
2088 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2090 #ifdef MONO_ARCH_IMT_REG
2091 int method_reg = alloc_preg (cfg);
2094 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2095 } else if (cfg->compile_aot) {
2096 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2099 MONO_INST_NEW (cfg, ins, OP_PCONST);
2100 ins->inst_p0 = call->method;
2101 ins->dreg = method_reg;
2102 MONO_ADD_INS (cfg->cbb, ins);
2105 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2107 mono_arch_emit_imt_argument (cfg, call, imt_arg);
2112 static MonoJumpInfo *
2113 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2115 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2119 ji->data.target = target;
2124 inline static MonoCallInst *
2125 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2126 MonoInst **args, int calli, int virtual, int tail)
2129 #ifdef MONO_ARCH_SOFT_FLOAT
2134 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2136 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2139 call->signature = sig;
2141 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
2144 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2145 call->vret_var = cfg->vret_addr;
2146 //g_assert_not_reached ();
2148 } else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2149 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2152 temp->backend.is_pinvoke = sig->pinvoke;
2155 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2156 * address of return value to increase optimization opportunities.
2157 * Before vtype decomposition, the dreg of the call ins itself represents the
2158 * fact the call modifies the return value. After decomposition, the call will
2159 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2160 * will be transformed into an LDADDR.
2162 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2163 loada->dreg = alloc_preg (cfg);
2164 loada->inst_p0 = temp;
2165 /* We reference the call too since call->dreg could change during optimization */
2166 loada->inst_p1 = call;
2167 MONO_ADD_INS (cfg->cbb, loada);
2169 call->inst.dreg = temp->dreg;
2171 call->vret_var = loada;
2172 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2173 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2175 #ifdef MONO_ARCH_SOFT_FLOAT
2176 if (COMPILE_SOFT_FLOAT (cfg)) {
2178 * If the call has a float argument, we would need to do an r8->r4 conversion using
2179 * an icall, but that cannot be done during the call sequence since it would clobber
2180 * the call registers + the stack. So we do it before emitting the call.
2182 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2184 MonoInst *in = call->args [i];
2186 if (i >= sig->hasthis)
2187 t = sig->params [i - sig->hasthis];
2189 t = &mono_defaults.int_class->byval_arg;
2190 t = mono_type_get_underlying_type (t);
2192 if (!t->byref && t->type == MONO_TYPE_R4) {
2193 MonoInst *iargs [1];
2197 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2199 /* The result will be in an int vreg */
2200 call->args [i] = conv;
2207 if (COMPILE_LLVM (cfg))
2208 mono_llvm_emit_call (cfg, call);
2210 mono_arch_emit_call (cfg, call);
2212 mono_arch_emit_call (cfg, call);
2215 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2216 cfg->flags |= MONO_CFG_HAS_CALLS;
2221 inline static MonoInst*
2222 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2224 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE);
2226 call->inst.sreg1 = addr->dreg;
2228 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2230 return (MonoInst*)call;
2233 inline static MonoInst*
2234 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2236 #ifdef MONO_ARCH_RGCTX_REG
2241 rgctx_reg = mono_alloc_preg (cfg);
2242 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2244 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2246 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2247 cfg->uses_rgctx_reg = TRUE;
2248 call->rgctx_reg = TRUE;
2250 return (MonoInst*)call;
2252 g_assert_not_reached ();
2258 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
2260 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, int rgctx_type);
2263 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2264 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2266 gboolean might_be_remote;
2267 gboolean virtual = this != NULL;
2268 gboolean enable_for_aot = TRUE;
2272 if (method->string_ctor) {
2273 /* Create the real signature */
2274 /* FIXME: Cache these */
2275 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2276 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2281 might_be_remote = this && sig->hasthis &&
2282 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2283 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this);
2285 context_used = mono_method_check_context_used (method);
2286 if (might_be_remote && context_used) {
2289 g_assert (cfg->generic_sharing_context);
2291 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2293 return mono_emit_calli (cfg, sig, args, addr);
2296 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE);
2298 if (might_be_remote)
2299 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2301 call->method = method;
2302 call->inst.flags |= MONO_INST_HAS_METHOD;
2303 call->inst.inst_left = this;
2306 int vtable_reg, slot_reg, this_reg;
2308 this_reg = this->dreg;
2310 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2311 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2312 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2314 /* Make a call to delegate->invoke_impl */
2315 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2316 call->inst.inst_basereg = this_reg;
2317 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2318 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2320 return (MonoInst*)call;
2324 if ((!cfg->compile_aot || enable_for_aot) &&
2325 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2326 (MONO_METHOD_IS_FINAL (method) &&
2327 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2328 !(method->klass->marshalbyref && context_used)) {
2330 * the method is not virtual, we just need to ensure this is not null
2331 * and then we can call the method directly.
2333 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2335 * The check above ensures method is not gshared, this is needed since
2336 * gshared methods can't have wrappers.
2338 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2341 if (!method->string_ctor)
2342 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2344 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2346 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2348 return (MonoInst*)call;
2351 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2353 * the method is virtual, but we can statically dispatch since either
2354 * it's class or the method itself are sealed.
2355 * But first we need to ensure it's not a null reference.
2357 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2359 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2360 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2362 return (MonoInst*)call;
2365 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2367 vtable_reg = alloc_preg (cfg);
2368 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2369 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2371 #ifdef MONO_ARCH_HAVE_IMT
2373 guint32 imt_slot = mono_method_get_imt_slot (method);
2374 emit_imt_argument (cfg, call, imt_arg);
2375 slot_reg = vtable_reg;
2376 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2379 if (slot_reg == -1) {
2380 slot_reg = alloc_preg (cfg);
2381 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2382 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2385 slot_reg = vtable_reg;
2386 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2387 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2388 #ifdef MONO_ARCH_HAVE_IMT
2390 g_assert (mono_method_signature (method)->generic_param_count);
2391 emit_imt_argument (cfg, call, imt_arg);
2396 call->inst.sreg1 = slot_reg;
2397 call->virtual = TRUE;
2400 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2402 return (MonoInst*)call;
2406 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2407 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
2409 #ifdef MONO_ARCH_RGCTX_REG
2416 #ifdef MONO_ARCH_RGCTX_REG
2417 rgctx_reg = mono_alloc_preg (cfg);
2418 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2423 ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2425 call = (MonoCallInst*)ins;
2427 #ifdef MONO_ARCH_RGCTX_REG
2428 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2429 cfg->uses_rgctx_reg = TRUE;
2430 call->rgctx_reg = TRUE;
2440 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2442 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
2446 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2453 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE);
2456 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2458 return (MonoInst*)call;
2462 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2464 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2468 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2472 * mono_emit_abs_call:
2474 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2476 inline static MonoInst*
2477 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2478 MonoMethodSignature *sig, MonoInst **args)
2480 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2484 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2487 if (cfg->abs_patches == NULL)
2488 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2489 g_hash_table_insert (cfg->abs_patches, ji, ji);
2490 ins = mono_emit_native_call (cfg, ji, sig, args);
2491 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
2496 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2498 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2499 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2503 * Native code might return non register sized integers
2504 * without initializing the upper bits.
2506 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2507 case OP_LOADI1_MEMBASE:
2508 widen_op = OP_ICONV_TO_I1;
2510 case OP_LOADU1_MEMBASE:
2511 widen_op = OP_ICONV_TO_U1;
2513 case OP_LOADI2_MEMBASE:
2514 widen_op = OP_ICONV_TO_I2;
2516 case OP_LOADU2_MEMBASE:
2517 widen_op = OP_ICONV_TO_U2;
2523 if (widen_op != -1) {
2524 int dreg = alloc_preg (cfg);
2527 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2528 widen->type = ins->type;
2538 get_memcpy_method (void)
2540 static MonoMethod *memcpy_method = NULL;
2541 if (!memcpy_method) {
2542 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2544 g_error ("Old corlib found. Install a new one");
2546 return memcpy_method;
2550 * Emit code to copy a valuetype of type @klass whose address is stored in
2551 * @src->dreg to memory whose address is stored at @dest->dreg.
2554 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2556 MonoInst *iargs [3];
2559 MonoMethod *memcpy_method;
2563 * This check breaks with spilled vars... need to handle it during verification anyway.
2564 * g_assert (klass && klass == src->klass && klass == dest->klass);
2568 n = mono_class_native_size (klass, &align);
2570 n = mono_class_value_size (klass, &align);
2572 #if HAVE_WRITE_BARRIERS
2573 /* if native is true there should be no references in the struct */
2574 if (klass->has_references && !native) {
2575 /* Avoid barriers when storing to the stack */
2576 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2577 (dest->opcode == OP_LDADDR))) {
2578 int context_used = 0;
2583 if (cfg->generic_sharing_context)
2584 context_used = mono_class_check_context_used (klass);
2586 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2588 if (cfg->compile_aot) {
2589 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2591 EMIT_NEW_PCONST (cfg, iargs [2], klass);
2592 mono_class_compute_gc_descriptor (klass);
2596 /* FIXME: this does the memcpy as well (or
2597 should), so we don't need the memcpy
2599 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
2604 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2605 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2606 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2610 EMIT_NEW_ICONST (cfg, iargs [2], n);
2612 memcpy_method = get_memcpy_method ();
2613 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
2618 get_memset_method (void)
2620 static MonoMethod *memset_method = NULL;
2621 if (!memset_method) {
2622 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2624 g_error ("Old corlib found. Install a new one");
2626 return memset_method;
2630 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2632 MonoInst *iargs [3];
2635 MonoMethod *memset_method;
2637 /* FIXME: Optimize this for the case when dest is an LDADDR */
2639 mono_class_init (klass);
2640 n = mono_class_value_size (klass, &align);
2642 if (n <= sizeof (gpointer) * 5) {
2643 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2646 memset_method = get_memset_method ();
2648 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2649 EMIT_NEW_ICONST (cfg, iargs [2], n);
2650 mono_emit_method_call (cfg, memset_method, iargs, NULL);
2655 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2657 MonoInst *this = NULL;
2659 g_assert (cfg->generic_sharing_context);
2661 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2662 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2663 !method->klass->valuetype)
2664 EMIT_NEW_ARGLOAD (cfg, this, 0);
2666 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2667 MonoInst *mrgctx_loc, *mrgctx_var;
2670 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2672 mrgctx_loc = mono_get_vtable_var (cfg);
2673 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2676 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2677 MonoInst *vtable_loc, *vtable_var;
2681 vtable_loc = mono_get_vtable_var (cfg);
2682 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2684 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2685 MonoInst *mrgctx_var = vtable_var;
2688 vtable_reg = alloc_preg (cfg);
2689 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2690 vtable_var->type = STACK_PTR;
2696 int vtable_reg, res_reg;
2698 vtable_reg = alloc_preg (cfg);
2699 res_reg = alloc_preg (cfg);
2700 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
2705 static MonoJumpInfoRgctxEntry *
2706 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2708 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2709 res->method = method;
2710 res->in_mrgctx = in_mrgctx;
2711 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2712 res->data->type = patch_type;
2713 res->data->data.target = patch_data;
2714 res->info_type = info_type;
2719 static inline MonoInst*
2720 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2722 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
2726 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2727 MonoClass *klass, int rgctx_type)
2729 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2730 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2732 return emit_rgctx_fetch (cfg, rgctx, entry);
2736 * emit_get_rgctx_method:
2738 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2739 * normal constants, else emit a load from the rgctx.
2742 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2743 MonoMethod *cmethod, int rgctx_type)
2745 if (!context_used) {
2748 switch (rgctx_type) {
2749 case MONO_RGCTX_INFO_METHOD:
2750 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
2752 case MONO_RGCTX_INFO_METHOD_RGCTX:
2753 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
2756 g_assert_not_reached ();
2759 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2760 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2762 return emit_rgctx_fetch (cfg, rgctx, entry);
2767 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2768 MonoClassField *field, int rgctx_type)
2770 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2771 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2773 return emit_rgctx_fetch (cfg, rgctx, entry);
2777 * On return the caller must check @klass for load errors.
2780 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
2782 MonoInst *vtable_arg;
2784 int context_used = 0;
2786 if (cfg->generic_sharing_context)
2787 context_used = mono_class_check_context_used (klass);
2790 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
2791 klass, MONO_RGCTX_INFO_VTABLE);
2793 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2797 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
2800 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
2801 #ifdef MONO_ARCH_VTABLE_REG
2802 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
2803 cfg->uses_vtable_reg = TRUE;
2810 * On return the caller must check @array_class for load errors
2813 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
2815 int vtable_reg = alloc_preg (cfg);
2816 int context_used = 0;
2818 if (cfg->generic_sharing_context)
2819 context_used = mono_class_check_context_used (array_class);
2821 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
2823 if (cfg->opt & MONO_OPT_SHARED) {
2824 int class_reg = alloc_preg (cfg);
2825 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2826 if (cfg->compile_aot) {
2827 int klass_reg = alloc_preg (cfg);
2828 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
2829 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
2831 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
2833 } else if (context_used) {
2834 MonoInst *vtable_ins;
2836 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
2837 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
2839 if (cfg->compile_aot) {
2843 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
2845 vt_reg = alloc_preg (cfg);
2846 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
2847 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
2850 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
2852 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
2856 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
2860 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
2862 if (mini_get_debug_options ()->better_cast_details) {
2863 int to_klass_reg = alloc_preg (cfg);
2864 int vtable_reg = alloc_preg (cfg);
2865 int klass_reg = alloc_preg (cfg);
2866 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2869 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
2873 MONO_ADD_INS (cfg->cbb, tls_get);
2874 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2875 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2877 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
2878 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
2879 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
2884 reset_cast_details (MonoCompile *cfg)
2886 /* Reset the variables holding the cast details */
2887 if (mini_get_debug_options ()->better_cast_details) {
2888 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2890 MONO_ADD_INS (cfg->cbb, tls_get);
2891 /* It is enough to reset the from field */
2892 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
2897 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
2898 * generic code is generated.
2901 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
2903 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
2906 MonoInst *rgctx, *addr;
2908 /* FIXME: What if the class is shared? We might not
2909 have to get the address of the method from the
2911 addr = emit_get_rgctx_method (cfg, context_used, method,
2912 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2914 rgctx = emit_get_rgctx (cfg, method, context_used);
2916 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
2918 return mono_emit_method_call (cfg, method, &val, NULL);
2923 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
2927 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
2928 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
2929 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
2930 int rank_reg = alloc_dreg (cfg ,STACK_I4);
2932 obj_reg = sp [0]->dreg;
2933 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2934 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2936 /* FIXME: generics */
2937 g_assert (klass->rank == 0);
2940 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
2941 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2943 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2944 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
2947 MonoInst *element_class;
2949 /* This assertion is from the unboxcast insn */
2950 g_assert (klass->rank == 0);
2952 element_class = emit_get_rgctx_klass (cfg, context_used,
2953 klass->element_class, MONO_RGCTX_INFO_KLASS);
2955 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
2956 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2958 save_cast_details (cfg, klass->element_class, obj_reg);
2959 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
2960 reset_cast_details (cfg);
2963 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
2964 MONO_ADD_INS (cfg->cbb, add);
2965 add->type = STACK_MP;
2972 * Returns NULL and set the cfg exception on error.
2975 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box)
2977 MonoInst *iargs [2];
2980 if (cfg->opt & MONO_OPT_SHARED) {
2981 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2982 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
2984 alloc_ftn = mono_object_new;
2985 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
2986 /* This happens often in argument checking code, eg. throw new FooException... */
2987 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
2988 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
2989 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
2991 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2992 MonoMethod *managed_alloc = NULL;
2996 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
2997 cfg->exception_ptr = klass;
3001 #ifndef MONO_CROSS_COMPILE
3002 managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3005 if (managed_alloc) {
3006 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3007 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3009 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
3011 guint32 lw = vtable->klass->instance_size;
3012 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3013 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3014 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3017 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3021 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3025 handle_alloc_from_inst (MonoCompile *cfg, MonoClass *klass, MonoInst *data_inst,
3028 MonoInst *iargs [2];
3029 MonoMethod *managed_alloc = NULL;
3033 FIXME: we cannot get managed_alloc here because we can't get
3034 the class's vtable (because it's not a closed class)
3036 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3037 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3040 if (cfg->opt & MONO_OPT_SHARED) {
3041 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3042 iargs [1] = data_inst;
3043 alloc_ftn = mono_object_new;
3045 if (managed_alloc) {
3046 iargs [0] = data_inst;
3047 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3050 iargs [0] = data_inst;
3051 alloc_ftn = mono_object_new_specific;
3054 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3058 * Returns NULL and set the cfg exception on error.
3061 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass)
3063 MonoInst *alloc, *ins;
3065 if (mono_class_is_nullable (klass)) {
3066 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3067 return mono_emit_method_call (cfg, method, &val, NULL);
3070 alloc = handle_alloc (cfg, klass, TRUE);
3074 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3080 handle_box_from_inst (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoInst *data_inst)
3082 MonoInst *alloc, *ins;
3084 if (mono_class_is_nullable (klass)) {
3085 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3086 /* FIXME: What if the class is shared? We might not
3087 have to get the method address from the RGCTX. */
3088 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3089 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3090 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3092 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3094 alloc = handle_alloc_from_inst (cfg, klass, data_inst, TRUE);
3096 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3102 // FIXME: This doesn't work yet (class libs tests fail?)
3103 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || mono_class_has_variant_generic_params (klass) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
3106 * Returns NULL and set the cfg exception on error.
3109 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3111 MonoBasicBlock *is_null_bb;
3112 int obj_reg = src->dreg;
3113 int vtable_reg = alloc_preg (cfg);
3114 MonoInst *klass_inst = NULL;
3119 klass_inst = emit_get_rgctx_klass (cfg, context_used,
3120 klass, MONO_RGCTX_INFO_KLASS);
3122 if (is_complex_isinst (klass)) {
3123 /* Complex case, handle by an icall */
3129 args [1] = klass_inst;
3131 return mono_emit_jit_icall (cfg, mono_object_castclass, args);
3133 /* Simple case, handled by the code below */
3137 NEW_BBLOCK (cfg, is_null_bb);
3139 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3140 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3142 save_cast_details (cfg, klass, obj_reg);
3144 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3145 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3146 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3148 int klass_reg = alloc_preg (cfg);
3150 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3152 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3153 /* the remoting code is broken, access the class for now */
3154 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3155 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3157 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3158 cfg->exception_ptr = klass;
3161 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3163 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3164 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3166 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3168 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3169 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3173 MONO_START_BB (cfg, is_null_bb);
3175 reset_cast_details (cfg);
3181 * Returns NULL and set the cfg exception on error.
3184 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3187 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3188 int obj_reg = src->dreg;
3189 int vtable_reg = alloc_preg (cfg);
3190 int res_reg = alloc_preg (cfg);
3191 MonoInst *klass_inst = NULL;
3194 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3196 if (is_complex_isinst (klass)) {
3199 /* Complex case, handle by an icall */
3205 args [1] = klass_inst;
3207 return mono_emit_jit_icall (cfg, mono_object_isinst, args);
3209 /* Simple case, the code below can handle it */
3213 NEW_BBLOCK (cfg, is_null_bb);
3214 NEW_BBLOCK (cfg, false_bb);
3215 NEW_BBLOCK (cfg, end_bb);
3217 /* Do the assignment at the beginning, so the other assignment can be if converted */
3218 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3219 ins->type = STACK_OBJ;
3222 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3223 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3225 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3227 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3228 g_assert (!context_used);
3229 /* the is_null_bb target simply copies the input register to the output */
3230 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3232 int klass_reg = alloc_preg (cfg);
3235 int rank_reg = alloc_preg (cfg);
3236 int eclass_reg = alloc_preg (cfg);
3238 g_assert (!context_used);
3239 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3240 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3241 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3242 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3243 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
3244 if (klass->cast_class == mono_defaults.object_class) {
3245 int parent_reg = alloc_preg (cfg);
3246 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3247 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3248 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3249 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3250 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3251 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3252 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3253 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3254 } else if (klass->cast_class == mono_defaults.enum_class) {
3255 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3256 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3257 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3258 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3260 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3261 /* Check that the object is a vector too */
3262 int bounds_reg = alloc_preg (cfg);
3263 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3264 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3265 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3268 /* the is_null_bb target simply copies the input register to the output */
3269 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3271 } else if (mono_class_is_nullable (klass)) {
3272 g_assert (!context_used);
3273 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3274 /* the is_null_bb target simply copies the input register to the output */
3275 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
3277 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3278 g_assert (!context_used);
3279 /* the remoting code is broken, access the class for now */
3280 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3281 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3283 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3284 cfg->exception_ptr = klass;
3287 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3289 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3290 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3292 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3293 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3295 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3296 /* the is_null_bb target simply copies the input register to the output */
3297 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
3302 MONO_START_BB (cfg, false_bb);
3304 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3305 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3307 MONO_START_BB (cfg, is_null_bb);
3309 MONO_START_BB (cfg, end_bb);
3315 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3317 /* This opcode takes as input an object reference and a class, and returns:
3318 0) if the object is an instance of the class,
3319 1) if the object is not instance of the class,
3320 2) if the object is a proxy whose type cannot be determined */
3323 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3324 int obj_reg = src->dreg;
3325 int dreg = alloc_ireg (cfg);
3327 int klass_reg = alloc_preg (cfg);
3329 NEW_BBLOCK (cfg, true_bb);
3330 NEW_BBLOCK (cfg, false_bb);
3331 NEW_BBLOCK (cfg, false2_bb);
3332 NEW_BBLOCK (cfg, end_bb);
3333 NEW_BBLOCK (cfg, no_proxy_bb);
3335 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3336 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3338 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3339 NEW_BBLOCK (cfg, interface_fail_bb);
3341 tmp_reg = alloc_preg (cfg);
3342 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3343 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
3344 MONO_START_BB (cfg, interface_fail_bb);
3345 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3347 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3349 tmp_reg = alloc_preg (cfg);
3350 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3351 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3352 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
3354 tmp_reg = alloc_preg (cfg);
3355 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3356 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3358 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3359 tmp_reg = alloc_preg (cfg);
3360 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3361 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3363 tmp_reg = alloc_preg (cfg);
3364 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3365 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3366 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3368 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3369 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3371 MONO_START_BB (cfg, no_proxy_bb);
3373 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
3376 MONO_START_BB (cfg, false_bb);
3378 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3379 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3381 MONO_START_BB (cfg, false2_bb);
3383 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3384 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3386 MONO_START_BB (cfg, true_bb);
3388 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3390 MONO_START_BB (cfg, end_bb);
3393 MONO_INST_NEW (cfg, ins, OP_ICONST);
3395 ins->type = STACK_I4;
3401 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3403 /* This opcode takes as input an object reference and a class, and returns:
3404 0) if the object is an instance of the class,
3405 1) if the object is a proxy whose type cannot be determined
3406 an InvalidCastException exception is thrown otherwhise*/
3409 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3410 int obj_reg = src->dreg;
3411 int dreg = alloc_ireg (cfg);
3412 int tmp_reg = alloc_preg (cfg);
3413 int klass_reg = alloc_preg (cfg);
3415 NEW_BBLOCK (cfg, end_bb);
3416 NEW_BBLOCK (cfg, ok_result_bb);
3418 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3419 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
3421 save_cast_details (cfg, klass, obj_reg);
3423 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3424 NEW_BBLOCK (cfg, interface_fail_bb);
3426 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3427 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3428 MONO_START_BB (cfg, interface_fail_bb);
3429 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3431 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3433 tmp_reg = alloc_preg (cfg);
3434 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3435 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3436 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
3438 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3439 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3442 NEW_BBLOCK (cfg, no_proxy_bb);
3444 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3445 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3446 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3448 tmp_reg = alloc_preg (cfg);
3449 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3450 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3452 tmp_reg = alloc_preg (cfg);
3453 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3454 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3455 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3457 NEW_BBLOCK (cfg, fail_1_bb);
3459 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3461 MONO_START_BB (cfg, fail_1_bb);
3463 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3464 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3466 MONO_START_BB (cfg, no_proxy_bb);
3468 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3471 MONO_START_BB (cfg, ok_result_bb);
3473 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3475 MONO_START_BB (cfg, end_bb);
3478 MONO_INST_NEW (cfg, ins, OP_ICONST);
3480 ins->type = STACK_I4;
3486 * Returns NULL and set the cfg exception on error.
3488 static G_GNUC_UNUSED MonoInst*
3489 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
3491 gpointer *trampoline;
3492 MonoInst *obj, *method_ins, *tramp_ins;
3496 obj = handle_alloc (cfg, klass, FALSE);
3500 /* Inline the contents of mono_delegate_ctor */
3502 /* Set target field */
3503 /* Optimize away setting of NULL target */
3504 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
3505 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3507 /* Set method field */
3508 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
3509 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3512 * To avoid looking up the compiled code belonging to the target method
3513 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3514 * store it, and we fill it after the method has been compiled.
3516 if (!cfg->compile_aot && !method->dynamic) {
3517 MonoInst *code_slot_ins;
3520 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
3522 domain = mono_domain_get ();
3523 mono_domain_lock (domain);
3524 if (!domain_jit_info (domain)->method_code_hash)
3525 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3526 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3528 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3529 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3531 mono_domain_unlock (domain);
3533 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3535 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3538 /* Set invoke_impl field */
3539 if (cfg->compile_aot) {
3540 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3542 trampoline = mono_create_delegate_trampoline (klass);
3543 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3545 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3547 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
3553 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3555 MonoJitICallInfo *info;
3557 /* Need to register the icall so it gets an icall wrapper */
3558 info = mono_get_array_new_va_icall (rank);
3560 cfg->flags |= MONO_CFG_HAS_VARARGS;
3562 /* mono_array_new_va () needs a vararg calling convention */
3563 cfg->disable_llvm = TRUE;
3565 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3566 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
3570 mono_emit_load_got_addr (MonoCompile *cfg)
3572 MonoInst *getaddr, *dummy_use;
3574 if (!cfg->got_var || cfg->got_var_allocated)
3577 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3578 getaddr->dreg = cfg->got_var->dreg;
3580 /* Add it to the start of the first bblock */
3581 if (cfg->bb_entry->code) {
3582 getaddr->next = cfg->bb_entry->code;
3583 cfg->bb_entry->code = getaddr;
3586 MONO_ADD_INS (cfg->bb_entry, getaddr);
3588 cfg->got_var_allocated = TRUE;
3591 * Add a dummy use to keep the got_var alive, since real uses might
3592 * only be generated by the back ends.
3593 * Add it to end_bblock, so the variable's lifetime covers the whole
3595 * It would be better to make the usage of the got var explicit in all
3596 * cases when the backend needs it (i.e. calls, throw etc.), so this
3597 * wouldn't be needed.
3599 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3600 MONO_ADD_INS (cfg->bb_exit, dummy_use);
3603 static int inline_limit;
3604 static gboolean inline_limit_inited;
3607 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3609 MonoMethodHeaderSummary header;
3611 #ifdef MONO_ARCH_SOFT_FLOAT
3612 MonoMethodSignature *sig = mono_method_signature (method);
3616 if (cfg->generic_sharing_context)
3619 if (cfg->inline_depth > 10)
3622 #ifdef MONO_ARCH_HAVE_LMF_OPS
3623 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3624 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3625 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3630 if (!mono_method_get_header_summary (method, &header))
3633 /*runtime, icall and pinvoke are checked by summary call*/
3634 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3635 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3636 (method->klass->marshalbyref) ||
3640 /* also consider num_locals? */
3641 /* Do the size check early to avoid creating vtables */
3642 if (!inline_limit_inited) {
3643 if (getenv ("MONO_INLINELIMIT"))
3644 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3646 inline_limit = INLINE_LENGTH_LIMIT;
3647 inline_limit_inited = TRUE;
3649 if (header.code_size >= inline_limit)
3653 * if we can initialize the class of the method right away, we do,
3654 * otherwise we don't allow inlining if the class needs initialization,
3655 * since it would mean inserting a call to mono_runtime_class_init()
3656 * inside the inlined code
3658 if (!(cfg->opt & MONO_OPT_SHARED)) {
3659 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3660 if (cfg->run_cctors && method->klass->has_cctor) {
3661 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
3662 if (!method->klass->runtime_info)
3663 /* No vtable created yet */
3665 vtable = mono_class_vtable (cfg->domain, method->klass);
3668 /* This makes so that inline cannot trigger */
3669 /* .cctors: too many apps depend on them */
3670 /* running with a specific order... */
3671 if (! vtable->initialized)
3673 mono_runtime_class_init (vtable);
3675 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3676 if (!method->klass->runtime_info)
3677 /* No vtable created yet */
3679 vtable = mono_class_vtable (cfg->domain, method->klass);
3682 if (!vtable->initialized)
3687 * If we're compiling for shared code
3688 * the cctor will need to be run at aot method load time, for example,
3689 * or at the end of the compilation of the inlining method.
3691 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3696 * CAS - do not inline methods with declarative security
3697 * Note: this has to be before any possible return TRUE;
3699 if (mono_method_has_declsec (method))
3702 #ifdef MONO_ARCH_SOFT_FLOAT
3704 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3706 for (i = 0; i < sig->param_count; ++i)
3707 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
3715 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
3717 if (vtable->initialized && !cfg->compile_aot)
3720 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3723 if (!mono_class_needs_cctor_run (vtable->klass, method))
3726 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3727 /* The initialization is already done before the method is called */
3734 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index)
3738 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3740 mono_class_init (klass);
3741 size = mono_class_array_element_size (klass);
3743 mult_reg = alloc_preg (cfg);
3744 array_reg = arr->dreg;
3745 index_reg = index->dreg;
3747 #if SIZEOF_REGISTER == 8
3748 /* The array reg is 64 bits but the index reg is only 32 */
3749 if (COMPILE_LLVM (cfg)) {
3751 index2_reg = index_reg;
3753 index2_reg = alloc_preg (cfg);
3754 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
3757 if (index->type == STACK_I8) {
3758 index2_reg = alloc_preg (cfg);
3759 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
3761 index2_reg = index_reg;
3765 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3767 #if defined(TARGET_X86) || defined(TARGET_AMD64)
3768 if (size == 1 || size == 2 || size == 4 || size == 8) {
3769 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3771 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3772 ins->type = STACK_PTR;
3778 add_reg = alloc_preg (cfg);
3780 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3781 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3782 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3783 ins->type = STACK_PTR;
3784 MONO_ADD_INS (cfg->cbb, ins);
3789 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3791 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
3793 int bounds_reg = alloc_preg (cfg);
3794 int add_reg = alloc_preg (cfg);
3795 int mult_reg = alloc_preg (cfg);
3796 int mult2_reg = alloc_preg (cfg);
3797 int low1_reg = alloc_preg (cfg);
3798 int low2_reg = alloc_preg (cfg);
3799 int high1_reg = alloc_preg (cfg);
3800 int high2_reg = alloc_preg (cfg);
3801 int realidx1_reg = alloc_preg (cfg);
3802 int realidx2_reg = alloc_preg (cfg);
3803 int sum_reg = alloc_preg (cfg);
3808 mono_class_init (klass);
3809 size = mono_class_array_element_size (klass);
3811 index1 = index_ins1->dreg;
3812 index2 = index_ins2->dreg;
3814 /* range checking */
3815 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
3816 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
3818 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
3819 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3820 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
3821 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
3822 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
3823 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
3824 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
3826 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
3827 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3828 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
3829 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
3830 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
3831 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
3832 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
3834 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
3835 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
3836 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
3837 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
3838 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3840 ins->type = STACK_MP;
3842 MONO_ADD_INS (cfg->cbb, ins);
3849 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
3853 MonoMethod *addr_method;
3856 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3859 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1]);
3861 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3862 /* emit_ldelema_2 depends on OP_LMUL */
3863 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3864 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
3868 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3869 addr_method = mono_marshal_get_array_address (rank, element_size);
3870 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
3875 static MonoBreakPolicy
3876 always_insert_breakpoint (MonoMethod *method)
3878 return MONO_BREAK_POLICY_ALWAYS;
3881 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
3884 * mono_set_break_policy:
3885 * policy_callback: the new callback function
3887 * Allow embedders to decide wherther to actually obey breakpoint instructions
3888 * (both break IL instructions and Debugger.Break () method calls), for example
3889 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
3890 * untrusted or semi-trusted code.
3892 * @policy_callback will be called every time a break point instruction needs to
3893 * be inserted with the method argument being the method that calls Debugger.Break()
3894 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
3895 * if it wants the breakpoint to not be effective in the given method.
3896 * #MONO_BREAK_POLICY_ALWAYS is the default.
3899 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
3901 if (policy_callback)
3902 break_policy_func = policy_callback;
3904 break_policy_func = always_insert_breakpoint;
3908 should_insert_brekpoint (MonoMethod *method) {
3909 switch (break_policy_func (method)) {
3910 case MONO_BREAK_POLICY_ALWAYS:
3912 case MONO_BREAK_POLICY_NEVER:
3914 case MONO_BREAK_POLICY_ON_DBG:
3915 return mono_debug_using_mono_debugger ();
3917 g_warning ("Incorrect value returned from break policy callback");
3923 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
3925 MonoInst *ins = NULL;
3927 static MonoClass *runtime_helpers_class = NULL;
3928 if (! runtime_helpers_class)
3929 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
3930 "System.Runtime.CompilerServices", "RuntimeHelpers");
3932 if (cmethod->klass == mono_defaults.string_class) {
3933 if (strcmp (cmethod->name, "get_Chars") == 0) {
3934 int dreg = alloc_ireg (cfg);
3935 int index_reg = alloc_preg (cfg);
3936 int mult_reg = alloc_preg (cfg);
3937 int add_reg = alloc_preg (cfg);
3939 #if SIZEOF_REGISTER == 8
3940 /* The array reg is 64 bits but the index reg is only 32 */
3941 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
3943 index_reg = args [1]->dreg;
3945 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
3947 #if defined(TARGET_X86) || defined(TARGET_AMD64)
3948 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
3949 add_reg = ins->dreg;
3950 /* Avoid a warning */
3952 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3955 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
3956 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3957 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3958 add_reg, G_STRUCT_OFFSET (MonoString, chars));
3960 type_from_op (ins, NULL, NULL);
3962 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3963 int dreg = alloc_ireg (cfg);
3964 /* Decompose later to allow more optimizations */
3965 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
3966 ins->type = STACK_I4;
3967 cfg->cbb->has_array_access = TRUE;
3968 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
3971 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
3972 int mult_reg = alloc_preg (cfg);
3973 int add_reg = alloc_preg (cfg);
3975 /* The corlib functions check for oob already. */
3976 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
3977 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3978 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
3979 return cfg->cbb->last_ins;
3982 } else if (cmethod->klass == mono_defaults.object_class) {
3984 if (strcmp (cmethod->name, "GetType") == 0) {
3985 int dreg = alloc_preg (cfg);
3986 int vt_reg = alloc_preg (cfg);
3987 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3988 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
3989 type_from_op (ins, NULL, NULL);
3992 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
3993 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
3994 int dreg = alloc_ireg (cfg);
3995 int t1 = alloc_ireg (cfg);
3997 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
3998 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
3999 ins->type = STACK_I4;
4003 } else if (strcmp (cmethod->name, ".ctor") == 0) {
4004 MONO_INST_NEW (cfg, ins, OP_NOP);
4005 MONO_ADD_INS (cfg->cbb, ins);
4009 } else if (cmethod->klass == mono_defaults.array_class) {
4010 if (cmethod->name [0] != 'g')
4013 if (strcmp (cmethod->name, "get_Rank") == 0) {
4014 int dreg = alloc_ireg (cfg);
4015 int vtable_reg = alloc_preg (cfg);
4016 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
4017 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4018 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
4019 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4020 type_from_op (ins, NULL, NULL);
4023 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4024 int dreg = alloc_ireg (cfg);
4026 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4027 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4028 type_from_op (ins, NULL, NULL);
4033 } else if (cmethod->klass == runtime_helpers_class) {
4035 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
4036 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
4040 } else if (cmethod->klass == mono_defaults.thread_class) {
4041 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
4042 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
4043 MONO_ADD_INS (cfg->cbb, ins);
4045 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
4046 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4047 MONO_ADD_INS (cfg->cbb, ins);
4050 } else if (cmethod->klass == mono_defaults.monitor_class) {
4051 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
4052 if (strcmp (cmethod->name, "Enter") == 0) {
4055 if (COMPILE_LLVM (cfg)) {
4057 * Pass the argument normally, the LLVM backend will handle the
4058 * calling convention problems.
4060 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4062 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
4063 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4064 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4065 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4068 return (MonoInst*)call;
4069 } else if (strcmp (cmethod->name, "Exit") == 0) {
4072 if (COMPILE_LLVM (cfg)) {
4073 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4075 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
4076 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4077 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4078 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4081 return (MonoInst*)call;
4083 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4084 MonoMethod *fast_method = NULL;
4086 /* Avoid infinite recursion */
4087 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
4088 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
4089 strcmp (cfg->method->name, "FastMonitorExit") == 0))
4092 if (strcmp (cmethod->name, "Enter") == 0 ||
4093 strcmp (cmethod->name, "Exit") == 0)
4094 fast_method = mono_monitor_get_fast_path (cmethod);
4098 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
4100 } else if (mini_class_is_system_array (cmethod->klass) &&
4101 strcmp (cmethod->name, "GetGenericValueImpl") == 0) {
4102 MonoInst *addr, *store, *load;
4103 MonoClass *eklass = mono_class_from_mono_type (fsig->params [1]);
4105 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1]);
4106 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4107 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
4109 } else if (cmethod->klass->image == mono_defaults.corlib &&
4110 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
4111 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
4114 #if SIZEOF_REGISTER == 8
4115 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
4116 /* 64 bit reads are already atomic */
4117 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
4118 ins->dreg = mono_alloc_preg (cfg);
4119 ins->inst_basereg = args [0]->dreg;
4120 ins->inst_offset = 0;
4121 MONO_ADD_INS (cfg->cbb, ins);
4125 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
4126 if (strcmp (cmethod->name, "Increment") == 0) {
4127 MonoInst *ins_iconst;
4130 if (fsig->params [0]->type == MONO_TYPE_I4)
4131 opcode = OP_ATOMIC_ADD_NEW_I4;
4132 #if SIZEOF_REGISTER == 8
4133 else if (fsig->params [0]->type == MONO_TYPE_I8)
4134 opcode = OP_ATOMIC_ADD_NEW_I8;
4137 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4138 ins_iconst->inst_c0 = 1;
4139 ins_iconst->dreg = mono_alloc_ireg (cfg);
4140 MONO_ADD_INS (cfg->cbb, ins_iconst);
4142 MONO_INST_NEW (cfg, ins, opcode);
4143 ins->dreg = mono_alloc_ireg (cfg);
4144 ins->inst_basereg = args [0]->dreg;
4145 ins->inst_offset = 0;
4146 ins->sreg2 = ins_iconst->dreg;
4147 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4148 MONO_ADD_INS (cfg->cbb, ins);
4150 } else if (strcmp (cmethod->name, "Decrement") == 0) {
4151 MonoInst *ins_iconst;
4154 if (fsig->params [0]->type == MONO_TYPE_I4)
4155 opcode = OP_ATOMIC_ADD_NEW_I4;
4156 #if SIZEOF_REGISTER == 8
4157 else if (fsig->params [0]->type == MONO_TYPE_I8)
4158 opcode = OP_ATOMIC_ADD_NEW_I8;
4161 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4162 ins_iconst->inst_c0 = -1;
4163 ins_iconst->dreg = mono_alloc_ireg (cfg);
4164 MONO_ADD_INS (cfg->cbb, ins_iconst);
4166 MONO_INST_NEW (cfg, ins, opcode);
4167 ins->dreg = mono_alloc_ireg (cfg);
4168 ins->inst_basereg = args [0]->dreg;
4169 ins->inst_offset = 0;
4170 ins->sreg2 = ins_iconst->dreg;
4171 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4172 MONO_ADD_INS (cfg->cbb, ins);
4174 } else if (strcmp (cmethod->name, "Add") == 0) {
4177 if (fsig->params [0]->type == MONO_TYPE_I4)
4178 opcode = OP_ATOMIC_ADD_NEW_I4;
4179 #if SIZEOF_REGISTER == 8
4180 else if (fsig->params [0]->type == MONO_TYPE_I8)
4181 opcode = OP_ATOMIC_ADD_NEW_I8;
4185 MONO_INST_NEW (cfg, ins, opcode);
4186 ins->dreg = mono_alloc_ireg (cfg);
4187 ins->inst_basereg = args [0]->dreg;
4188 ins->inst_offset = 0;
4189 ins->sreg2 = args [1]->dreg;
4190 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4191 MONO_ADD_INS (cfg->cbb, ins);
4194 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
4196 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
4197 if (strcmp (cmethod->name, "Exchange") == 0) {
4199 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
4201 if (fsig->params [0]->type == MONO_TYPE_I4)
4202 opcode = OP_ATOMIC_EXCHANGE_I4;
4203 #if SIZEOF_REGISTER == 8
4204 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
4205 (fsig->params [0]->type == MONO_TYPE_I))
4206 opcode = OP_ATOMIC_EXCHANGE_I8;
4208 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
4209 opcode = OP_ATOMIC_EXCHANGE_I4;
4214 MONO_INST_NEW (cfg, ins, opcode);
4215 ins->dreg = mono_alloc_ireg (cfg);
4216 ins->inst_basereg = args [0]->dreg;
4217 ins->inst_offset = 0;
4218 ins->sreg2 = args [1]->dreg;
4219 MONO_ADD_INS (cfg->cbb, ins);
4221 switch (fsig->params [0]->type) {
4223 ins->type = STACK_I4;
4227 ins->type = STACK_I8;
4229 case MONO_TYPE_OBJECT:
4230 ins->type = STACK_OBJ;
4233 g_assert_not_reached ();
4236 #if HAVE_WRITE_BARRIERS
4238 MonoInst *dummy_use;
4239 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
4240 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
4241 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [1]);
4245 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4247 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
4248 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
4250 gboolean is_ref = MONO_TYPE_IS_REFERENCE (fsig->params [1]);
4251 if (fsig->params [1]->type == MONO_TYPE_I4)
4253 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
4254 size = sizeof (gpointer);
4255 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I4)
4258 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
4259 ins->dreg = alloc_ireg (cfg);
4260 ins->sreg1 = args [0]->dreg;
4261 ins->sreg2 = args [1]->dreg;
4262 ins->sreg3 = args [2]->dreg;
4263 ins->type = STACK_I4;
4264 MONO_ADD_INS (cfg->cbb, ins);
4265 } else if (size == 8) {
4266 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
4267 ins->dreg = alloc_ireg (cfg);
4268 ins->sreg1 = args [0]->dreg;
4269 ins->sreg2 = args [1]->dreg;
4270 ins->sreg3 = args [2]->dreg;
4271 ins->type = STACK_I8;
4272 MONO_ADD_INS (cfg->cbb, ins);
4274 /* g_assert_not_reached (); */
4276 #if HAVE_WRITE_BARRIERS
4278 MonoInst *dummy_use;
4279 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
4280 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
4281 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [1]);
4285 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
4289 } else if (cmethod->klass->image == mono_defaults.corlib) {
4290 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
4291 && strcmp (cmethod->klass->name, "Debugger") == 0) {
4292 if (should_insert_brekpoint (cfg->method))
4293 MONO_INST_NEW (cfg, ins, OP_BREAK);
4295 MONO_INST_NEW (cfg, ins, OP_NOP);
4296 MONO_ADD_INS (cfg->cbb, ins);
4299 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
4300 && strcmp (cmethod->klass->name, "Environment") == 0) {
4302 EMIT_NEW_ICONST (cfg, ins, 1);
4304 EMIT_NEW_ICONST (cfg, ins, 0);
4308 } else if (cmethod->klass == mono_defaults.math_class) {
4310 * There is general branches code for Min/Max, but it does not work for
4312 * http://everything2.com/?node_id=1051618
4316 #ifdef MONO_ARCH_SIMD_INTRINSICS
4317 if (cfg->opt & MONO_OPT_SIMD) {
4318 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
4324 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
4328 * This entry point could be used later for arbitrary method
4331 inline static MonoInst*
4332 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4333 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4335 if (method->klass == mono_defaults.string_class) {
4336 /* managed string allocation support */
4337 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS)) {
4338 MonoInst *iargs [2];
4339 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4340 MonoMethod *managed_alloc = NULL;
4342 g_assert (vtable); /*Should not fail since it System.String*/
4343 #ifndef MONO_CROSS_COMPILE
4344 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
4348 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4349 iargs [1] = args [0];
4350 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
4357 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4359 MonoInst *store, *temp;
4362 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4363 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4366 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4367 * would be different than the MonoInst's used to represent arguments, and
4368 * the ldelema implementation can't deal with that.
4369 * Solution: When ldelema is used on an inline argument, create a var for
4370 * it, emit ldelema on that var, and emit the saving code below in
4371 * inline_method () if needed.
4373 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4374 cfg->args [i] = temp;
4375 /* This uses cfg->args [i] which is set by the preceeding line */
4376 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4377 store->cil_code = sp [0]->cil_code;
4382 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4383 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4385 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4387 check_inline_called_method_name_limit (MonoMethod *called_method)
4390 static char *limit = NULL;
4392 if (limit == NULL) {
4393 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4395 if (limit_string != NULL)
4396 limit = limit_string;
4398 limit = (char *) "";
4401 if (limit [0] != '\0') {
4402 char *called_method_name = mono_method_full_name (called_method, TRUE);
4404 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4405 g_free (called_method_name);
4407 //return (strncmp_result <= 0);
4408 return (strncmp_result == 0);
4415 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4417 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4420 static char *limit = NULL;
4422 if (limit == NULL) {
4423 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4424 if (limit_string != NULL) {
4425 limit = limit_string;
4427 limit = (char *) "";
4431 if (limit [0] != '\0') {
4432 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
4434 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4435 g_free (caller_method_name);
4437 //return (strncmp_result <= 0);
4438 return (strncmp_result == 0);
4446 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4447 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
4449 MonoInst *ins, *rvar = NULL;
4450 MonoMethodHeader *cheader;
4451 MonoBasicBlock *ebblock, *sbblock;
4453 MonoMethod *prev_inlined_method;
4454 MonoInst **prev_locals, **prev_args;
4455 MonoType **prev_arg_types;
4456 guint prev_real_offset;
4457 GHashTable *prev_cbb_hash;
4458 MonoBasicBlock **prev_cil_offset_to_bb;
4459 MonoBasicBlock *prev_cbb;
4460 unsigned char* prev_cil_start;
4461 guint32 prev_cil_offset_to_bb_len;
4462 MonoMethod *prev_current_method;
4463 MonoGenericContext *prev_generic_context;
4464 gboolean ret_var_set, prev_ret_var_set;
4466 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
4468 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4469 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
4472 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4473 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
4477 if (cfg->verbose_level > 2)
4478 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4480 if (!cmethod->inline_info) {
4481 mono_jit_stats.inlineable_methods++;
4482 cmethod->inline_info = 1;
4485 /* allocate local variables */
4486 cheader = mono_method_get_header (cmethod);
4488 if (cheader == NULL || mono_loader_get_last_error ()) {
4490 mono_metadata_free_mh (cheader);
4491 mono_loader_clear_error ();
4495 /* allocate space to store the return value */
4496 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4497 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
4501 prev_locals = cfg->locals;
4502 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4503 for (i = 0; i < cheader->num_locals; ++i)
4504 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4506 /* allocate start and end blocks */
4507 /* This is needed so if the inline is aborted, we can clean up */
4508 NEW_BBLOCK (cfg, sbblock);
4509 sbblock->real_offset = real_offset;
4511 NEW_BBLOCK (cfg, ebblock);
4512 ebblock->block_num = cfg->num_bblocks++;
4513 ebblock->real_offset = real_offset;
4515 prev_args = cfg->args;
4516 prev_arg_types = cfg->arg_types;
4517 prev_inlined_method = cfg->inlined_method;
4518 cfg->inlined_method = cmethod;
4519 cfg->ret_var_set = FALSE;
4520 cfg->inline_depth ++;
4521 prev_real_offset = cfg->real_offset;
4522 prev_cbb_hash = cfg->cbb_hash;
4523 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4524 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4525 prev_cil_start = cfg->cil_start;
4526 prev_cbb = cfg->cbb;
4527 prev_current_method = cfg->current_method;
4528 prev_generic_context = cfg->generic_context;
4529 prev_ret_var_set = cfg->ret_var_set;
4531 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4533 ret_var_set = cfg->ret_var_set;
4535 cfg->inlined_method = prev_inlined_method;
4536 cfg->real_offset = prev_real_offset;
4537 cfg->cbb_hash = prev_cbb_hash;
4538 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4539 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4540 cfg->cil_start = prev_cil_start;
4541 cfg->locals = prev_locals;
4542 cfg->args = prev_args;
4543 cfg->arg_types = prev_arg_types;
4544 cfg->current_method = prev_current_method;
4545 cfg->generic_context = prev_generic_context;
4546 cfg->ret_var_set = prev_ret_var_set;
4547 cfg->inline_depth --;
4549 if ((costs >= 0 && costs < 60) || inline_allways) {
4550 if (cfg->verbose_level > 2)
4551 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4553 mono_jit_stats.inlined_methods++;
4555 /* always add some code to avoid block split failures */
4556 MONO_INST_NEW (cfg, ins, OP_NOP);
4557 MONO_ADD_INS (prev_cbb, ins);
4559 prev_cbb->next_bb = sbblock;
4560 link_bblock (cfg, prev_cbb, sbblock);
4563 * Get rid of the begin and end bblocks if possible to aid local
4566 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4568 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4569 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4571 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4572 MonoBasicBlock *prev = ebblock->in_bb [0];
4573 mono_merge_basic_blocks (cfg, prev, ebblock);
4575 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4576 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4577 cfg->cbb = prev_cbb;
4585 * If the inlined method contains only a throw, then the ret var is not
4586 * set, so set it to a dummy value.
4589 static double r8_0 = 0.0;
4591 switch (rvar->type) {
4593 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4596 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4601 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4604 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4605 ins->type = STACK_R8;
4606 ins->inst_p0 = (void*)&r8_0;
4607 ins->dreg = rvar->dreg;
4608 MONO_ADD_INS (cfg->cbb, ins);
4611 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4614 g_assert_not_reached ();
4618 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
4621 mono_metadata_free_mh (cheader);
4624 if (cfg->verbose_level > 2)
4625 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4626 cfg->exception_type = MONO_EXCEPTION_NONE;
4627 mono_loader_clear_error ();
4629 /* This gets rid of the newly added bblocks */
4630 cfg->cbb = prev_cbb;
4632 mono_metadata_free_mh (cheader);
4637 * Some of these comments may well be out-of-date.
4638 * Design decisions: we do a single pass over the IL code (and we do bblock
4639 * splitting/merging in the few cases when it's required: a back jump to an IL
4640 * address that was not already seen as bblock starting point).
4641 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4642 * Complex operations are decomposed in simpler ones right away. We need to let the
4643 * arch-specific code peek and poke inside this process somehow (except when the
4644 * optimizations can take advantage of the full semantic info of coarse opcodes).
4645 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4646 * MonoInst->opcode initially is the IL opcode or some simplification of that
4647 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4648 * opcode with value bigger than OP_LAST.
4649 * At this point the IR can be handed over to an interpreter, a dumb code generator
4650 * or to the optimizing code generator that will translate it to SSA form.
4652 * Profiling directed optimizations.
4653 * We may compile by default with few or no optimizations and instrument the code
4654 * or the user may indicate what methods to optimize the most either in a config file
4655 * or through repeated runs where the compiler applies offline the optimizations to
4656 * each method and then decides if it was worth it.
4659 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4660 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4661 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
4662 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4663 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4664 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4665 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
4666 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
4668 /* offset from br.s -> br like opcodes */
4669 #define BIG_BRANCH_OFFSET 13
4672 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4674 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4676 return b == NULL || b == bb;
4680 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4682 unsigned char *ip = start;
4683 unsigned char *target;
4686 MonoBasicBlock *bblock;
4687 const MonoOpcode *opcode;
4690 cli_addr = ip - start;
4691 i = mono_opcode_value ((const guint8 **)&ip, end);
4694 opcode = &mono_opcodes [i];
4695 switch (opcode->argument) {
4696 case MonoInlineNone:
4699 case MonoInlineString:
4700 case MonoInlineType:
4701 case MonoInlineField:
4702 case MonoInlineMethod:
4705 case MonoShortInlineR:
4712 case MonoShortInlineVar:
4713 case MonoShortInlineI:
4716 case MonoShortInlineBrTarget:
4717 target = start + cli_addr + 2 + (signed char)ip [1];
4718 GET_BBLOCK (cfg, bblock, target);
4721 GET_BBLOCK (cfg, bblock, ip);
4723 case MonoInlineBrTarget:
4724 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4725 GET_BBLOCK (cfg, bblock, target);
4728 GET_BBLOCK (cfg, bblock, ip);
4730 case MonoInlineSwitch: {
4731 guint32 n = read32 (ip + 1);
4734 cli_addr += 5 + 4 * n;
4735 target = start + cli_addr;
4736 GET_BBLOCK (cfg, bblock, target);
4738 for (j = 0; j < n; ++j) {
4739 target = start + cli_addr + (gint32)read32 (ip);
4740 GET_BBLOCK (cfg, bblock, target);
4750 g_assert_not_reached ();
4753 if (i == CEE_THROW) {
4754 unsigned char *bb_start = ip - 1;
4756 /* Find the start of the bblock containing the throw */
4758 while ((bb_start >= start) && !bblock) {
4759 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
4763 bblock->out_of_line = 1;
4772 static inline MonoMethod *
4773 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4777 if (m->wrapper_type != MONO_WRAPPER_NONE)
4778 return mono_method_get_wrapper_data (m, token);
4780 method = mono_get_method_full (m->klass->image, token, klass, context);
4785 static inline MonoMethod *
4786 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4788 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
4790 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
4796 static inline MonoClass*
4797 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4801 if (method->wrapper_type != MONO_WRAPPER_NONE)
4802 klass = mono_method_get_wrapper_data (method, token);
4804 klass = mono_class_get_full (method->klass->image, token, context);
4806 mono_class_init (klass);
4811 * Returns TRUE if the JIT should abort inlining because "callee"
4812 * is influenced by security attributes.
4815 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
4819 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
4823 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
4824 if (result == MONO_JIT_SECURITY_OK)
4827 if (result == MONO_JIT_LINKDEMAND_ECMA) {
4828 /* Generate code to throw a SecurityException before the actual call/link */
4829 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4832 NEW_ICONST (cfg, args [0], 4);
4833 NEW_METHODCONST (cfg, args [1], caller);
4834 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
4835 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
4836 /* don't hide previous results */
4837 cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
4838 cfg->exception_data = result;
4846 throw_exception (void)
4848 static MonoMethod *method = NULL;
4851 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4852 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
4859 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
4861 MonoMethod *thrower = throw_exception ();
4864 EMIT_NEW_PCONST (cfg, args [0], ex);
4865 mono_emit_method_call (cfg, thrower, args, NULL);
4869 * Return the original method is a wrapper is specified. We can only access
4870 * the custom attributes from the original method.
4873 get_original_method (MonoMethod *method)
4875 if (method->wrapper_type == MONO_WRAPPER_NONE)
4878 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
4879 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
4882 /* in other cases we need to find the original method */
4883 return mono_marshal_method_from_wrapper (method);
4887 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
4888 MonoBasicBlock *bblock, unsigned char *ip)
4890 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4891 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
4893 emit_throw_exception (cfg, ex);
4897 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4898 MonoBasicBlock *bblock, unsigned char *ip)
4900 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4901 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
4903 emit_throw_exception (cfg, ex);
4907 * Check that the IL instructions at ip are the array initialization
4908 * sequence and return the pointer to the data and the size.
4911 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
4914 * newarr[System.Int32]
4916 * ldtoken field valuetype ...
4917 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
4919 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
4920 guint32 token = read32 (ip + 7);
4921 guint32 field_token = read32 (ip + 2);
4922 guint32 field_index = field_token & 0xffffff;
4924 const char *data_ptr;
4926 MonoMethod *cmethod;
4927 MonoClass *dummy_class;
4928 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
4934 *out_field_token = field_token;
4936 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
4939 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
4941 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
4942 case MONO_TYPE_BOOLEAN:
4946 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
4947 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
4948 case MONO_TYPE_CHAR:
4958 return NULL; /* stupid ARM FP swapped format */
4968 if (size > mono_type_size (field->type, &dummy_align))
4971 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
4972 if (!method->klass->image->dynamic) {
4973 field_index = read32 (ip + 2) & 0xffffff;
4974 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
4975 data_ptr = mono_image_rva_map (method->klass->image, rva);
4976 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
4977 /* for aot code we do the lookup on load */
4978 if (aot && data_ptr)
4979 return GUINT_TO_POINTER (rva);
4981 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
4983 data_ptr = mono_field_get_data (field);
4991 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
4993 char *method_fname = mono_method_full_name (method, TRUE);
4995 MonoMethodHeader *header = mono_method_get_header (method);
4997 if (header->code_size == 0)
4998 method_code = g_strdup ("method body is empty.");
5000 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
5001 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
5002 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
5003 g_free (method_fname);
5004 g_free (method_code);
5005 mono_metadata_free_mh (header);
5009 set_exception_object (MonoCompile *cfg, MonoException *exception)
5011 cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
5012 MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
5013 cfg->exception_ptr = exception;
5017 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5021 if (cfg->generic_sharing_context)
5022 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
5024 type = &klass->byval_arg;
5025 return MONO_TYPE_IS_REFERENCE (type);
5029 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5032 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5033 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5034 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5035 /* Optimize reg-reg moves away */
5037 * Can't optimize other opcodes, since sp[0] might point to
5038 * the last ins of a decomposed opcode.
5040 sp [0]->dreg = (cfg)->locals [n]->dreg;
5042 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5047 * ldloca inhibits many optimizations so try to get rid of it in common
5050 static inline unsigned char *
5051 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
5060 local = read16 (ip + 2);
5064 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5065 gboolean skip = FALSE;
5067 /* From the INITOBJ case */
5068 token = read32 (ip + 2);
5069 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5070 CHECK_TYPELOAD (klass);
5071 if (generic_class_is_reference_type (cfg, klass)) {
5072 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5073 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5074 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5075 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5076 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
5089 is_exception_class (MonoClass *class)
5092 if (class == mono_defaults.exception_class)
5094 class = class->parent;
5100 * mono_method_to_ir:
5102 * Translate the .net IL into linear IR.
5105 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5106 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5107 guint inline_offset, gboolean is_virtual_call)
5110 MonoInst *ins, **sp, **stack_start;
5111 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5112 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
5113 MonoMethod *cmethod, *method_definition;
5114 MonoInst **arg_array;
5115 MonoMethodHeader *header;
5117 guint32 token, ins_flag;
5119 MonoClass *constrained_call = NULL;
5120 unsigned char *ip, *end, *target, *err_pos;
5121 static double r8_0 = 0.0;
5122 MonoMethodSignature *sig;
5123 MonoGenericContext *generic_context = NULL;
5124 MonoGenericContainer *generic_container = NULL;
5125 MonoType **param_types;
5126 int i, n, start_new_bblock, dreg;
5127 int num_calls = 0, inline_costs = 0;
5128 int breakpoint_id = 0;
5130 MonoBoolean security, pinvoke;
5131 MonoSecurityManager* secman = NULL;
5132 MonoDeclSecurityActions actions;
5133 GSList *class_inits = NULL;
5134 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5136 gboolean init_locals, seq_points, skip_dead_blocks;
5138 /* serialization and xdomain stuff may need access to private fields and methods */
5139 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5140 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5141 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5142 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5143 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5144 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5146 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5148 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5149 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5150 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5151 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5153 image = method->klass->image;
5154 header = mono_method_get_header (method);
5155 generic_container = mono_method_get_generic_container (method);
5156 sig = mono_method_signature (method);
5157 num_args = sig->hasthis + sig->param_count;
5158 ip = (unsigned char*)header->code;
5159 cfg->cil_start = ip;
5160 end = ip + header->code_size;
5161 mono_jit_stats.cil_code_size += header->code_size;
5162 init_locals = header->init_locals;
5164 seq_points = cfg->gen_seq_points && cfg->method == method;
5167 * Methods without init_locals set could cause asserts in various passes
5172 method_definition = method;
5173 while (method_definition->is_inflated) {
5174 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5175 method_definition = imethod->declaring;
5178 /* SkipVerification is not allowed if core-clr is enabled */
5179 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5181 dont_verify_stloc = TRUE;
5184 if (!dont_verify && mini_method_verify (cfg, method_definition))
5185 goto exception_exit;
5187 if (mono_debug_using_mono_debugger ())
5188 cfg->keep_cil_nops = TRUE;
5190 if (sig->is_inflated)
5191 generic_context = mono_method_get_context (method);
5192 else if (generic_container)
5193 generic_context = &generic_container->context;
5194 cfg->generic_context = generic_context;
5196 if (!cfg->generic_sharing_context)
5197 g_assert (!sig->has_type_parameters);
5199 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5200 g_assert (method->is_inflated);
5201 g_assert (mono_method_get_context (method)->method_inst);
5203 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5204 g_assert (sig->generic_param_count);
5206 if (cfg->method == method) {
5207 cfg->real_offset = 0;
5209 cfg->real_offset = inline_offset;
5212 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5213 cfg->cil_offset_to_bb_len = header->code_size;
5215 cfg->current_method = method;
5217 if (cfg->verbose_level > 2)
5218 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5220 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5222 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5223 for (n = 0; n < sig->param_count; ++n)
5224 param_types [n + sig->hasthis] = sig->params [n];
5225 cfg->arg_types = param_types;
5227 dont_inline = g_list_prepend (dont_inline, method);
5228 if (cfg->method == method) {
5230 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5231 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5234 NEW_BBLOCK (cfg, start_bblock);
5235 cfg->bb_entry = start_bblock;
5236 start_bblock->cil_code = NULL;
5237 start_bblock->cil_length = 0;
5240 NEW_BBLOCK (cfg, end_bblock);
5241 cfg->bb_exit = end_bblock;
5242 end_bblock->cil_code = NULL;
5243 end_bblock->cil_length = 0;
5244 g_assert (cfg->num_bblocks == 2);
5246 arg_array = cfg->args;
5248 if (header->num_clauses) {
5249 cfg->spvars = g_hash_table_new (NULL, NULL);
5250 cfg->exvars = g_hash_table_new (NULL, NULL);
5252 /* handle exception clauses */
5253 for (i = 0; i < header->num_clauses; ++i) {
5254 MonoBasicBlock *try_bb;
5255 MonoExceptionClause *clause = &header->clauses [i];
5256 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5257 try_bb->real_offset = clause->try_offset;
5258 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5259 tblock->real_offset = clause->handler_offset;
5260 tblock->flags |= BB_EXCEPTION_HANDLER;
5262 link_bblock (cfg, try_bb, tblock);
5264 if (*(ip + clause->handler_offset) == CEE_POP)
5265 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5267 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5268 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5269 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5270 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5271 MONO_ADD_INS (tblock, ins);
5273 /* todo: is a fault block unsafe to optimize? */
5274 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5275 tblock->flags |= BB_EXCEPTION_UNSAFE;
5279 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5281 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5283 /* catch and filter blocks get the exception object on the stack */
5284 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5285 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5286 MonoInst *dummy_use;
5288 /* mostly like handle_stack_args (), but just sets the input args */
5289 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5290 tblock->in_scount = 1;
5291 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5292 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5295 * Add a dummy use for the exvar so its liveness info will be
5299 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5301 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5302 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5303 tblock->flags |= BB_EXCEPTION_HANDLER;
5304 tblock->real_offset = clause->data.filter_offset;
5305 tblock->in_scount = 1;
5306 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5307 /* The filter block shares the exvar with the handler block */
5308 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5309 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5310 MONO_ADD_INS (tblock, ins);
5314 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5315 clause->data.catch_class &&
5316 cfg->generic_sharing_context &&
5317 mono_class_check_context_used (clause->data.catch_class)) {
5319 * In shared generic code with catch
5320 * clauses containing type variables
5321 * the exception handling code has to
5322 * be able to get to the rgctx.
5323 * Therefore we have to make sure that
5324 * the vtable/mrgctx argument (for
5325 * static or generic methods) or the
5326 * "this" argument (for non-static
5327 * methods) are live.
5329 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5330 mini_method_get_context (method)->method_inst ||
5331 method->klass->valuetype) {
5332 mono_get_vtable_var (cfg);
5334 MonoInst *dummy_use;
5336 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5341 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5342 cfg->cbb = start_bblock;
5343 cfg->args = arg_array;
5344 mono_save_args (cfg, sig, inline_args);
5347 /* FIRST CODE BLOCK */
5348 NEW_BBLOCK (cfg, bblock);
5349 bblock->cil_code = ip;
5353 ADD_BBLOCK (cfg, bblock);
5355 if (cfg->method == method) {
5356 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5357 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5358 MONO_INST_NEW (cfg, ins, OP_BREAK);
5359 MONO_ADD_INS (bblock, ins);
5363 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5364 secman = mono_security_manager_get_methods ();
5366 security = (secman && mono_method_has_declsec (method));
5367 /* at this point having security doesn't mean we have any code to generate */
5368 if (security && (cfg->method == method)) {
5369 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5370 * And we do not want to enter the next section (with allocation) if we
5371 * have nothing to generate */
5372 security = mono_declsec_get_demands (method, &actions);
5375 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5376 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5378 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5379 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5380 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5382 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5383 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5387 mono_custom_attrs_free (custom);
5390 custom = mono_custom_attrs_from_class (wrapped->klass);
5391 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5395 mono_custom_attrs_free (custom);
5398 /* not a P/Invoke after all */
5403 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5404 /* we use a separate basic block for the initialization code */
5405 NEW_BBLOCK (cfg, init_localsbb);
5406 cfg->bb_init = init_localsbb;
5407 init_localsbb->real_offset = cfg->real_offset;
5408 start_bblock->next_bb = init_localsbb;
5409 init_localsbb->next_bb = bblock;
5410 link_bblock (cfg, start_bblock, init_localsbb);
5411 link_bblock (cfg, init_localsbb, bblock);
5413 cfg->cbb = init_localsbb;
5415 start_bblock->next_bb = bblock;
5416 link_bblock (cfg, start_bblock, bblock);
5419 /* at this point we know, if security is TRUE, that some code needs to be generated */
5420 if (security && (cfg->method == method)) {
5423 mono_jit_stats.cas_demand_generation++;
5425 if (actions.demand.blob) {
5426 /* Add code for SecurityAction.Demand */
5427 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5428 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5429 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5430 mono_emit_method_call (cfg, secman->demand, args, NULL);
5432 if (actions.noncasdemand.blob) {
5433 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5434 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5435 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5436 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5437 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5438 mono_emit_method_call (cfg, secman->demand, args, NULL);
5440 if (actions.demandchoice.blob) {
5441 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5442 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5443 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5444 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5445 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5449 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5451 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5454 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5455 /* check if this is native code, e.g. an icall or a p/invoke */
5456 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5457 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5459 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
5460 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
5462 /* if this ia a native call then it can only be JITted from platform code */
5463 if ((icall || pinvk) && method->klass && method->klass->image) {
5464 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
5465 MonoException *ex = icall ? mono_get_exception_security () :
5466 mono_get_exception_method_access ();
5467 emit_throw_exception (cfg, ex);
5474 if (header->code_size == 0)
5477 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5482 if (cfg->method == method)
5483 mono_debug_init_method (cfg, bblock, breakpoint_id);
5485 for (n = 0; n < header->num_locals; ++n) {
5486 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5491 /* We force the vtable variable here for all shared methods
5492 for the possibility that they might show up in a stack
5493 trace where their exact instantiation is needed. */
5494 if (cfg->generic_sharing_context && method == cfg->method) {
5495 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5496 mini_method_get_context (method)->method_inst ||
5497 method->klass->valuetype) {
5498 mono_get_vtable_var (cfg);
5500 /* FIXME: Is there a better way to do this?
5501 We need the variable live for the duration
5502 of the whole method. */
5503 cfg->args [0]->flags |= MONO_INST_INDIRECT;
5507 /* add a check for this != NULL to inlined methods */
5508 if (is_virtual_call) {
5511 NEW_ARGLOAD (cfg, arg_ins, 0);
5512 MONO_ADD_INS (cfg->cbb, arg_ins);
5513 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
5516 skip_dead_blocks = !dont_verify;
5517 if (skip_dead_blocks) {
5518 original_bb = bb = mono_basic_block_split (method, &error);
5519 if (!mono_error_ok (&error)) {
5520 mono_error_cleanup (&error);
5526 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5527 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5530 start_new_bblock = 0;
5533 if (cfg->method == method)
5534 cfg->real_offset = ip - header->code;
5536 cfg->real_offset = inline_offset;
5541 if (start_new_bblock) {
5542 bblock->cil_length = ip - bblock->cil_code;
5543 if (start_new_bblock == 2) {
5544 g_assert (ip == tblock->cil_code);
5546 GET_BBLOCK (cfg, tblock, ip);
5548 bblock->next_bb = tblock;
5551 start_new_bblock = 0;
5552 for (i = 0; i < bblock->in_scount; ++i) {
5553 if (cfg->verbose_level > 3)
5554 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5555 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5559 g_slist_free (class_inits);
5562 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5563 link_bblock (cfg, bblock, tblock);
5564 if (sp != stack_start) {
5565 handle_stack_args (cfg, stack_start, sp - stack_start);
5567 CHECK_UNVERIFIABLE (cfg);
5569 bblock->next_bb = tblock;
5572 for (i = 0; i < bblock->in_scount; ++i) {
5573 if (cfg->verbose_level > 3)
5574 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5575 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5578 g_slist_free (class_inits);
5583 if (skip_dead_blocks) {
5584 int ip_offset = ip - header->code;
5586 if (ip_offset == bb->end)
5590 int op_size = mono_opcode_size (ip, end);
5591 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
5593 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
5595 if (ip_offset + op_size == bb->end) {
5596 MONO_INST_NEW (cfg, ins, OP_NOP);
5597 MONO_ADD_INS (bblock, ins);
5598 start_new_bblock = 1;
5606 * Sequence points are points where the debugger can place a breakpoint.
5607 * Currently, we generate these automatically at points where the IL
5610 if (seq_points && sp == stack_start) {
5611 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
5612 MONO_ADD_INS (cfg->cbb, ins);
5615 bblock->real_offset = cfg->real_offset;
5617 if ((cfg->method == method) && cfg->coverage_info) {
5618 guint32 cil_offset = ip - header->code;
5619 cfg->coverage_info->data [cil_offset].cil_code = ip;
5621 /* TODO: Use an increment here */
5622 #if defined(TARGET_X86)
5623 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5624 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5626 MONO_ADD_INS (cfg->cbb, ins);
5628 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5629 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5633 if (cfg->verbose_level > 3)
5634 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5638 if (cfg->keep_cil_nops)
5639 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
5641 MONO_INST_NEW (cfg, ins, OP_NOP);
5643 MONO_ADD_INS (bblock, ins);
5646 if (should_insert_brekpoint (cfg->method))
5647 MONO_INST_NEW (cfg, ins, OP_BREAK);
5649 MONO_INST_NEW (cfg, ins, OP_NOP);
5651 MONO_ADD_INS (bblock, ins);
5657 CHECK_STACK_OVF (1);
5658 n = (*ip)-CEE_LDARG_0;
5660 EMIT_NEW_ARGLOAD (cfg, ins, n);
5668 CHECK_STACK_OVF (1);
5669 n = (*ip)-CEE_LDLOC_0;
5671 EMIT_NEW_LOCLOAD (cfg, ins, n);
5680 n = (*ip)-CEE_STLOC_0;
5683 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5685 emit_stloc_ir (cfg, sp, header, n);
5692 CHECK_STACK_OVF (1);
5695 EMIT_NEW_ARGLOAD (cfg, ins, n);
5701 CHECK_STACK_OVF (1);
5704 NEW_ARGLOADA (cfg, ins, n);
5705 MONO_ADD_INS (cfg->cbb, ins);
5715 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5717 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5722 CHECK_STACK_OVF (1);
5725 EMIT_NEW_LOCLOAD (cfg, ins, n);
5729 case CEE_LDLOCA_S: {
5730 unsigned char *tmp_ip;
5732 CHECK_STACK_OVF (1);
5733 CHECK_LOCAL (ip [1]);
5735 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
5741 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5750 CHECK_LOCAL (ip [1]);
5751 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5753 emit_stloc_ir (cfg, sp, header, ip [1]);
5758 CHECK_STACK_OVF (1);
5759 EMIT_NEW_PCONST (cfg, ins, NULL);
5760 ins->type = STACK_OBJ;
5765 CHECK_STACK_OVF (1);
5766 EMIT_NEW_ICONST (cfg, ins, -1);
5779 CHECK_STACK_OVF (1);
5780 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
5786 CHECK_STACK_OVF (1);
5788 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
5794 CHECK_STACK_OVF (1);
5795 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
5801 CHECK_STACK_OVF (1);
5802 MONO_INST_NEW (cfg, ins, OP_I8CONST);
5803 ins->type = STACK_I8;
5804 ins->dreg = alloc_dreg (cfg, STACK_I8);
5806 ins->inst_l = (gint64)read64 (ip);
5807 MONO_ADD_INS (bblock, ins);
5813 gboolean use_aotconst = FALSE;
5815 #ifdef TARGET_POWERPC
5816 /* FIXME: Clean this up */
5817 if (cfg->compile_aot)
5818 use_aotconst = TRUE;
5821 /* FIXME: we should really allocate this only late in the compilation process */
5822 f = mono_domain_alloc (cfg->domain, sizeof (float));
5824 CHECK_STACK_OVF (1);
5830 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
5832 dreg = alloc_freg (cfg);
5833 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
5834 ins->type = STACK_R8;
5836 MONO_INST_NEW (cfg, ins, OP_R4CONST);
5837 ins->type = STACK_R8;
5838 ins->dreg = alloc_dreg (cfg, STACK_R8);
5840 MONO_ADD_INS (bblock, ins);
5850 gboolean use_aotconst = FALSE;
5852 #ifdef TARGET_POWERPC
5853 /* FIXME: Clean this up */
5854 if (cfg->compile_aot)
5855 use_aotconst = TRUE;
5858 /* FIXME: we should really allocate this only late in the compilation process */
5859 d = mono_domain_alloc (cfg->domain, sizeof (double));
5861 CHECK_STACK_OVF (1);
5867 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
5869 dreg = alloc_freg (cfg);
5870 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
5871 ins->type = STACK_R8;
5873 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5874 ins->type = STACK_R8;
5875 ins->dreg = alloc_dreg (cfg, STACK_R8);
5877 MONO_ADD_INS (bblock, ins);
5886 MonoInst *temp, *store;
5888 CHECK_STACK_OVF (1);
5892 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
5893 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
5895 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5898 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5911 if (sp [0]->type == STACK_R8)
5912 /* we need to pop the value from the x86 FP stack */
5913 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
5922 if (stack_start != sp)
5924 token = read32 (ip + 1);
5925 /* FIXME: check the signature matches */
5926 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5931 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
5932 GENERIC_SHARING_FAILURE (CEE_JMP);
5934 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5935 CHECK_CFG_EXCEPTION;
5937 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
5939 MonoMethodSignature *fsig = mono_method_signature (cmethod);
5942 /* Handle tail calls similarly to calls */
5943 n = fsig->param_count + fsig->hasthis;
5945 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
5946 call->method = cmethod;
5947 call->tail_call = TRUE;
5948 call->signature = mono_method_signature (cmethod);
5949 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
5950 call->inst.inst_p0 = cmethod;
5951 for (i = 0; i < n; ++i)
5952 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
5954 mono_arch_emit_call (cfg, call);
5955 MONO_ADD_INS (bblock, (MonoInst*)call);
5958 for (i = 0; i < num_args; ++i)
5959 /* Prevent arguments from being optimized away */
5960 arg_array [i]->flags |= MONO_INST_VOLATILE;
5962 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
5963 ins = (MonoInst*)call;
5964 ins->inst_p0 = cmethod;
5965 MONO_ADD_INS (bblock, ins);
5969 start_new_bblock = 1;
5974 case CEE_CALLVIRT: {
5975 MonoInst *addr = NULL;
5976 MonoMethodSignature *fsig = NULL;
5978 int virtual = *ip == CEE_CALLVIRT;
5979 int calli = *ip == CEE_CALLI;
5980 gboolean pass_imt_from_rgctx = FALSE;
5981 MonoInst *imt_arg = NULL;
5982 gboolean pass_vtable = FALSE;
5983 gboolean pass_mrgctx = FALSE;
5984 MonoInst *vtable_arg = NULL;
5985 gboolean check_this = FALSE;
5986 gboolean supported_tail_call = FALSE;
5989 token = read32 (ip + 1);
5996 if (method->wrapper_type != MONO_WRAPPER_NONE)
5997 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
5999 fsig = mono_metadata_parse_signature (image, token);
6001 n = fsig->param_count + fsig->hasthis;
6003 if (method->dynamic && fsig->pinvoke) {
6007 * This is a call through a function pointer using a pinvoke
6008 * signature. Have to create a wrapper and call that instead.
6009 * FIXME: This is very slow, need to create a wrapper at JIT time
6010 * instead based on the signature.
6012 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
6013 EMIT_NEW_PCONST (cfg, args [1], fsig);
6015 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6018 MonoMethod *cil_method;
6020 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6021 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6022 cil_method = cmethod;
6023 } else if (constrained_call) {
6024 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6026 * This is needed since get_method_constrained can't find
6027 * the method in klass representing a type var.
6028 * The type var is guaranteed to be a reference type in this
6031 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6032 cil_method = cmethod;
6033 g_assert (!cmethod->klass->valuetype);
6035 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6038 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6039 cil_method = cmethod;
6044 if (!dont_verify && !cfg->skip_visibility) {
6045 MonoMethod *target_method = cil_method;
6046 if (method->is_inflated) {
6047 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6049 if (!mono_method_can_access_method (method_definition, target_method) &&
6050 !mono_method_can_access_method (method, cil_method))
6051 METHOD_ACCESS_FAILURE;
6054 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6055 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6057 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6058 /* MS.NET seems to silently convert this to a callvirt */
6061 if (!cmethod->klass->inited)
6062 if (!mono_class_init (cmethod->klass))
6065 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6066 mini_class_is_system_array (cmethod->klass)) {
6067 array_rank = cmethod->klass->rank;
6068 fsig = mono_method_signature (cmethod);
6070 fsig = mono_method_signature (cmethod);
6075 if (fsig->pinvoke) {
6076 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6077 check_for_pending_exc, FALSE);
6078 fsig = mono_method_signature (wrapper);
6079 } else if (constrained_call) {
6080 fsig = mono_method_signature (cmethod);
6082 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6086 mono_save_token_info (cfg, image, token, cil_method);
6088 n = fsig->param_count + fsig->hasthis;
6090 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6091 if (check_linkdemand (cfg, method, cmethod))
6093 CHECK_CFG_EXCEPTION;
6096 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6097 g_assert_not_reached ();
6100 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6103 if (!cfg->generic_sharing_context && cmethod)
6104 g_assert (!mono_method_check_context_used (cmethod));
6108 //g_assert (!virtual || fsig->hasthis);
6112 if (constrained_call) {
6114 * We have the `constrained.' prefix opcode.
6116 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6118 * The type parameter is instantiated as a valuetype,
6119 * but that type doesn't override the method we're
6120 * calling, so we need to box `this'.
6122 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6123 ins->klass = constrained_call;
6124 sp [0] = handle_box (cfg, ins, constrained_call);
6125 CHECK_CFG_EXCEPTION;
6126 } else if (!constrained_call->valuetype) {
6127 int dreg = alloc_preg (cfg);
6130 * The type parameter is instantiated as a reference
6131 * type. We have a managed pointer on the stack, so
6132 * we need to dereference it here.
6134 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6135 ins->type = STACK_OBJ;
6137 } else if (cmethod->klass->valuetype)
6139 constrained_call = NULL;
6142 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6146 * If the callee is a shared method, then its static cctor
6147 * might not get called after the call was patched.
6149 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6150 emit_generic_class_init (cfg, cmethod->klass);
6151 CHECK_TYPELOAD (cmethod->klass);
6154 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6155 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6156 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6157 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6158 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6161 * Pass vtable iff target method might
6162 * be shared, which means that sharing
6163 * is enabled for its class and its
6164 * context is sharable (and it's not a
6167 if (sharing_enabled && context_sharable &&
6168 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6172 if (cmethod && mini_method_get_context (cmethod) &&
6173 mini_method_get_context (cmethod)->method_inst) {
6174 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6175 MonoGenericContext *context = mini_method_get_context (cmethod);
6176 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6178 g_assert (!pass_vtable);
6180 if (sharing_enabled && context_sharable)
6184 if (cfg->generic_sharing_context && cmethod) {
6185 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6187 context_used = mono_method_check_context_used (cmethod);
6189 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6190 /* Generic method interface
6191 calls are resolved via a
6192 helper function and don't
6194 if (!cmethod_context || !cmethod_context->method_inst)
6195 pass_imt_from_rgctx = TRUE;
6199 * If a shared method calls another
6200 * shared method then the caller must
6201 * have a generic sharing context
6202 * because the magic trampoline
6203 * requires it. FIXME: We shouldn't
6204 * have to force the vtable/mrgctx
6205 * variable here. Instead there
6206 * should be a flag in the cfg to
6207 * request a generic sharing context.
6210 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6211 mono_get_vtable_var (cfg);
6216 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6218 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6220 CHECK_TYPELOAD (cmethod->klass);
6221 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6226 g_assert (!vtable_arg);
6228 if (!cfg->compile_aot) {
6230 * emit_get_rgctx_method () calls mono_class_vtable () so check
6231 * for type load errors before.
6233 mono_class_setup_vtable (cmethod->klass);
6234 CHECK_TYPELOAD (cmethod->klass);
6237 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6239 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6240 MONO_METHOD_IS_FINAL (cmethod)) {
6247 if (pass_imt_from_rgctx) {
6248 g_assert (!pass_vtable);
6251 imt_arg = emit_get_rgctx_method (cfg, context_used,
6252 cmethod, MONO_RGCTX_INFO_METHOD);
6256 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
6258 /* Calling virtual generic methods */
6259 if (cmethod && virtual &&
6260 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6261 !(MONO_METHOD_IS_FINAL (cmethod) &&
6262 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6263 mono_method_signature (cmethod)->generic_param_count) {
6264 MonoInst *this_temp, *this_arg_temp, *store;
6265 MonoInst *iargs [4];
6267 g_assert (mono_method_signature (cmethod)->is_inflated);
6269 /* Prevent inlining of methods that contain indirect calls */
6272 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK
6273 /* The llvm vcall trampolines doesn't support generic virtual calls yet */
6274 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt && !mono_use_llvm) {
6275 g_assert (!imt_arg);
6277 g_assert (cmethod->is_inflated);
6278 imt_arg = emit_get_rgctx_method (cfg, context_used,
6279 cmethod, MONO_RGCTX_INFO_METHOD);
6280 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6284 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6285 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6286 MONO_ADD_INS (bblock, store);
6288 /* FIXME: This should be a managed pointer */
6289 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6291 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6292 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6293 cmethod, MONO_RGCTX_INFO_METHOD);
6294 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6295 addr = mono_emit_jit_icall (cfg,
6296 mono_helper_compile_generic_method, iargs);
6298 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6300 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6303 if (!MONO_TYPE_IS_VOID (fsig->ret))
6304 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6306 CHECK_CFG_EXCEPTION;
6313 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6314 supported_tail_call = cmethod && MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
6316 supported_tail_call = cmethod && mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6320 /* FIXME: runtime generic context pointer for jumps? */
6321 /* FIXME: handle this for generic sharing eventually */
6322 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) && supported_tail_call) {
6325 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6328 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6329 /* Handle tail calls similarly to calls */
6330 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE);
6332 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6333 call->tail_call = TRUE;
6334 call->method = cmethod;
6335 call->signature = mono_method_signature (cmethod);
6338 * We implement tail calls by storing the actual arguments into the
6339 * argument variables, then emitting a CEE_JMP.
6341 for (i = 0; i < n; ++i) {
6342 /* Prevent argument from being register allocated */
6343 arg_array [i]->flags |= MONO_INST_VOLATILE;
6344 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6348 ins = (MonoInst*)call;
6349 ins->inst_p0 = cmethod;
6350 ins->inst_p1 = arg_array [0];
6351 MONO_ADD_INS (bblock, ins);
6352 link_bblock (cfg, bblock, end_bblock);
6353 start_new_bblock = 1;
6355 CHECK_CFG_EXCEPTION;
6357 /* skip CEE_RET as well */
6363 /* Conversion to a JIT intrinsic */
6364 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6365 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6366 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6371 CHECK_CFG_EXCEPTION;
6379 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6380 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6381 mono_method_check_inlining (cfg, cmethod) &&
6382 !g_list_find (dont_inline, cmethod)) {
6384 gboolean allways = FALSE;
6386 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6387 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6388 /* Prevent inlining of methods that call wrappers */
6390 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6394 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6396 cfg->real_offset += 5;
6399 if (!MONO_TYPE_IS_VOID (fsig->ret))
6400 /* *sp is already set by inline_method */
6403 inline_costs += costs;
6409 inline_costs += 10 * num_calls++;
6411 /* Tail recursion elimination */
6412 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6413 gboolean has_vtargs = FALSE;
6416 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6419 /* keep it simple */
6420 for (i = fsig->param_count - 1; i >= 0; i--) {
6421 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6426 for (i = 0; i < n; ++i)
6427 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6428 MONO_INST_NEW (cfg, ins, OP_BR);
6429 MONO_ADD_INS (bblock, ins);
6430 tblock = start_bblock->out_bb [0];
6431 link_bblock (cfg, bblock, tblock);
6432 ins->inst_target_bb = tblock;
6433 start_new_bblock = 1;
6435 /* skip the CEE_RET, too */
6436 if (ip_in_bb (cfg, bblock, ip + 5))
6446 /* Generic sharing */
6447 /* FIXME: only do this for generic methods if
6448 they are not shared! */
6449 if (context_used && !imt_arg && !array_rank &&
6450 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6451 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6452 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6453 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6456 g_assert (cfg->generic_sharing_context && cmethod);
6460 * We are compiling a call to a
6461 * generic method from shared code,
6462 * which means that we have to look up
6463 * the method in the rgctx and do an
6466 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6469 /* Indirect calls */
6471 g_assert (!imt_arg);
6473 if (*ip == CEE_CALL)
6474 g_assert (context_used);
6475 else if (*ip == CEE_CALLI)
6476 g_assert (!vtable_arg);
6478 /* FIXME: what the hell is this??? */
6479 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6480 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6482 /* Prevent inlining of methods with indirect calls */
6486 #ifdef MONO_ARCH_RGCTX_REG
6488 int rgctx_reg = mono_alloc_preg (cfg);
6490 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6491 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6492 call = (MonoCallInst*)ins;
6493 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6494 cfg->uses_rgctx_reg = TRUE;
6495 call->rgctx_reg = TRUE;
6500 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6502 * Instead of emitting an indirect call, emit a direct call
6503 * with the contents of the aotconst as the patch info.
6505 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6507 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6508 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
6511 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6514 if (!MONO_TYPE_IS_VOID (fsig->ret))
6515 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6517 CHECK_CFG_EXCEPTION;
6528 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6529 if (sp [fsig->param_count]->type == STACK_OBJ) {
6530 MonoInst *iargs [2];
6533 iargs [1] = sp [fsig->param_count];
6535 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6538 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6539 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6540 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6541 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6543 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6546 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6547 if (!cmethod->klass->element_class->valuetype && !readonly)
6548 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6549 CHECK_TYPELOAD (cmethod->klass);
6552 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6555 g_assert_not_reached ();
6558 CHECK_CFG_EXCEPTION;
6565 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6567 if (!MONO_TYPE_IS_VOID (fsig->ret))
6568 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6570 CHECK_CFG_EXCEPTION;
6580 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6582 } else if (imt_arg) {
6583 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6585 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6588 if (!MONO_TYPE_IS_VOID (fsig->ret))
6589 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6591 CHECK_CFG_EXCEPTION;
6598 if (cfg->method != method) {
6599 /* return from inlined method */
6601 * If in_count == 0, that means the ret is unreachable due to
6602 * being preceeded by a throw. In that case, inline_method () will
6603 * handle setting the return value
6604 * (test case: test_0_inline_throw ()).
6606 if (return_var && cfg->cbb->in_count) {
6610 //g_assert (returnvar != -1);
6611 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6612 cfg->ret_var_set = TRUE;
6616 MonoType *ret_type = mono_method_signature (method)->ret;
6620 * Place a seq point here too even through the IL stack is not
6621 * empty, so a step over on
6624 * will work correctly.
6626 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
6627 MONO_ADD_INS (cfg->cbb, ins);
6630 g_assert (!return_var);
6633 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6636 if (!cfg->vret_addr) {
6639 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6641 EMIT_NEW_RETLOADA (cfg, ret_addr);
6643 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6644 ins->klass = mono_class_from_mono_type (ret_type);
6647 #ifdef MONO_ARCH_SOFT_FLOAT
6648 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6649 MonoInst *iargs [1];
6653 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6654 mono_arch_emit_setret (cfg, method, conv);
6656 mono_arch_emit_setret (cfg, method, *sp);
6659 mono_arch_emit_setret (cfg, method, *sp);
6664 if (sp != stack_start)
6666 MONO_INST_NEW (cfg, ins, OP_BR);
6668 ins->inst_target_bb = end_bblock;
6669 MONO_ADD_INS (bblock, ins);
6670 link_bblock (cfg, bblock, end_bblock);
6671 start_new_bblock = 1;
6675 MONO_INST_NEW (cfg, ins, OP_BR);
6677 target = ip + 1 + (signed char)(*ip);
6679 GET_BBLOCK (cfg, tblock, target);
6680 link_bblock (cfg, bblock, tblock);
6681 ins->inst_target_bb = tblock;
6682 if (sp != stack_start) {
6683 handle_stack_args (cfg, stack_start, sp - stack_start);
6685 CHECK_UNVERIFIABLE (cfg);
6687 MONO_ADD_INS (bblock, ins);
6688 start_new_bblock = 1;
6689 inline_costs += BRANCH_COST;
6703 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6705 target = ip + 1 + *(signed char*)ip;
6711 inline_costs += BRANCH_COST;
6715 MONO_INST_NEW (cfg, ins, OP_BR);
6718 target = ip + 4 + (gint32)read32(ip);
6720 GET_BBLOCK (cfg, tblock, target);
6721 link_bblock (cfg, bblock, tblock);
6722 ins->inst_target_bb = tblock;
6723 if (sp != stack_start) {
6724 handle_stack_args (cfg, stack_start, sp - stack_start);
6726 CHECK_UNVERIFIABLE (cfg);
6729 MONO_ADD_INS (bblock, ins);
6731 start_new_bblock = 1;
6732 inline_costs += BRANCH_COST;
6739 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6740 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6741 guint32 opsize = is_short ? 1 : 4;
6743 CHECK_OPSIZE (opsize);
6745 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6748 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6753 GET_BBLOCK (cfg, tblock, target);
6754 link_bblock (cfg, bblock, tblock);
6755 GET_BBLOCK (cfg, tblock, ip);
6756 link_bblock (cfg, bblock, tblock);
6758 if (sp != stack_start) {
6759 handle_stack_args (cfg, stack_start, sp - stack_start);
6760 CHECK_UNVERIFIABLE (cfg);
6763 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6764 cmp->sreg1 = sp [0]->dreg;
6765 type_from_op (cmp, sp [0], NULL);
6768 #if SIZEOF_REGISTER == 4
6769 if (cmp->opcode == OP_LCOMPARE_IMM) {
6770 /* Convert it to OP_LCOMPARE */
6771 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6772 ins->type = STACK_I8;
6773 ins->dreg = alloc_dreg (cfg, STACK_I8);
6775 MONO_ADD_INS (bblock, ins);
6776 cmp->opcode = OP_LCOMPARE;
6777 cmp->sreg2 = ins->dreg;
6780 MONO_ADD_INS (bblock, cmp);
6782 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6783 type_from_op (ins, sp [0], NULL);
6784 MONO_ADD_INS (bblock, ins);
6785 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
6786 GET_BBLOCK (cfg, tblock, target);
6787 ins->inst_true_bb = tblock;
6788 GET_BBLOCK (cfg, tblock, ip);
6789 ins->inst_false_bb = tblock;
6790 start_new_bblock = 2;
6793 inline_costs += BRANCH_COST;
6808 MONO_INST_NEW (cfg, ins, *ip);
6810 target = ip + 4 + (gint32)read32(ip);
6816 inline_costs += BRANCH_COST;
6820 MonoBasicBlock **targets;
6821 MonoBasicBlock *default_bblock;
6822 MonoJumpInfoBBTable *table;
6823 int offset_reg = alloc_preg (cfg);
6824 int target_reg = alloc_preg (cfg);
6825 int table_reg = alloc_preg (cfg);
6826 int sum_reg = alloc_preg (cfg);
6827 gboolean use_op_switch;
6831 n = read32 (ip + 1);
6834 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
6838 CHECK_OPSIZE (n * sizeof (guint32));
6839 target = ip + n * sizeof (guint32);
6841 GET_BBLOCK (cfg, default_bblock, target);
6843 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
6844 for (i = 0; i < n; ++i) {
6845 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
6846 targets [i] = tblock;
6850 if (sp != stack_start) {
6852 * Link the current bb with the targets as well, so handle_stack_args
6853 * will set their in_stack correctly.
6855 link_bblock (cfg, bblock, default_bblock);
6856 for (i = 0; i < n; ++i)
6857 link_bblock (cfg, bblock, targets [i]);
6859 handle_stack_args (cfg, stack_start, sp - stack_start);
6861 CHECK_UNVERIFIABLE (cfg);
6864 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
6865 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
6868 for (i = 0; i < n; ++i)
6869 link_bblock (cfg, bblock, targets [i]);
6871 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
6872 table->table = targets;
6873 table->table_size = n;
6875 use_op_switch = FALSE;
6877 /* ARM implements SWITCH statements differently */
6878 /* FIXME: Make it use the generic implementation */
6879 if (!cfg->compile_aot)
6880 use_op_switch = TRUE;
6883 if (COMPILE_LLVM (cfg))
6884 use_op_switch = TRUE;
6886 cfg->cbb->has_jump_table = 1;
6888 if (use_op_switch) {
6889 MONO_INST_NEW (cfg, ins, OP_SWITCH);
6890 ins->sreg1 = src1->dreg;
6891 ins->inst_p0 = table;
6892 ins->inst_many_bb = targets;
6893 ins->klass = GUINT_TO_POINTER (n);
6894 MONO_ADD_INS (cfg->cbb, ins);
6896 if (sizeof (gpointer) == 8)
6897 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
6899 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
6901 #if SIZEOF_REGISTER == 8
6902 /* The upper word might not be zero, and we add it to a 64 bit address later */
6903 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
6906 if (cfg->compile_aot) {
6907 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
6909 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
6910 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
6911 ins->inst_p0 = table;
6912 ins->dreg = table_reg;
6913 MONO_ADD_INS (cfg->cbb, ins);
6916 /* FIXME: Use load_memindex */
6917 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
6918 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
6919 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
6921 start_new_bblock = 1;
6922 inline_costs += (BRANCH_COST * 2);
6942 dreg = alloc_freg (cfg);
6945 dreg = alloc_lreg (cfg);
6948 dreg = alloc_preg (cfg);
6951 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
6952 ins->type = ldind_type [*ip - CEE_LDIND_I1];
6953 ins->flags |= ins_flag;
6955 MONO_ADD_INS (bblock, ins);
6970 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
6971 ins->flags |= ins_flag;
6973 MONO_ADD_INS (bblock, ins);
6975 #if HAVE_WRITE_BARRIERS
6976 if (*ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0))) {
6977 MonoInst *dummy_use;
6978 /* insert call to write barrier */
6979 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
6980 mono_emit_method_call (cfg, write_barrier, sp, NULL);
6981 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
6992 MONO_INST_NEW (cfg, ins, (*ip));
6994 ins->sreg1 = sp [0]->dreg;
6995 ins->sreg2 = sp [1]->dreg;
6996 type_from_op (ins, sp [0], sp [1]);
6998 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7000 /* Use the immediate opcodes if possible */
7001 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
7002 int imm_opcode = mono_op_to_op_imm (ins->opcode);
7003 if (imm_opcode != -1) {
7004 ins->opcode = imm_opcode;
7005 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7008 sp [1]->opcode = OP_NOP;
7012 MONO_ADD_INS ((cfg)->cbb, (ins));
7014 *sp++ = mono_decompose_opcode (cfg, ins);
7031 MONO_INST_NEW (cfg, ins, (*ip));
7033 ins->sreg1 = sp [0]->dreg;
7034 ins->sreg2 = sp [1]->dreg;
7035 type_from_op (ins, sp [0], sp [1]);
7037 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7038 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7040 /* FIXME: Pass opcode to is_inst_imm */
7042 /* Use the immediate opcodes if possible */
7043 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7046 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7047 if (imm_opcode != -1) {
7048 ins->opcode = imm_opcode;
7049 if (sp [1]->opcode == OP_I8CONST) {
7050 #if SIZEOF_REGISTER == 8
7051 ins->inst_imm = sp [1]->inst_l;
7053 ins->inst_ls_word = sp [1]->inst_ls_word;
7054 ins->inst_ms_word = sp [1]->inst_ms_word;
7058 ins->inst_imm = (gssize)(sp [1]->inst_c0);
7061 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7062 if (sp [1]->next == NULL)
7063 sp [1]->opcode = OP_NOP;
7066 MONO_ADD_INS ((cfg)->cbb, (ins));
7068 *sp++ = mono_decompose_opcode (cfg, ins);
7081 case CEE_CONV_OVF_I8:
7082 case CEE_CONV_OVF_U8:
7086 /* Special case this earlier so we have long constants in the IR */
7087 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7088 int data = sp [-1]->inst_c0;
7089 sp [-1]->opcode = OP_I8CONST;
7090 sp [-1]->type = STACK_I8;
7091 #if SIZEOF_REGISTER == 8
7092 if ((*ip) == CEE_CONV_U8)
7093 sp [-1]->inst_c0 = (guint32)data;
7095 sp [-1]->inst_c0 = data;
7097 sp [-1]->inst_ls_word = data;
7098 if ((*ip) == CEE_CONV_U8)
7099 sp [-1]->inst_ms_word = 0;
7101 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7103 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7110 case CEE_CONV_OVF_I4:
7111 case CEE_CONV_OVF_I1:
7112 case CEE_CONV_OVF_I2:
7113 case CEE_CONV_OVF_I:
7114 case CEE_CONV_OVF_U:
7117 if (sp [-1]->type == STACK_R8) {
7118 ADD_UNOP (CEE_CONV_OVF_I8);
7125 case CEE_CONV_OVF_U1:
7126 case CEE_CONV_OVF_U2:
7127 case CEE_CONV_OVF_U4:
7130 if (sp [-1]->type == STACK_R8) {
7131 ADD_UNOP (CEE_CONV_OVF_U8);
7138 case CEE_CONV_OVF_I1_UN:
7139 case CEE_CONV_OVF_I2_UN:
7140 case CEE_CONV_OVF_I4_UN:
7141 case CEE_CONV_OVF_I8_UN:
7142 case CEE_CONV_OVF_U1_UN:
7143 case CEE_CONV_OVF_U2_UN:
7144 case CEE_CONV_OVF_U4_UN:
7145 case CEE_CONV_OVF_U8_UN:
7146 case CEE_CONV_OVF_I_UN:
7147 case CEE_CONV_OVF_U_UN:
7154 CHECK_CFG_EXCEPTION;
7158 case CEE_ADD_OVF_UN:
7160 case CEE_MUL_OVF_UN:
7162 case CEE_SUB_OVF_UN:
7170 token = read32 (ip + 1);
7171 klass = mini_get_class (method, token, generic_context);
7172 CHECK_TYPELOAD (klass);
7174 if (generic_class_is_reference_type (cfg, klass)) {
7175 MonoInst *store, *load;
7176 int dreg = alloc_preg (cfg);
7178 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7179 load->flags |= ins_flag;
7180 MONO_ADD_INS (cfg->cbb, load);
7182 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7183 store->flags |= ins_flag;
7184 MONO_ADD_INS (cfg->cbb, store);
7186 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7198 token = read32 (ip + 1);
7199 klass = mini_get_class (method, token, generic_context);
7200 CHECK_TYPELOAD (klass);
7202 /* Optimize the common ldobj+stloc combination */
7212 loc_index = ip [5] - CEE_STLOC_0;
7219 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7220 CHECK_LOCAL (loc_index);
7222 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7223 ins->dreg = cfg->locals [loc_index]->dreg;
7229 /* Optimize the ldobj+stobj combination */
7230 /* The reference case ends up being a load+store anyway */
7231 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7236 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7243 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7252 CHECK_STACK_OVF (1);
7254 n = read32 (ip + 1);
7256 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7257 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7258 ins->type = STACK_OBJ;
7261 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7262 MonoInst *iargs [1];
7264 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7265 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7267 if (cfg->opt & MONO_OPT_SHARED) {
7268 MonoInst *iargs [3];
7270 if (cfg->compile_aot) {
7271 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7273 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7274 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7275 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7276 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7277 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7279 if (bblock->out_of_line) {
7280 MonoInst *iargs [2];
7282 if (image == mono_defaults.corlib) {
7284 * Avoid relocations in AOT and save some space by using a
7285 * version of helper_ldstr specialized to mscorlib.
7287 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7288 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7290 /* Avoid creating the string object */
7291 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7292 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7293 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7297 if (cfg->compile_aot) {
7298 NEW_LDSTRCONST (cfg, ins, image, n);
7300 MONO_ADD_INS (bblock, ins);
7303 NEW_PCONST (cfg, ins, NULL);
7304 ins->type = STACK_OBJ;
7305 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7307 MONO_ADD_INS (bblock, ins);
7316 MonoInst *iargs [2];
7317 MonoMethodSignature *fsig;
7320 MonoInst *vtable_arg = NULL;
7323 token = read32 (ip + 1);
7324 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7327 fsig = mono_method_get_signature (cmethod, image, token);
7331 mono_save_token_info (cfg, image, token, cmethod);
7333 if (!mono_class_init (cmethod->klass))
7336 if (cfg->generic_sharing_context)
7337 context_used = mono_method_check_context_used (cmethod);
7339 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7340 if (check_linkdemand (cfg, method, cmethod))
7342 CHECK_CFG_EXCEPTION;
7343 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7344 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7347 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7348 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7349 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7350 mono_class_vtable (cfg->domain, cmethod->klass);
7351 CHECK_TYPELOAD (cmethod->klass);
7353 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7354 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7357 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7358 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7360 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7362 CHECK_TYPELOAD (cmethod->klass);
7363 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7368 n = fsig->param_count;
7372 * Generate smaller code for the common newobj <exception> instruction in
7373 * argument checking code.
7375 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7376 is_exception_class (cmethod->klass) && n <= 2 &&
7377 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7378 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7379 MonoInst *iargs [3];
7381 g_assert (!vtable_arg);
7385 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7388 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7392 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7397 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7400 g_assert_not_reached ();
7408 /* move the args to allow room for 'this' in the first position */
7414 /* check_call_signature () requires sp[0] to be set */
7415 this_ins.type = STACK_OBJ;
7417 if (check_call_signature (cfg, fsig, sp))
7422 if (mini_class_is_system_array (cmethod->klass)) {
7423 g_assert (!vtable_arg);
7425 *sp = emit_get_rgctx_method (cfg, context_used,
7426 cmethod, MONO_RGCTX_INFO_METHOD);
7428 /* Avoid varargs in the common case */
7429 if (fsig->param_count == 1)
7430 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7431 else if (fsig->param_count == 2)
7432 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7433 else if (fsig->param_count == 3)
7434 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
7436 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7437 } else if (cmethod->string_ctor) {
7438 g_assert (!context_used);
7439 g_assert (!vtable_arg);
7440 /* we simply pass a null pointer */
7441 EMIT_NEW_PCONST (cfg, *sp, NULL);
7442 /* now call the string ctor */
7443 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7445 MonoInst* callvirt_this_arg = NULL;
7447 if (cmethod->klass->valuetype) {
7448 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7449 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7450 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7455 * The code generated by mini_emit_virtual_call () expects
7456 * iargs [0] to be a boxed instance, but luckily the vcall
7457 * will be transformed into a normal call there.
7459 } else if (context_used) {
7463 if (cfg->opt & MONO_OPT_SHARED)
7464 rgctx_info = MONO_RGCTX_INFO_KLASS;
7466 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7467 data = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, rgctx_info);
7469 alloc = handle_alloc_from_inst (cfg, cmethod->klass, data, FALSE);
7472 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7474 CHECK_TYPELOAD (cmethod->klass);
7477 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7478 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7479 * As a workaround, we call class cctors before allocating objects.
7481 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7482 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7483 if (cfg->verbose_level > 2)
7484 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7485 class_inits = g_slist_prepend (class_inits, vtable);
7488 alloc = handle_alloc (cfg, cmethod->klass, FALSE);
7491 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
7494 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7496 /* Now call the actual ctor */
7497 /* Avoid virtual calls to ctors if possible */
7498 if (cmethod->klass->marshalbyref)
7499 callvirt_this_arg = sp [0];
7501 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7502 mono_method_check_inlining (cfg, cmethod) &&
7503 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7504 !g_list_find (dont_inline, cmethod)) {
7507 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7508 cfg->real_offset += 5;
7511 inline_costs += costs - 5;
7514 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7516 } else if (context_used &&
7517 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7518 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7519 MonoInst *cmethod_addr;
7521 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7522 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7524 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7527 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7528 callvirt_this_arg, NULL, vtable_arg);
7532 if (alloc == NULL) {
7534 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7535 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7549 token = read32 (ip + 1);
7550 klass = mini_get_class (method, token, generic_context);
7551 CHECK_TYPELOAD (klass);
7552 if (sp [0]->type != STACK_OBJ)
7555 if (cfg->generic_sharing_context)
7556 context_used = mono_class_check_context_used (klass);
7558 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7565 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7567 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7571 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7572 MonoMethod *mono_castclass;
7573 MonoInst *iargs [1];
7576 mono_castclass = mono_marshal_get_castclass (klass);
7579 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7580 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7581 g_assert (costs > 0);
7584 cfg->real_offset += 5;
7589 inline_costs += costs;
7592 ins = handle_castclass (cfg, klass, *sp, context_used);
7593 CHECK_CFG_EXCEPTION;
7603 token = read32 (ip + 1);
7604 klass = mini_get_class (method, token, generic_context);
7605 CHECK_TYPELOAD (klass);
7606 if (sp [0]->type != STACK_OBJ)
7609 if (cfg->generic_sharing_context)
7610 context_used = mono_class_check_context_used (klass);
7612 if (!context_used && mono_class_has_variant_generic_params (klass)) {
7619 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7621 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7625 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
7626 MonoMethod *mono_isinst;
7627 MonoInst *iargs [1];
7630 mono_isinst = mono_marshal_get_isinst (klass);
7633 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7634 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7635 g_assert (costs > 0);
7638 cfg->real_offset += 5;
7643 inline_costs += costs;
7646 ins = handle_isinst (cfg, klass, *sp, context_used);
7647 CHECK_CFG_EXCEPTION;
7654 case CEE_UNBOX_ANY: {
7658 token = read32 (ip + 1);
7659 klass = mini_get_class (method, token, generic_context);
7660 CHECK_TYPELOAD (klass);
7662 mono_save_token_info (cfg, image, token, klass);
7664 if (cfg->generic_sharing_context)
7665 context_used = mono_class_check_context_used (klass);
7667 if (generic_class_is_reference_type (cfg, klass)) {
7668 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
7670 MonoInst *iargs [2];
7675 iargs [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7676 ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
7680 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7681 MonoMethod *mono_castclass;
7682 MonoInst *iargs [1];
7685 mono_castclass = mono_marshal_get_castclass (klass);
7688 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7689 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7691 g_assert (costs > 0);
7694 cfg->real_offset += 5;
7698 inline_costs += costs;
7700 ins = handle_castclass (cfg, klass, *sp, 0);
7701 CHECK_CFG_EXCEPTION;
7709 if (mono_class_is_nullable (klass)) {
7710 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
7717 ins = handle_unbox (cfg, klass, sp, context_used);
7723 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7736 token = read32 (ip + 1);
7737 klass = mini_get_class (method, token, generic_context);
7738 CHECK_TYPELOAD (klass);
7740 mono_save_token_info (cfg, image, token, klass);
7742 if (cfg->generic_sharing_context)
7743 context_used = mono_class_check_context_used (klass);
7745 if (generic_class_is_reference_type (cfg, klass)) {
7751 if (klass == mono_defaults.void_class)
7753 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7755 /* frequent check in generic code: box (struct), brtrue */
7756 if (!mono_class_is_nullable (klass) &&
7757 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
7758 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7760 MONO_INST_NEW (cfg, ins, OP_BR);
7761 if (*ip == CEE_BRTRUE_S) {
7764 target = ip + 1 + (signed char)(*ip);
7769 target = ip + 4 + (gint)(read32 (ip));
7772 GET_BBLOCK (cfg, tblock, target);
7773 link_bblock (cfg, bblock, tblock);
7774 ins->inst_target_bb = tblock;
7775 GET_BBLOCK (cfg, tblock, ip);
7777 * This leads to some inconsistency, since the two bblocks are
7778 * not really connected, but it is needed for handling stack
7779 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
7780 * FIXME: This should only be needed if sp != stack_start, but that
7781 * doesn't work for some reason (test failure in mcs/tests on x86).
7783 link_bblock (cfg, bblock, tblock);
7784 if (sp != stack_start) {
7785 handle_stack_args (cfg, stack_start, sp - stack_start);
7787 CHECK_UNVERIFIABLE (cfg);
7789 MONO_ADD_INS (bblock, ins);
7790 start_new_bblock = 1;
7798 if (cfg->opt & MONO_OPT_SHARED)
7799 rgctx_info = MONO_RGCTX_INFO_KLASS;
7801 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7802 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
7803 *sp++ = handle_box_from_inst (cfg, val, klass, context_used, data);
7805 *sp++ = handle_box (cfg, val, klass);
7808 CHECK_CFG_EXCEPTION;
7817 token = read32 (ip + 1);
7818 klass = mini_get_class (method, token, generic_context);
7819 CHECK_TYPELOAD (klass);
7821 mono_save_token_info (cfg, image, token, klass);
7823 if (cfg->generic_sharing_context)
7824 context_used = mono_class_check_context_used (klass);
7826 if (mono_class_is_nullable (klass)) {
7829 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
7830 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
7834 ins = handle_unbox (cfg, klass, sp, context_used);
7844 MonoClassField *field;
7848 if (*ip == CEE_STFLD) {
7855 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
7857 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
7860 token = read32 (ip + 1);
7861 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7862 field = mono_method_get_wrapper_data (method, token);
7863 klass = field->parent;
7866 field = mono_field_from_token (image, token, &klass, generic_context);
7870 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7871 FIELD_ACCESS_FAILURE;
7872 mono_class_init (klass);
7874 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
7875 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
7876 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
7877 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
7880 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
7881 if (*ip == CEE_STFLD) {
7882 if (target_type_is_incompatible (cfg, field->type, sp [1]))
7884 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7885 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
7886 MonoInst *iargs [5];
7889 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7890 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7891 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
7895 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
7896 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
7897 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7898 g_assert (costs > 0);
7900 cfg->real_offset += 5;
7903 inline_costs += costs;
7905 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
7910 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
7912 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
7914 #if HAVE_WRITE_BARRIERS
7915 if (mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
7916 /* insert call to write barrier */
7917 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7918 MonoInst *iargs [2], *dummy_use;
7921 dreg = alloc_preg (cfg);
7922 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7924 mono_emit_method_call (cfg, write_barrier, iargs, NULL);
7926 EMIT_NEW_DUMMY_USE (cfg, dummy_use, sp [1]);
7930 store->flags |= ins_flag;
7937 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7938 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
7939 MonoInst *iargs [4];
7942 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7943 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7944 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
7945 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
7946 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
7947 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7949 g_assert (costs > 0);
7951 cfg->real_offset += 5;
7955 inline_costs += costs;
7957 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
7961 if (sp [0]->type == STACK_VTYPE) {
7964 /* Have to compute the address of the variable */
7966 var = get_vreg_to_inst (cfg, sp [0]->dreg);
7968 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
7970 g_assert (var->klass == klass);
7972 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
7976 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
7978 if (*ip == CEE_LDFLDA) {
7979 dreg = alloc_preg (cfg);
7981 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7982 ins->klass = mono_class_from_mono_type (field->type);
7983 ins->type = STACK_MP;
7988 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
7989 load->flags |= ins_flag;
7990 load->flags |= MONO_INST_FAULT;
8001 MonoClassField *field;
8002 gpointer addr = NULL;
8003 gboolean is_special_static;
8006 token = read32 (ip + 1);
8008 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8009 field = mono_method_get_wrapper_data (method, token);
8010 klass = field->parent;
8013 field = mono_field_from_token (image, token, &klass, generic_context);
8016 mono_class_init (klass);
8017 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8018 FIELD_ACCESS_FAILURE;
8020 /* if the class is Critical then transparent code cannot access it's fields */
8021 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8022 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8025 * We can only support shared generic static
8026 * field access on architectures where the
8027 * trampoline code has been extended to handle
8028 * the generic class init.
8030 #ifndef MONO_ARCH_VTABLE_REG
8031 GENERIC_SHARING_FAILURE (*ip);
8034 if (cfg->generic_sharing_context)
8035 context_used = mono_class_check_context_used (klass);
8037 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
8039 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8040 * to be called here.
8042 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8043 mono_class_vtable (cfg->domain, klass);
8044 CHECK_TYPELOAD (klass);
8046 mono_domain_lock (cfg->domain);
8047 if (cfg->domain->special_static_fields)
8048 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8049 mono_domain_unlock (cfg->domain);
8051 is_special_static = mono_class_field_is_special_static (field);
8053 /* Generate IR to compute the field address */
8054 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
8056 * Fast access to TLS data
8057 * Inline version of get_thread_static_data () in
8061 int idx, static_data_reg, array_reg, dreg;
8062 MonoInst *thread_ins;
8064 // offset &= 0x7fffffff;
8065 // idx = (offset >> 24) - 1;
8066 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
8068 thread_ins = mono_get_thread_intrinsic (cfg);
8069 MONO_ADD_INS (cfg->cbb, thread_ins);
8070 static_data_reg = alloc_ireg (cfg);
8071 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
8073 if (cfg->compile_aot) {
8074 int offset_reg, offset2_reg, idx_reg;
8076 /* For TLS variables, this will return the TLS offset */
8077 EMIT_NEW_SFLDACONST (cfg, ins, field);
8078 offset_reg = ins->dreg;
8079 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
8080 idx_reg = alloc_ireg (cfg);
8081 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
8082 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
8083 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
8084 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
8085 array_reg = alloc_ireg (cfg);
8086 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
8087 offset2_reg = alloc_ireg (cfg);
8088 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
8089 dreg = alloc_ireg (cfg);
8090 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
8092 offset = (gsize)addr & 0x7fffffff;
8093 idx = (offset >> 24) - 1;
8095 array_reg = alloc_ireg (cfg);
8096 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
8097 dreg = alloc_ireg (cfg);
8098 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
8100 } else if ((cfg->opt & MONO_OPT_SHARED) ||
8101 (cfg->compile_aot && is_special_static) ||
8102 (context_used && is_special_static)) {
8103 MonoInst *iargs [2];
8105 g_assert (field->parent);
8106 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8108 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8109 field, MONO_RGCTX_INFO_CLASS_FIELD);
8111 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8113 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8114 } else if (context_used) {
8115 MonoInst *static_data;
8118 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8119 method->klass->name_space, method->klass->name, method->name,
8120 depth, field->offset);
8123 if (mono_class_needs_cctor_run (klass, method)) {
8127 vtable = emit_get_rgctx_klass (cfg, context_used,
8128 klass, MONO_RGCTX_INFO_VTABLE);
8130 // FIXME: This doesn't work since it tries to pass the argument
8131 // in the normal way, instead of using MONO_ARCH_VTABLE_REG
8133 * The vtable pointer is always passed in a register regardless of
8134 * the calling convention, so assign it manually, and make a call
8135 * using a signature without parameters.
8137 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable);
8138 #ifdef MONO_ARCH_VTABLE_REG
8139 mono_call_inst_add_outarg_reg (cfg, call, vtable->dreg, MONO_ARCH_VTABLE_REG, FALSE);
8140 cfg->uses_vtable_reg = TRUE;
8147 * The pointer we're computing here is
8149 * super_info.static_data + field->offset
8151 static_data = emit_get_rgctx_klass (cfg, context_used,
8152 klass, MONO_RGCTX_INFO_STATIC_DATA);
8154 if (field->offset == 0) {
8157 int addr_reg = mono_alloc_preg (cfg);
8158 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8160 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8161 MonoInst *iargs [2];
8163 g_assert (field->parent);
8164 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8165 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8166 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8168 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8170 CHECK_TYPELOAD (klass);
8172 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8173 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8174 if (cfg->verbose_level > 2)
8175 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8176 class_inits = g_slist_prepend (class_inits, vtable);
8178 if (cfg->run_cctors) {
8180 /* This makes so that inline cannot trigger */
8181 /* .cctors: too many apps depend on them */
8182 /* running with a specific order... */
8183 if (! vtable->initialized)
8185 ex = mono_runtime_class_init_full (vtable, FALSE);
8187 set_exception_object (cfg, ex);
8188 goto exception_exit;
8192 addr = (char*)vtable->data + field->offset;
8194 if (cfg->compile_aot)
8195 EMIT_NEW_SFLDACONST (cfg, ins, field);
8197 EMIT_NEW_PCONST (cfg, ins, addr);
8199 MonoInst *iargs [1];
8200 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8201 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8205 /* Generate IR to do the actual load/store operation */
8207 if (*ip == CEE_LDSFLDA) {
8208 ins->klass = mono_class_from_mono_type (field->type);
8209 ins->type = STACK_PTR;
8211 } else if (*ip == CEE_STSFLD) {
8216 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
8217 store->flags |= ins_flag;
8219 gboolean is_const = FALSE;
8220 MonoVTable *vtable = NULL;
8222 if (!context_used) {
8223 vtable = mono_class_vtable (cfg->domain, klass);
8224 CHECK_TYPELOAD (klass);
8226 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8227 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8228 gpointer addr = (char*)vtable->data + field->offset;
8229 int ro_type = field->type->type;
8230 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
8231 ro_type = mono_class_enum_basetype (field->type->data.klass)->type;
8233 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8236 case MONO_TYPE_BOOLEAN:
8238 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8242 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8245 case MONO_TYPE_CHAR:
8247 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8251 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8256 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8260 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8263 #ifndef HAVE_MOVING_COLLECTOR
8266 case MONO_TYPE_STRING:
8267 case MONO_TYPE_OBJECT:
8268 case MONO_TYPE_CLASS:
8269 case MONO_TYPE_SZARRAY:
8271 case MONO_TYPE_FNPTR:
8272 case MONO_TYPE_ARRAY:
8273 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8274 type_to_eval_stack_type ((cfg), field->type, *sp);
8280 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8285 case MONO_TYPE_VALUETYPE:
8295 CHECK_STACK_OVF (1);
8297 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8298 load->flags |= ins_flag;
8311 token = read32 (ip + 1);
8312 klass = mini_get_class (method, token, generic_context);
8313 CHECK_TYPELOAD (klass);
8314 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8315 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8326 const char *data_ptr;
8328 guint32 field_token;
8334 token = read32 (ip + 1);
8336 klass = mini_get_class (method, token, generic_context);
8337 CHECK_TYPELOAD (klass);
8339 if (cfg->generic_sharing_context)
8340 context_used = mono_class_check_context_used (klass);
8342 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
8343 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
8344 ins->sreg1 = sp [0]->dreg;
8345 ins->type = STACK_I4;
8346 ins->dreg = alloc_ireg (cfg);
8347 MONO_ADD_INS (cfg->cbb, ins);
8348 *sp = mono_decompose_opcode (cfg, ins);
8353 MonoClass *array_class = mono_array_class_get (klass, 1);
8354 /* FIXME: we cannot get a managed
8355 allocator because we can't get the
8356 open generic class's vtable. We
8357 have the same problem in
8358 handle_alloc_from_inst(). This
8359 needs to be solved so that we can
8360 have managed allocs of shared
8363 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
8364 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
8366 MonoMethod *managed_alloc = NULL;
8368 /* FIXME: Decompose later to help abcrem */
8371 args [0] = emit_get_rgctx_klass (cfg, context_used,
8372 array_class, MONO_RGCTX_INFO_VTABLE);
8377 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
8379 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8381 if (cfg->opt & MONO_OPT_SHARED) {
8382 /* Decompose now to avoid problems with references to the domainvar */
8383 MonoInst *iargs [3];
8385 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8386 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8389 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8391 /* Decompose later since it is needed by abcrem */
8392 MonoClass *array_type = mono_array_class_get (klass, 1);
8393 mono_class_vtable (cfg->domain, array_type);
8394 CHECK_TYPELOAD (array_type);
8396 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8397 ins->dreg = alloc_preg (cfg);
8398 ins->sreg1 = sp [0]->dreg;
8399 ins->inst_newa_class = klass;
8400 ins->type = STACK_OBJ;
8402 MONO_ADD_INS (cfg->cbb, ins);
8403 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8404 cfg->cbb->has_array_access = TRUE;
8406 /* Needed so mono_emit_load_get_addr () gets called */
8407 mono_get_got_var (cfg);
8417 * we inline/optimize the initialization sequence if possible.
8418 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8419 * for small sizes open code the memcpy
8420 * ensure the rva field is big enough
8422 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8423 MonoMethod *memcpy_method = get_memcpy_method ();
8424 MonoInst *iargs [3];
8425 int add_reg = alloc_preg (cfg);
8427 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8428 if (cfg->compile_aot) {
8429 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8431 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8433 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8434 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8443 if (sp [0]->type != STACK_OBJ)
8446 dreg = alloc_preg (cfg);
8447 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8448 ins->dreg = alloc_preg (cfg);
8449 ins->sreg1 = sp [0]->dreg;
8450 ins->type = STACK_I4;
8451 MONO_ADD_INS (cfg->cbb, ins);
8452 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8453 cfg->cbb->has_array_access = TRUE;
8461 if (sp [0]->type != STACK_OBJ)
8464 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8466 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8467 CHECK_TYPELOAD (klass);
8468 /* we need to make sure that this array is exactly the type it needs
8469 * to be for correctness. the wrappers are lax with their usage
8470 * so we need to ignore them here
8472 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
8473 MonoClass *array_class = mono_array_class_get (klass, 1);
8474 mini_emit_check_array_type (cfg, sp [0], array_class);
8475 CHECK_TYPELOAD (array_class);
8479 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8494 case CEE_LDELEM_REF: {
8500 if (*ip == CEE_LDELEM) {
8502 token = read32 (ip + 1);
8503 klass = mini_get_class (method, token, generic_context);
8504 CHECK_TYPELOAD (klass);
8505 mono_class_init (klass);
8508 klass = array_access_to_klass (*ip);
8510 if (sp [0]->type != STACK_OBJ)
8513 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8515 if (sp [1]->opcode == OP_ICONST) {
8516 int array_reg = sp [0]->dreg;
8517 int index_reg = sp [1]->dreg;
8518 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8520 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8521 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8523 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8524 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8527 if (*ip == CEE_LDELEM)
8540 case CEE_STELEM_REF:
8547 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8549 if (*ip == CEE_STELEM) {
8551 token = read32 (ip + 1);
8552 klass = mini_get_class (method, token, generic_context);
8553 CHECK_TYPELOAD (klass);
8554 mono_class_init (klass);
8557 klass = array_access_to_klass (*ip);
8559 if (sp [0]->type != STACK_OBJ)
8562 /* storing a NULL doesn't need any of the complex checks in stelemref */
8563 if (generic_class_is_reference_type (cfg, klass) &&
8564 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8565 MonoMethod* helper = mono_marshal_get_stelemref ();
8566 MonoInst *iargs [3];
8568 if (sp [0]->type != STACK_OBJ)
8570 if (sp [2]->type != STACK_OBJ)
8577 mono_emit_method_call (cfg, helper, iargs, NULL);
8579 if (sp [1]->opcode == OP_ICONST) {
8580 int array_reg = sp [0]->dreg;
8581 int index_reg = sp [1]->dreg;
8582 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8584 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8585 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8587 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8588 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8592 if (*ip == CEE_STELEM)
8599 case CEE_CKFINITE: {
8603 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8604 ins->sreg1 = sp [0]->dreg;
8605 ins->dreg = alloc_freg (cfg);
8606 ins->type = STACK_R8;
8607 MONO_ADD_INS (bblock, ins);
8609 *sp++ = mono_decompose_opcode (cfg, ins);
8614 case CEE_REFANYVAL: {
8615 MonoInst *src_var, *src;
8617 int klass_reg = alloc_preg (cfg);
8618 int dreg = alloc_preg (cfg);
8621 MONO_INST_NEW (cfg, ins, *ip);
8624 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8625 CHECK_TYPELOAD (klass);
8626 mono_class_init (klass);
8628 if (cfg->generic_sharing_context)
8629 context_used = mono_class_check_context_used (klass);
8632 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8634 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8635 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8636 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8639 MonoInst *klass_ins;
8641 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8642 klass, MONO_RGCTX_INFO_KLASS);
8645 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8646 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8648 mini_emit_class_check (cfg, klass_reg, klass);
8650 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8651 ins->type = STACK_MP;
8656 case CEE_MKREFANY: {
8657 MonoInst *loc, *addr;
8660 MONO_INST_NEW (cfg, ins, *ip);
8663 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8664 CHECK_TYPELOAD (klass);
8665 mono_class_init (klass);
8667 if (cfg->generic_sharing_context)
8668 context_used = mono_class_check_context_used (klass);
8670 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8671 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8674 MonoInst *const_ins;
8675 int type_reg = alloc_preg (cfg);
8677 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8678 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8679 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8680 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8681 } else if (cfg->compile_aot) {
8682 int const_reg = alloc_preg (cfg);
8683 int type_reg = alloc_preg (cfg);
8685 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8686 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8687 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8688 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8690 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8691 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8693 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8695 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8696 ins->type = STACK_VTYPE;
8697 ins->klass = mono_defaults.typed_reference_class;
8704 MonoClass *handle_class;
8706 CHECK_STACK_OVF (1);
8709 n = read32 (ip + 1);
8711 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
8712 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8713 handle = mono_method_get_wrapper_data (method, n);
8714 handle_class = mono_method_get_wrapper_data (method, n + 1);
8715 if (handle_class == mono_defaults.typehandle_class)
8716 handle = &((MonoClass*)handle)->byval_arg;
8719 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8723 mono_class_init (handle_class);
8724 if (cfg->generic_sharing_context) {
8725 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
8726 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
8727 /* This case handles ldtoken
8728 of an open type, like for
8731 } else if (handle_class == mono_defaults.typehandle_class) {
8732 /* If we get a MONO_TYPE_CLASS
8733 then we need to provide the
8735 instantiation of it. */
8736 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8739 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8740 } else if (handle_class == mono_defaults.fieldhandle_class)
8741 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8742 else if (handle_class == mono_defaults.methodhandle_class)
8743 context_used = mono_method_check_context_used (handle);
8745 g_assert_not_reached ();
8748 if ((cfg->opt & MONO_OPT_SHARED) &&
8749 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
8750 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
8751 MonoInst *addr, *vtvar, *iargs [3];
8752 int method_context_used;
8754 if (cfg->generic_sharing_context)
8755 method_context_used = mono_method_check_context_used (method);
8757 method_context_used = 0;
8759 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8761 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8762 EMIT_NEW_ICONST (cfg, iargs [1], n);
8763 if (method_context_used) {
8764 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
8765 method, MONO_RGCTX_INFO_METHOD);
8766 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8768 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
8769 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
8771 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8773 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8775 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8777 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
8778 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
8779 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
8780 (cmethod->klass == mono_defaults.monotype_class->parent) &&
8781 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
8782 MonoClass *tclass = mono_class_from_mono_type (handle);
8784 mono_class_init (tclass);
8786 ins = emit_get_rgctx_klass (cfg, context_used,
8787 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
8788 } else if (cfg->compile_aot) {
8789 if (method->wrapper_type) {
8790 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
8791 /* Special case for static synchronized wrappers */
8792 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
8794 /* FIXME: n is not a normal token */
8795 cfg->disable_aot = TRUE;
8796 EMIT_NEW_PCONST (cfg, ins, NULL);
8799 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
8802 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
8804 ins->type = STACK_OBJ;
8805 ins->klass = cmethod->klass;
8808 MonoInst *addr, *vtvar;
8810 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8813 if (handle_class == mono_defaults.typehandle_class) {
8814 ins = emit_get_rgctx_klass (cfg, context_used,
8815 mono_class_from_mono_type (handle),
8816 MONO_RGCTX_INFO_TYPE);
8817 } else if (handle_class == mono_defaults.methodhandle_class) {
8818 ins = emit_get_rgctx_method (cfg, context_used,
8819 handle, MONO_RGCTX_INFO_METHOD);
8820 } else if (handle_class == mono_defaults.fieldhandle_class) {
8821 ins = emit_get_rgctx_field (cfg, context_used,
8822 handle, MONO_RGCTX_INFO_CLASS_FIELD);
8824 g_assert_not_reached ();
8826 } else if (cfg->compile_aot) {
8827 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
8829 EMIT_NEW_PCONST (cfg, ins, handle);
8831 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8832 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8833 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8843 MONO_INST_NEW (cfg, ins, OP_THROW);
8845 ins->sreg1 = sp [0]->dreg;
8847 bblock->out_of_line = TRUE;
8848 MONO_ADD_INS (bblock, ins);
8849 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
8850 MONO_ADD_INS (bblock, ins);
8853 link_bblock (cfg, bblock, end_bblock);
8854 start_new_bblock = 1;
8856 case CEE_ENDFINALLY:
8857 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
8858 MONO_ADD_INS (bblock, ins);
8860 start_new_bblock = 1;
8863 * Control will leave the method so empty the stack, otherwise
8864 * the next basic block will start with a nonempty stack.
8866 while (sp != stack_start) {
8874 if (*ip == CEE_LEAVE) {
8876 target = ip + 5 + (gint32)read32(ip + 1);
8879 target = ip + 2 + (signed char)(ip [1]);
8882 /* empty the stack */
8883 while (sp != stack_start) {
8888 * If this leave statement is in a catch block, check for a
8889 * pending exception, and rethrow it if necessary.
8890 * We avoid doing this in runtime invoke wrappers, since those are called
8891 * by native code which excepts the wrapper to catch all exceptions.
8893 for (i = 0; i < header->num_clauses; ++i) {
8894 MonoExceptionClause *clause = &header->clauses [i];
8897 * Use <= in the final comparison to handle clauses with multiple
8898 * leave statements, like in bug #78024.
8899 * The ordering of the exception clauses guarantees that we find the
8902 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
8904 MonoBasicBlock *dont_throw;
8909 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
8912 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
8914 NEW_BBLOCK (cfg, dont_throw);
8917 * Currently, we allways rethrow the abort exception, despite the
8918 * fact that this is not correct. See thread6.cs for an example.
8919 * But propagating the abort exception is more important than
8920 * getting the sematics right.
8922 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
8923 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
8924 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
8926 MONO_START_BB (cfg, dont_throw);
8931 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
8933 MonoExceptionClause *clause;
8935 for (tmp = handlers; tmp; tmp = tmp->next) {
8937 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
8939 link_bblock (cfg, bblock, tblock);
8940 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
8941 ins->inst_target_bb = tblock;
8942 ins->inst_eh_block = clause;
8943 MONO_ADD_INS (bblock, ins);
8944 bblock->has_call_handler = 1;
8945 if (COMPILE_LLVM (cfg)) {
8946 MonoBasicBlock *target_bb;
8949 * Link the finally bblock with the target, since it will
8950 * conceptually branch there.
8951 * FIXME: Have to link the bblock containing the endfinally.
8953 GET_BBLOCK (cfg, target_bb, target);
8954 link_bblock (cfg, tblock, target_bb);
8957 g_list_free (handlers);
8960 MONO_INST_NEW (cfg, ins, OP_BR);
8961 MONO_ADD_INS (bblock, ins);
8962 GET_BBLOCK (cfg, tblock, target);
8963 link_bblock (cfg, bblock, tblock);
8964 ins->inst_target_bb = tblock;
8965 start_new_bblock = 1;
8967 if (*ip == CEE_LEAVE)
8976 * Mono specific opcodes
8978 case MONO_CUSTOM_PREFIX: {
8980 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
8984 case CEE_MONO_ICALL: {
8986 MonoJitICallInfo *info;
8988 token = read32 (ip + 2);
8989 func = mono_method_get_wrapper_data (method, token);
8990 info = mono_find_jit_icall_by_addr (func);
8993 CHECK_STACK (info->sig->param_count);
8994 sp -= info->sig->param_count;
8996 ins = mono_emit_jit_icall (cfg, info->func, sp);
8997 if (!MONO_TYPE_IS_VOID (info->sig->ret))
9001 inline_costs += 10 * num_calls++;
9005 case CEE_MONO_LDPTR: {
9008 CHECK_STACK_OVF (1);
9010 token = read32 (ip + 2);
9012 ptr = mono_method_get_wrapper_data (method, token);
9013 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
9014 MonoJitICallInfo *callinfo;
9015 const char *icall_name;
9017 icall_name = method->name + strlen ("__icall_wrapper_");
9018 g_assert (icall_name);
9019 callinfo = mono_find_jit_icall_by_name (icall_name);
9020 g_assert (callinfo);
9022 if (ptr == callinfo->func) {
9023 /* Will be transformed into an AOTCONST later */
9024 EMIT_NEW_PCONST (cfg, ins, ptr);
9030 /* FIXME: Generalize this */
9031 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
9032 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
9037 EMIT_NEW_PCONST (cfg, ins, ptr);
9040 inline_costs += 10 * num_calls++;
9041 /* Can't embed random pointers into AOT code */
9042 cfg->disable_aot = 1;
9045 case CEE_MONO_ICALL_ADDR: {
9046 MonoMethod *cmethod;
9049 CHECK_STACK_OVF (1);
9051 token = read32 (ip + 2);
9053 cmethod = mono_method_get_wrapper_data (method, token);
9055 if (cfg->compile_aot) {
9056 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
9058 ptr = mono_lookup_internal_call (cmethod);
9060 EMIT_NEW_PCONST (cfg, ins, ptr);
9066 case CEE_MONO_VTADDR: {
9067 MonoInst *src_var, *src;
9073 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9074 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
9079 case CEE_MONO_NEWOBJ: {
9080 MonoInst *iargs [2];
9082 CHECK_STACK_OVF (1);
9084 token = read32 (ip + 2);
9085 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9086 mono_class_init (klass);
9087 NEW_DOMAINCONST (cfg, iargs [0]);
9088 MONO_ADD_INS (cfg->cbb, iargs [0]);
9089 NEW_CLASSCONST (cfg, iargs [1], klass);
9090 MONO_ADD_INS (cfg->cbb, iargs [1]);
9091 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9093 inline_costs += 10 * num_calls++;
9096 case CEE_MONO_OBJADDR:
9099 MONO_INST_NEW (cfg, ins, OP_MOVE);
9100 ins->dreg = alloc_preg (cfg);
9101 ins->sreg1 = sp [0]->dreg;
9102 ins->type = STACK_MP;
9103 MONO_ADD_INS (cfg->cbb, ins);
9107 case CEE_MONO_LDNATIVEOBJ:
9109 * Similar to LDOBJ, but instead load the unmanaged
9110 * representation of the vtype to the stack.
9115 token = read32 (ip + 2);
9116 klass = mono_method_get_wrapper_data (method, token);
9117 g_assert (klass->valuetype);
9118 mono_class_init (klass);
9121 MonoInst *src, *dest, *temp;
9124 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9125 temp->backend.is_pinvoke = 1;
9126 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9127 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9129 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9130 dest->type = STACK_VTYPE;
9131 dest->klass = klass;
9137 case CEE_MONO_RETOBJ: {
9139 * Same as RET, but return the native representation of a vtype
9142 g_assert (cfg->ret);
9143 g_assert (mono_method_signature (method)->pinvoke);
9148 token = read32 (ip + 2);
9149 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9151 if (!cfg->vret_addr) {
9152 g_assert (cfg->ret_var_is_local);
9154 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9156 EMIT_NEW_RETLOADA (cfg, ins);
9158 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9160 if (sp != stack_start)
9163 MONO_INST_NEW (cfg, ins, OP_BR);
9164 ins->inst_target_bb = end_bblock;
9165 MONO_ADD_INS (bblock, ins);
9166 link_bblock (cfg, bblock, end_bblock);
9167 start_new_bblock = 1;
9171 case CEE_MONO_CISINST:
9172 case CEE_MONO_CCASTCLASS: {
9177 token = read32 (ip + 2);
9178 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9179 if (ip [1] == CEE_MONO_CISINST)
9180 ins = handle_cisinst (cfg, klass, sp [0]);
9182 ins = handle_ccastclass (cfg, klass, sp [0]);
9188 case CEE_MONO_SAVE_LMF:
9189 case CEE_MONO_RESTORE_LMF:
9190 #ifdef MONO_ARCH_HAVE_LMF_OPS
9191 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9192 MONO_ADD_INS (bblock, ins);
9193 cfg->need_lmf_area = TRUE;
9197 case CEE_MONO_CLASSCONST:
9198 CHECK_STACK_OVF (1);
9200 token = read32 (ip + 2);
9201 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9204 inline_costs += 10 * num_calls++;
9206 case CEE_MONO_NOT_TAKEN:
9207 bblock->out_of_line = TRUE;
9211 CHECK_STACK_OVF (1);
9213 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9214 ins->dreg = alloc_preg (cfg);
9215 ins->inst_offset = (gint32)read32 (ip + 2);
9216 ins->type = STACK_PTR;
9217 MONO_ADD_INS (bblock, ins);
9221 case CEE_MONO_DYN_CALL: {
9224 /* It would be easier to call a trampoline, but that would put an
9225 * extra frame on the stack, confusing exception handling. So
9226 * implement it inline using an opcode for now.
9229 if (!cfg->dyn_call_var) {
9230 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9231 /* prevent it from being register allocated */
9232 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
9235 /* Has to use a call inst since it local regalloc expects it */
9236 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
9237 ins = (MonoInst*)call;
9239 ins->sreg1 = sp [0]->dreg;
9240 ins->sreg2 = sp [1]->dreg;
9241 MONO_ADD_INS (bblock, ins);
9243 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
9244 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
9248 inline_costs += 10 * num_calls++;
9253 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9263 /* somewhat similar to LDTOKEN */
9264 MonoInst *addr, *vtvar;
9265 CHECK_STACK_OVF (1);
9266 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9268 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9269 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9271 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9272 ins->type = STACK_VTYPE;
9273 ins->klass = mono_defaults.argumenthandle_class;
9286 * The following transforms:
9287 * CEE_CEQ into OP_CEQ
9288 * CEE_CGT into OP_CGT
9289 * CEE_CGT_UN into OP_CGT_UN
9290 * CEE_CLT into OP_CLT
9291 * CEE_CLT_UN into OP_CLT_UN
9293 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9295 MONO_INST_NEW (cfg, ins, cmp->opcode);
9297 cmp->sreg1 = sp [0]->dreg;
9298 cmp->sreg2 = sp [1]->dreg;
9299 type_from_op (cmp, sp [0], sp [1]);
9301 if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9302 cmp->opcode = OP_LCOMPARE;
9303 else if (sp [0]->type == STACK_R8)
9304 cmp->opcode = OP_FCOMPARE;
9306 cmp->opcode = OP_ICOMPARE;
9307 MONO_ADD_INS (bblock, cmp);
9308 ins->type = STACK_I4;
9309 ins->dreg = alloc_dreg (cfg, ins->type);
9310 type_from_op (ins, sp [0], sp [1]);
9312 if (cmp->opcode == OP_FCOMPARE) {
9314 * The backends expect the fceq opcodes to do the
9317 cmp->opcode = OP_NOP;
9318 ins->sreg1 = cmp->sreg1;
9319 ins->sreg2 = cmp->sreg2;
9321 MONO_ADD_INS (bblock, ins);
9328 MonoMethod *cil_method;
9329 gboolean needs_static_rgctx_invoke;
9331 CHECK_STACK_OVF (1);
9333 n = read32 (ip + 2);
9334 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9337 mono_class_init (cmethod->klass);
9339 mono_save_token_info (cfg, image, n, cmethod);
9341 if (cfg->generic_sharing_context)
9342 context_used = mono_method_check_context_used (cmethod);
9344 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9346 cil_method = cmethod;
9347 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9348 METHOD_ACCESS_FAILURE;
9350 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9351 if (check_linkdemand (cfg, method, cmethod))
9353 CHECK_CFG_EXCEPTION;
9354 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9355 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9359 * Optimize the common case of ldftn+delegate creation
9361 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9362 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9363 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9365 int invoke_context_used = 0;
9367 invoke = mono_get_delegate_invoke (ctor_method->klass);
9368 if (!invoke || !mono_method_signature (invoke))
9371 if (cfg->generic_sharing_context)
9372 invoke_context_used = mono_method_check_context_used (invoke);
9374 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
9375 /* FIXME: SGEN support */
9376 if (invoke_context_used == 0) {
9377 MonoInst *target_ins;
9380 if (cfg->verbose_level > 3)
9381 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9382 target_ins = sp [-1];
9384 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
9385 CHECK_CFG_EXCEPTION;
9394 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9395 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9399 inline_costs += 10 * num_calls++;
9402 case CEE_LDVIRTFTN: {
9407 n = read32 (ip + 2);
9408 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9411 mono_class_init (cmethod->klass);
9413 if (cfg->generic_sharing_context)
9414 context_used = mono_method_check_context_used (cmethod);
9416 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9417 if (check_linkdemand (cfg, method, cmethod))
9419 CHECK_CFG_EXCEPTION;
9420 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9421 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9427 args [1] = emit_get_rgctx_method (cfg, context_used,
9428 cmethod, MONO_RGCTX_INFO_METHOD);
9431 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9433 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9436 inline_costs += 10 * num_calls++;
9440 CHECK_STACK_OVF (1);
9442 n = read16 (ip + 2);
9444 EMIT_NEW_ARGLOAD (cfg, ins, n);
9449 CHECK_STACK_OVF (1);
9451 n = read16 (ip + 2);
9453 NEW_ARGLOADA (cfg, ins, n);
9454 MONO_ADD_INS (cfg->cbb, ins);
9462 n = read16 (ip + 2);
9464 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9466 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9470 CHECK_STACK_OVF (1);
9472 n = read16 (ip + 2);
9474 EMIT_NEW_LOCLOAD (cfg, ins, n);
9479 unsigned char *tmp_ip;
9480 CHECK_STACK_OVF (1);
9482 n = read16 (ip + 2);
9485 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9491 EMIT_NEW_LOCLOADA (cfg, ins, n);
9500 n = read16 (ip + 2);
9502 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9504 emit_stloc_ir (cfg, sp, header, n);
9511 if (sp != stack_start)
9513 if (cfg->method != method)
9515 * Inlining this into a loop in a parent could lead to
9516 * stack overflows which is different behavior than the
9517 * non-inlined case, thus disable inlining in this case.
9519 goto inline_failure;
9521 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9522 ins->dreg = alloc_preg (cfg);
9523 ins->sreg1 = sp [0]->dreg;
9524 ins->type = STACK_PTR;
9525 MONO_ADD_INS (cfg->cbb, ins);
9527 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9529 ins->flags |= MONO_INST_INIT;
9534 case CEE_ENDFILTER: {
9535 MonoExceptionClause *clause, *nearest;
9536 int cc, nearest_num;
9540 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9542 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9543 ins->sreg1 = (*sp)->dreg;
9544 MONO_ADD_INS (bblock, ins);
9545 start_new_bblock = 1;
9550 for (cc = 0; cc < header->num_clauses; ++cc) {
9551 clause = &header->clauses [cc];
9552 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9553 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9554 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9560 if ((ip - header->code) != nearest->handler_offset)
9565 case CEE_UNALIGNED_:
9566 ins_flag |= MONO_INST_UNALIGNED;
9567 /* FIXME: record alignment? we can assume 1 for now */
9572 ins_flag |= MONO_INST_VOLATILE;
9576 ins_flag |= MONO_INST_TAILCALL;
9577 cfg->flags |= MONO_CFG_HAS_TAIL;
9578 /* Can't inline tail calls at this time */
9579 inline_costs += 100000;
9586 token = read32 (ip + 2);
9587 klass = mini_get_class (method, token, generic_context);
9588 CHECK_TYPELOAD (klass);
9589 if (generic_class_is_reference_type (cfg, klass))
9590 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9592 mini_emit_initobj (cfg, *sp, NULL, klass);
9596 case CEE_CONSTRAINED_:
9598 token = read32 (ip + 2);
9599 if (method->wrapper_type != MONO_WRAPPER_NONE)
9600 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
9602 constrained_call = mono_class_get_full (image, token, generic_context);
9603 CHECK_TYPELOAD (constrained_call);
9608 MonoInst *iargs [3];
9612 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9613 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9614 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9615 /* emit_memset only works when val == 0 */
9616 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9621 if (ip [1] == CEE_CPBLK) {
9622 MonoMethod *memcpy_method = get_memcpy_method ();
9623 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9625 MonoMethod *memset_method = get_memset_method ();
9626 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9636 ins_flag |= MONO_INST_NOTYPECHECK;
9638 ins_flag |= MONO_INST_NORANGECHECK;
9639 /* we ignore the no-nullcheck for now since we
9640 * really do it explicitly only when doing callvirt->call
9646 int handler_offset = -1;
9648 for (i = 0; i < header->num_clauses; ++i) {
9649 MonoExceptionClause *clause = &header->clauses [i];
9650 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9651 handler_offset = clause->handler_offset;
9656 bblock->flags |= BB_EXCEPTION_UNSAFE;
9658 g_assert (handler_offset != -1);
9660 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9661 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9662 ins->sreg1 = load->dreg;
9663 MONO_ADD_INS (bblock, ins);
9665 link_bblock (cfg, bblock, end_bblock);
9666 start_new_bblock = 1;
9674 CHECK_STACK_OVF (1);
9676 token = read32 (ip + 2);
9677 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic) {
9678 MonoType *type = mono_type_create_from_typespec (image, token);
9679 token = mono_type_size (type, &ialign);
9681 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9682 CHECK_TYPELOAD (klass);
9683 mono_class_init (klass);
9684 token = mono_class_value_size (klass, &align);
9686 EMIT_NEW_ICONST (cfg, ins, token);
9691 case CEE_REFANYTYPE: {
9692 MonoInst *src_var, *src;
9698 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9700 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9701 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9702 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9720 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
9730 g_warning ("opcode 0x%02x not handled", *ip);
9734 if (start_new_bblock != 1)
9737 bblock->cil_length = ip - bblock->cil_code;
9738 bblock->next_bb = end_bblock;
9740 if (cfg->method == method && cfg->domainvar) {
9742 MonoInst *get_domain;
9744 cfg->cbb = init_localsbb;
9746 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9747 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9750 get_domain->dreg = alloc_preg (cfg);
9751 MONO_ADD_INS (cfg->cbb, get_domain);
9753 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9754 MONO_ADD_INS (cfg->cbb, store);
9757 #ifdef TARGET_POWERPC
9758 if (cfg->compile_aot)
9759 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
9760 mono_get_got_var (cfg);
9763 if (cfg->method == method && cfg->got_var)
9764 mono_emit_load_got_addr (cfg);
9769 cfg->cbb = init_localsbb;
9771 for (i = 0; i < header->num_locals; ++i) {
9772 MonoType *ptype = header->locals [i];
9773 int t = ptype->type;
9774 dreg = cfg->locals [i]->dreg;
9776 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
9777 t = mono_class_enum_basetype (ptype->data.klass)->type;
9779 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9780 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
9781 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
9782 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
9783 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
9784 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
9785 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9786 ins->type = STACK_R8;
9787 ins->inst_p0 = (void*)&r8_0;
9788 ins->dreg = alloc_dreg (cfg, STACK_R8);
9789 MONO_ADD_INS (init_localsbb, ins);
9790 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
9791 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
9792 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
9793 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
9795 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9800 if (cfg->init_ref_vars && cfg->method == method) {
9801 /* Emit initialization for ref vars */
9802 // FIXME: Avoid duplication initialization for IL locals.
9803 for (i = 0; i < cfg->num_varinfo; ++i) {
9804 MonoInst *ins = cfg->varinfo [i];
9806 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
9807 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
9811 /* Add a sequence point for method entry/exit events */
9813 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
9814 MONO_ADD_INS (init_localsbb, ins);
9815 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
9816 MONO_ADD_INS (cfg->bb_exit, ins);
9821 if (cfg->method == method) {
9823 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9824 bb->region = mono_find_block_region (cfg, bb->real_offset);
9826 mono_create_spvar_for_region (cfg, bb->region);
9827 if (cfg->verbose_level > 2)
9828 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
9832 g_slist_free (class_inits);
9833 dont_inline = g_list_remove (dont_inline, method);
9835 if (inline_costs < 0) {
9838 /* Method is too large */
9839 mname = mono_method_full_name (method, TRUE);
9840 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
9841 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
9843 mono_metadata_free_mh (header);
9844 mono_basic_block_free (original_bb);
9848 if ((cfg->verbose_level > 2) && (cfg->method == method))
9849 mono_print_code (cfg, "AFTER METHOD-TO-IR");
9851 mono_metadata_free_mh (header);
9852 mono_basic_block_free (original_bb);
9853 return inline_costs;
9856 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
9863 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
9867 set_exception_type_from_invalid_il (cfg, method, ip);
9871 g_slist_free (class_inits);
9872 mono_basic_block_free (original_bb);
9873 dont_inline = g_list_remove (dont_inline, method);
9874 mono_metadata_free_mh (header);
9879 store_membase_reg_to_store_membase_imm (int opcode)
9882 case OP_STORE_MEMBASE_REG:
9883 return OP_STORE_MEMBASE_IMM;
9884 case OP_STOREI1_MEMBASE_REG:
9885 return OP_STOREI1_MEMBASE_IMM;
9886 case OP_STOREI2_MEMBASE_REG:
9887 return OP_STOREI2_MEMBASE_IMM;
9888 case OP_STOREI4_MEMBASE_REG:
9889 return OP_STOREI4_MEMBASE_IMM;
9890 case OP_STOREI8_MEMBASE_REG:
9891 return OP_STOREI8_MEMBASE_IMM;
9893 g_assert_not_reached ();
9899 #endif /* DISABLE_JIT */
9902 mono_op_to_op_imm (int opcode)
9912 return OP_IDIV_UN_IMM;
9916 return OP_IREM_UN_IMM;
9930 return OP_ISHR_UN_IMM;
9947 return OP_LSHR_UN_IMM;
9950 return OP_COMPARE_IMM;
9952 return OP_ICOMPARE_IMM;
9954 return OP_LCOMPARE_IMM;
9956 case OP_STORE_MEMBASE_REG:
9957 return OP_STORE_MEMBASE_IMM;
9958 case OP_STOREI1_MEMBASE_REG:
9959 return OP_STOREI1_MEMBASE_IMM;
9960 case OP_STOREI2_MEMBASE_REG:
9961 return OP_STOREI2_MEMBASE_IMM;
9962 case OP_STOREI4_MEMBASE_REG:
9963 return OP_STOREI4_MEMBASE_IMM;
9965 #if defined(TARGET_X86) || defined (TARGET_AMD64)
9967 return OP_X86_PUSH_IMM;
9968 case OP_X86_COMPARE_MEMBASE_REG:
9969 return OP_X86_COMPARE_MEMBASE_IMM;
9971 #if defined(TARGET_AMD64)
9972 case OP_AMD64_ICOMPARE_MEMBASE_REG:
9973 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9975 case OP_VOIDCALL_REG:
9984 return OP_LOCALLOC_IMM;
9991 ldind_to_load_membase (int opcode)
9995 return OP_LOADI1_MEMBASE;
9997 return OP_LOADU1_MEMBASE;
9999 return OP_LOADI2_MEMBASE;
10001 return OP_LOADU2_MEMBASE;
10003 return OP_LOADI4_MEMBASE;
10005 return OP_LOADU4_MEMBASE;
10007 return OP_LOAD_MEMBASE;
10008 case CEE_LDIND_REF:
10009 return OP_LOAD_MEMBASE;
10011 return OP_LOADI8_MEMBASE;
10013 return OP_LOADR4_MEMBASE;
10015 return OP_LOADR8_MEMBASE;
10017 g_assert_not_reached ();
10024 stind_to_store_membase (int opcode)
10028 return OP_STOREI1_MEMBASE_REG;
10030 return OP_STOREI2_MEMBASE_REG;
10032 return OP_STOREI4_MEMBASE_REG;
10034 case CEE_STIND_REF:
10035 return OP_STORE_MEMBASE_REG;
10037 return OP_STOREI8_MEMBASE_REG;
10039 return OP_STORER4_MEMBASE_REG;
10041 return OP_STORER8_MEMBASE_REG;
10043 g_assert_not_reached ();
10050 mono_load_membase_to_load_mem (int opcode)
10052 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
10053 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10055 case OP_LOAD_MEMBASE:
10056 return OP_LOAD_MEM;
10057 case OP_LOADU1_MEMBASE:
10058 return OP_LOADU1_MEM;
10059 case OP_LOADU2_MEMBASE:
10060 return OP_LOADU2_MEM;
10061 case OP_LOADI4_MEMBASE:
10062 return OP_LOADI4_MEM;
10063 case OP_LOADU4_MEMBASE:
10064 return OP_LOADU4_MEM;
10065 #if SIZEOF_REGISTER == 8
10066 case OP_LOADI8_MEMBASE:
10067 return OP_LOADI8_MEM;
10076 op_to_op_dest_membase (int store_opcode, int opcode)
10078 #if defined(TARGET_X86)
10079 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
10084 return OP_X86_ADD_MEMBASE_REG;
10086 return OP_X86_SUB_MEMBASE_REG;
10088 return OP_X86_AND_MEMBASE_REG;
10090 return OP_X86_OR_MEMBASE_REG;
10092 return OP_X86_XOR_MEMBASE_REG;
10095 return OP_X86_ADD_MEMBASE_IMM;
10098 return OP_X86_SUB_MEMBASE_IMM;
10101 return OP_X86_AND_MEMBASE_IMM;
10104 return OP_X86_OR_MEMBASE_IMM;
10107 return OP_X86_XOR_MEMBASE_IMM;
10113 #if defined(TARGET_AMD64)
10114 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
10119 return OP_X86_ADD_MEMBASE_REG;
10121 return OP_X86_SUB_MEMBASE_REG;
10123 return OP_X86_AND_MEMBASE_REG;
10125 return OP_X86_OR_MEMBASE_REG;
10127 return OP_X86_XOR_MEMBASE_REG;
10129 return OP_X86_ADD_MEMBASE_IMM;
10131 return OP_X86_SUB_MEMBASE_IMM;
10133 return OP_X86_AND_MEMBASE_IMM;
10135 return OP_X86_OR_MEMBASE_IMM;
10137 return OP_X86_XOR_MEMBASE_IMM;
10139 return OP_AMD64_ADD_MEMBASE_REG;
10141 return OP_AMD64_SUB_MEMBASE_REG;
10143 return OP_AMD64_AND_MEMBASE_REG;
10145 return OP_AMD64_OR_MEMBASE_REG;
10147 return OP_AMD64_XOR_MEMBASE_REG;
10150 return OP_AMD64_ADD_MEMBASE_IMM;
10153 return OP_AMD64_SUB_MEMBASE_IMM;
10156 return OP_AMD64_AND_MEMBASE_IMM;
10159 return OP_AMD64_OR_MEMBASE_IMM;
10162 return OP_AMD64_XOR_MEMBASE_IMM;
10172 op_to_op_store_membase (int store_opcode, int opcode)
10174 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10177 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10178 return OP_X86_SETEQ_MEMBASE;
10180 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10181 return OP_X86_SETNE_MEMBASE;
10189 op_to_op_src1_membase (int load_opcode, int opcode)
10192 /* FIXME: This has sign extension issues */
10194 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10195 return OP_X86_COMPARE_MEMBASE8_IMM;
10198 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10203 return OP_X86_PUSH_MEMBASE;
10204 case OP_COMPARE_IMM:
10205 case OP_ICOMPARE_IMM:
10206 return OP_X86_COMPARE_MEMBASE_IMM;
10209 return OP_X86_COMPARE_MEMBASE_REG;
10213 #ifdef TARGET_AMD64
10214 /* FIXME: This has sign extension issues */
10216 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10217 return OP_X86_COMPARE_MEMBASE8_IMM;
10222 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10223 return OP_X86_PUSH_MEMBASE;
10225 /* FIXME: This only works for 32 bit immediates
10226 case OP_COMPARE_IMM:
10227 case OP_LCOMPARE_IMM:
10228 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10229 return OP_AMD64_COMPARE_MEMBASE_IMM;
10231 case OP_ICOMPARE_IMM:
10232 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10233 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10237 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10238 return OP_AMD64_COMPARE_MEMBASE_REG;
10241 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10242 return OP_AMD64_ICOMPARE_MEMBASE_REG;
10251 op_to_op_src2_membase (int load_opcode, int opcode)
10254 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10260 return OP_X86_COMPARE_REG_MEMBASE;
10262 return OP_X86_ADD_REG_MEMBASE;
10264 return OP_X86_SUB_REG_MEMBASE;
10266 return OP_X86_AND_REG_MEMBASE;
10268 return OP_X86_OR_REG_MEMBASE;
10270 return OP_X86_XOR_REG_MEMBASE;
10274 #ifdef TARGET_AMD64
10277 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10278 return OP_AMD64_ICOMPARE_REG_MEMBASE;
10282 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10283 return OP_AMD64_COMPARE_REG_MEMBASE;
10286 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10287 return OP_X86_ADD_REG_MEMBASE;
10289 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10290 return OP_X86_SUB_REG_MEMBASE;
10292 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10293 return OP_X86_AND_REG_MEMBASE;
10295 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10296 return OP_X86_OR_REG_MEMBASE;
10298 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10299 return OP_X86_XOR_REG_MEMBASE;
10301 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10302 return OP_AMD64_ADD_REG_MEMBASE;
10304 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10305 return OP_AMD64_SUB_REG_MEMBASE;
10307 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10308 return OP_AMD64_AND_REG_MEMBASE;
10310 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10311 return OP_AMD64_OR_REG_MEMBASE;
10313 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10314 return OP_AMD64_XOR_REG_MEMBASE;
10322 mono_op_to_op_imm_noemul (int opcode)
10325 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
10330 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10338 return mono_op_to_op_imm (opcode);
10342 #ifndef DISABLE_JIT
10345 * mono_handle_global_vregs:
10347 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
10351 mono_handle_global_vregs (MonoCompile *cfg)
10353 gint32 *vreg_to_bb;
10354 MonoBasicBlock *bb;
10357 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
10359 #ifdef MONO_ARCH_SIMD_INTRINSICS
10360 if (cfg->uses_simd_intrinsics)
10361 mono_simd_simplify_indirection (cfg);
10364 /* Find local vregs used in more than one bb */
10365 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10366 MonoInst *ins = bb->code;
10367 int block_num = bb->block_num;
10369 if (cfg->verbose_level > 2)
10370 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
10373 for (; ins; ins = ins->next) {
10374 const char *spec = INS_INFO (ins->opcode);
10375 int regtype = 0, regindex;
10378 if (G_UNLIKELY (cfg->verbose_level > 2))
10379 mono_print_ins (ins);
10381 g_assert (ins->opcode >= MONO_CEE_LAST);
10383 for (regindex = 0; regindex < 4; regindex ++) {
10386 if (regindex == 0) {
10387 regtype = spec [MONO_INST_DEST];
10388 if (regtype == ' ')
10391 } else if (regindex == 1) {
10392 regtype = spec [MONO_INST_SRC1];
10393 if (regtype == ' ')
10396 } else if (regindex == 2) {
10397 regtype = spec [MONO_INST_SRC2];
10398 if (regtype == ' ')
10401 } else if (regindex == 3) {
10402 regtype = spec [MONO_INST_SRC3];
10403 if (regtype == ' ')
10408 #if SIZEOF_REGISTER == 4
10409 /* In the LLVM case, the long opcodes are not decomposed */
10410 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
10412 * Since some instructions reference the original long vreg,
10413 * and some reference the two component vregs, it is quite hard
10414 * to determine when it needs to be global. So be conservative.
10416 if (!get_vreg_to_inst (cfg, vreg)) {
10417 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10419 if (cfg->verbose_level > 2)
10420 printf ("LONG VREG R%d made global.\n", vreg);
10424 * Make the component vregs volatile since the optimizations can
10425 * get confused otherwise.
10427 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
10428 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
10432 g_assert (vreg != -1);
10434 prev_bb = vreg_to_bb [vreg];
10435 if (prev_bb == 0) {
10436 /* 0 is a valid block num */
10437 vreg_to_bb [vreg] = block_num + 1;
10438 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
10439 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
10442 if (!get_vreg_to_inst (cfg, vreg)) {
10443 if (G_UNLIKELY (cfg->verbose_level > 2))
10444 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
10448 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
10451 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10454 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
10457 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
10460 g_assert_not_reached ();
10464 /* Flag as having been used in more than one bb */
10465 vreg_to_bb [vreg] = -1;
10471 /* If a variable is used in only one bblock, convert it into a local vreg */
10472 for (i = 0; i < cfg->num_varinfo; i++) {
10473 MonoInst *var = cfg->varinfo [i];
10474 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
10476 switch (var->type) {
10482 #if SIZEOF_REGISTER == 8
10485 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
10486 /* Enabling this screws up the fp stack on x86 */
10489 /* Arguments are implicitly global */
10490 /* Putting R4 vars into registers doesn't work currently */
10491 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
10493 * Make that the variable's liveness interval doesn't contain a call, since
10494 * that would cause the lvreg to be spilled, making the whole optimization
10497 /* This is too slow for JIT compilation */
10499 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
10501 int def_index, call_index, ins_index;
10502 gboolean spilled = FALSE;
10507 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
10508 const char *spec = INS_INFO (ins->opcode);
10510 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
10511 def_index = ins_index;
10513 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
10514 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
10515 if (call_index > def_index) {
10521 if (MONO_IS_CALL (ins))
10522 call_index = ins_index;
10532 if (G_UNLIKELY (cfg->verbose_level > 2))
10533 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
10534 var->flags |= MONO_INST_IS_DEAD;
10535 cfg->vreg_to_inst [var->dreg] = NULL;
10542 * Compress the varinfo and vars tables so the liveness computation is faster and
10543 * takes up less space.
10546 for (i = 0; i < cfg->num_varinfo; ++i) {
10547 MonoInst *var = cfg->varinfo [i];
10548 if (pos < i && cfg->locals_start == i)
10549 cfg->locals_start = pos;
10550 if (!(var->flags & MONO_INST_IS_DEAD)) {
10552 cfg->varinfo [pos] = cfg->varinfo [i];
10553 cfg->varinfo [pos]->inst_c0 = pos;
10554 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
10555 cfg->vars [pos].idx = pos;
10556 #if SIZEOF_REGISTER == 4
10557 if (cfg->varinfo [pos]->type == STACK_I8) {
10558 /* Modify the two component vars too */
10561 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
10562 var1->inst_c0 = pos;
10563 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
10564 var1->inst_c0 = pos;
10571 cfg->num_varinfo = pos;
10572 if (cfg->locals_start > cfg->num_varinfo)
10573 cfg->locals_start = cfg->num_varinfo;
10577 * mono_spill_global_vars:
10579 * Generate spill code for variables which are not allocated to registers,
10580 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10581 * code is generated which could be optimized by the local optimization passes.
10584 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
10586 MonoBasicBlock *bb;
10588 int orig_next_vreg;
10589 guint32 *vreg_to_lvreg;
10591 guint32 i, lvregs_len;
10592 gboolean dest_has_lvreg = FALSE;
10593 guint32 stacktypes [128];
10594 MonoInst **live_range_start, **live_range_end;
10595 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
10597 *need_local_opts = FALSE;
10599 memset (spec2, 0, sizeof (spec2));
10601 /* FIXME: Move this function to mini.c */
10602 stacktypes ['i'] = STACK_PTR;
10603 stacktypes ['l'] = STACK_I8;
10604 stacktypes ['f'] = STACK_R8;
10605 #ifdef MONO_ARCH_SIMD_INTRINSICS
10606 stacktypes ['x'] = STACK_VTYPE;
10609 #if SIZEOF_REGISTER == 4
10610 /* Create MonoInsts for longs */
10611 for (i = 0; i < cfg->num_varinfo; i++) {
10612 MonoInst *ins = cfg->varinfo [i];
10614 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
10615 switch (ins->type) {
10620 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
10623 g_assert (ins->opcode == OP_REGOFFSET);
10625 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10627 tree->opcode = OP_REGOFFSET;
10628 tree->inst_basereg = ins->inst_basereg;
10629 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10631 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10633 tree->opcode = OP_REGOFFSET;
10634 tree->inst_basereg = ins->inst_basereg;
10635 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10645 /* FIXME: widening and truncation */
10648 * As an optimization, when a variable allocated to the stack is first loaded into
10649 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10650 * the variable again.
10652 orig_next_vreg = cfg->next_vreg;
10653 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
10654 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10658 * These arrays contain the first and last instructions accessing a given
10660 * Since we emit bblocks in the same order we process them here, and we
10661 * don't split live ranges, these will precisely describe the live range of
10662 * the variable, i.e. the instruction range where a valid value can be found
10663 * in the variables location.
10664 * The live range is computed using the liveness info computed by the liveness pass.
10665 * We can't use vmv->range, since that is an abstract live range, and we need
10666 * one which is instruction precise.
10667 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
10669 /* FIXME: Only do this if debugging info is requested */
10670 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
10671 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
10672 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10673 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10675 /* Add spill loads/stores */
10676 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10679 if (cfg->verbose_level > 2)
10680 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
10682 /* Clear vreg_to_lvreg array */
10683 for (i = 0; i < lvregs_len; i++)
10684 vreg_to_lvreg [lvregs [i]] = 0;
10688 MONO_BB_FOR_EACH_INS (bb, ins) {
10689 const char *spec = INS_INFO (ins->opcode);
10690 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
10691 gboolean store, no_lvreg;
10692 int sregs [MONO_MAX_SRC_REGS];
10694 if (G_UNLIKELY (cfg->verbose_level > 2))
10695 mono_print_ins (ins);
10697 if (ins->opcode == OP_NOP)
10701 * We handle LDADDR here as well, since it can only be decomposed
10702 * when variable addresses are known.
10704 if (ins->opcode == OP_LDADDR) {
10705 MonoInst *var = ins->inst_p0;
10707 if (var->opcode == OP_VTARG_ADDR) {
10708 /* Happens on SPARC/S390 where vtypes are passed by reference */
10709 MonoInst *vtaddr = var->inst_left;
10710 if (vtaddr->opcode == OP_REGVAR) {
10711 ins->opcode = OP_MOVE;
10712 ins->sreg1 = vtaddr->dreg;
10714 else if (var->inst_left->opcode == OP_REGOFFSET) {
10715 ins->opcode = OP_LOAD_MEMBASE;
10716 ins->inst_basereg = vtaddr->inst_basereg;
10717 ins->inst_offset = vtaddr->inst_offset;
10721 g_assert (var->opcode == OP_REGOFFSET);
10723 ins->opcode = OP_ADD_IMM;
10724 ins->sreg1 = var->inst_basereg;
10725 ins->inst_imm = var->inst_offset;
10728 *need_local_opts = TRUE;
10729 spec = INS_INFO (ins->opcode);
10732 if (ins->opcode < MONO_CEE_LAST) {
10733 mono_print_ins (ins);
10734 g_assert_not_reached ();
10738 * Store opcodes have destbasereg in the dreg, but in reality, it is an
10742 if (MONO_IS_STORE_MEMBASE (ins)) {
10743 tmp_reg = ins->dreg;
10744 ins->dreg = ins->sreg2;
10745 ins->sreg2 = tmp_reg;
10748 spec2 [MONO_INST_DEST] = ' ';
10749 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10750 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10751 spec2 [MONO_INST_SRC3] = ' ';
10753 } else if (MONO_IS_STORE_MEMINDEX (ins))
10754 g_assert_not_reached ();
10759 if (G_UNLIKELY (cfg->verbose_level > 2)) {
10760 printf ("\t %.3s %d", spec, ins->dreg);
10761 num_sregs = mono_inst_get_src_registers (ins, sregs);
10762 for (srcindex = 0; srcindex < 3; ++srcindex)
10763 printf (" %d", sregs [srcindex]);
10770 regtype = spec [MONO_INST_DEST];
10771 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
10774 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
10775 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
10776 MonoInst *store_ins;
10778 MonoInst *def_ins = ins;
10779 int dreg = ins->dreg; /* The original vreg */
10781 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
10783 if (var->opcode == OP_REGVAR) {
10784 ins->dreg = var->dreg;
10785 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
10787 * Instead of emitting a load+store, use a _membase opcode.
10789 g_assert (var->opcode == OP_REGOFFSET);
10790 if (ins->opcode == OP_MOVE) {
10794 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
10795 ins->inst_basereg = var->inst_basereg;
10796 ins->inst_offset = var->inst_offset;
10799 spec = INS_INFO (ins->opcode);
10803 g_assert (var->opcode == OP_REGOFFSET);
10805 prev_dreg = ins->dreg;
10807 /* Invalidate any previous lvreg for this vreg */
10808 vreg_to_lvreg [ins->dreg] = 0;
10812 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
10814 store_opcode = OP_STOREI8_MEMBASE_REG;
10817 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
10819 if (regtype == 'l') {
10820 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
10821 mono_bblock_insert_after_ins (bb, ins, store_ins);
10822 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
10823 mono_bblock_insert_after_ins (bb, ins, store_ins);
10824 def_ins = store_ins;
10827 g_assert (store_opcode != OP_STOREV_MEMBASE);
10829 /* Try to fuse the store into the instruction itself */
10830 /* FIXME: Add more instructions */
10831 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
10832 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
10833 ins->inst_imm = ins->inst_c0;
10834 ins->inst_destbasereg = var->inst_basereg;
10835 ins->inst_offset = var->inst_offset;
10836 spec = INS_INFO (ins->opcode);
10837 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
10838 ins->opcode = store_opcode;
10839 ins->inst_destbasereg = var->inst_basereg;
10840 ins->inst_offset = var->inst_offset;
10844 tmp_reg = ins->dreg;
10845 ins->dreg = ins->sreg2;
10846 ins->sreg2 = tmp_reg;
10849 spec2 [MONO_INST_DEST] = ' ';
10850 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10851 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10852 spec2 [MONO_INST_SRC3] = ' ';
10854 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
10855 // FIXME: The backends expect the base reg to be in inst_basereg
10856 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
10858 ins->inst_basereg = var->inst_basereg;
10859 ins->inst_offset = var->inst_offset;
10860 spec = INS_INFO (ins->opcode);
10862 /* printf ("INS: "); mono_print_ins (ins); */
10863 /* Create a store instruction */
10864 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
10866 /* Insert it after the instruction */
10867 mono_bblock_insert_after_ins (bb, ins, store_ins);
10869 def_ins = store_ins;
10872 * We can't assign ins->dreg to var->dreg here, since the
10873 * sregs could use it. So set a flag, and do it after
10876 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
10877 dest_has_lvreg = TRUE;
10882 if (def_ins && !live_range_start [dreg]) {
10883 live_range_start [dreg] = def_ins;
10884 live_range_start_bb [dreg] = bb;
10891 num_sregs = mono_inst_get_src_registers (ins, sregs);
10892 for (srcindex = 0; srcindex < 3; ++srcindex) {
10893 regtype = spec [MONO_INST_SRC1 + srcindex];
10894 sreg = sregs [srcindex];
10896 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
10897 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
10898 MonoInst *var = get_vreg_to_inst (cfg, sreg);
10899 MonoInst *use_ins = ins;
10900 MonoInst *load_ins;
10901 guint32 load_opcode;
10903 if (var->opcode == OP_REGVAR) {
10904 sregs [srcindex] = var->dreg;
10905 //mono_inst_set_src_registers (ins, sregs);
10906 live_range_end [sreg] = use_ins;
10907 live_range_end_bb [sreg] = bb;
10911 g_assert (var->opcode == OP_REGOFFSET);
10913 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
10915 g_assert (load_opcode != OP_LOADV_MEMBASE);
10917 if (vreg_to_lvreg [sreg]) {
10918 g_assert (vreg_to_lvreg [sreg] != -1);
10920 /* The variable is already loaded to an lvreg */
10921 if (G_UNLIKELY (cfg->verbose_level > 2))
10922 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
10923 sregs [srcindex] = vreg_to_lvreg [sreg];
10924 //mono_inst_set_src_registers (ins, sregs);
10928 /* Try to fuse the load into the instruction */
10929 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
10930 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
10931 sregs [0] = var->inst_basereg;
10932 //mono_inst_set_src_registers (ins, sregs);
10933 ins->inst_offset = var->inst_offset;
10934 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
10935 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
10936 sregs [1] = var->inst_basereg;
10937 //mono_inst_set_src_registers (ins, sregs);
10938 ins->inst_offset = var->inst_offset;
10940 if (MONO_IS_REAL_MOVE (ins)) {
10941 ins->opcode = OP_NOP;
10944 //printf ("%d ", srcindex); mono_print_ins (ins);
10946 sreg = alloc_dreg (cfg, stacktypes [regtype]);
10948 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
10949 if (var->dreg == prev_dreg) {
10951 * sreg refers to the value loaded by the load
10952 * emitted below, but we need to use ins->dreg
10953 * since it refers to the store emitted earlier.
10957 g_assert (sreg != -1);
10958 vreg_to_lvreg [var->dreg] = sreg;
10959 g_assert (lvregs_len < 1024);
10960 lvregs [lvregs_len ++] = var->dreg;
10964 sregs [srcindex] = sreg;
10965 //mono_inst_set_src_registers (ins, sregs);
10967 if (regtype == 'l') {
10968 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
10969 mono_bblock_insert_before_ins (bb, ins, load_ins);
10970 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
10971 mono_bblock_insert_before_ins (bb, ins, load_ins);
10972 use_ins = load_ins;
10975 #if SIZEOF_REGISTER == 4
10976 g_assert (load_opcode != OP_LOADI8_MEMBASE);
10978 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
10979 mono_bblock_insert_before_ins (bb, ins, load_ins);
10980 use_ins = load_ins;
10984 if (var->dreg < orig_next_vreg) {
10985 live_range_end [var->dreg] = use_ins;
10986 live_range_end_bb [var->dreg] = bb;
10990 mono_inst_set_src_registers (ins, sregs);
10992 if (dest_has_lvreg) {
10993 g_assert (ins->dreg != -1);
10994 vreg_to_lvreg [prev_dreg] = ins->dreg;
10995 g_assert (lvregs_len < 1024);
10996 lvregs [lvregs_len ++] = prev_dreg;
10997 dest_has_lvreg = FALSE;
11001 tmp_reg = ins->dreg;
11002 ins->dreg = ins->sreg2;
11003 ins->sreg2 = tmp_reg;
11006 if (MONO_IS_CALL (ins)) {
11007 /* Clear vreg_to_lvreg array */
11008 for (i = 0; i < lvregs_len; i++)
11009 vreg_to_lvreg [lvregs [i]] = 0;
11011 } else if (ins->opcode == OP_NOP) {
11013 MONO_INST_NULLIFY_SREGS (ins);
11016 if (cfg->verbose_level > 2)
11017 mono_print_ins_index (1, ins);
11020 /* Extend the live range based on the liveness info */
11021 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
11022 for (i = 0; i < cfg->num_varinfo; i ++) {
11023 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
11025 if (vreg_is_volatile (cfg, vi->vreg))
11026 /* The liveness info is incomplete */
11029 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
11030 /* Live from at least the first ins of this bb */
11031 live_range_start [vi->vreg] = bb->code;
11032 live_range_start_bb [vi->vreg] = bb;
11035 if (mono_bitset_test_fast (bb->live_out_set, i)) {
11036 /* Live at least until the last ins of this bb */
11037 live_range_end [vi->vreg] = bb->last_ins;
11038 live_range_end_bb [vi->vreg] = bb;
11044 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
11046 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
11047 * by storing the current native offset into MonoMethodVar->live_range_start/end.
11049 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
11050 for (i = 0; i < cfg->num_varinfo; ++i) {
11051 int vreg = MONO_VARINFO (cfg, i)->vreg;
11054 if (live_range_start [vreg]) {
11055 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
11057 ins->inst_c1 = vreg;
11058 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
11060 if (live_range_end [vreg]) {
11061 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
11063 ins->inst_c1 = vreg;
11064 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
11065 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
11067 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
11073 g_free (live_range_start);
11074 g_free (live_range_end);
11075 g_free (live_range_start_bb);
11076 g_free (live_range_end_bb);
11081 * - use 'iadd' instead of 'int_add'
11082 * - handling ovf opcodes: decompose in method_to_ir.
11083 * - unify iregs/fregs
11084 * -> partly done, the missing parts are:
11085 * - a more complete unification would involve unifying the hregs as well, so
11086 * code wouldn't need if (fp) all over the place. but that would mean the hregs
11087 * would no longer map to the machine hregs, so the code generators would need to
11088 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
11089 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
11090 * fp/non-fp branches speeds it up by about 15%.
11091 * - use sext/zext opcodes instead of shifts
11093 * - get rid of TEMPLOADs if possible and use vregs instead
11094 * - clean up usage of OP_P/OP_ opcodes
11095 * - cleanup usage of DUMMY_USE
11096 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
11098 * - set the stack type and allocate a dreg in the EMIT_NEW macros
11099 * - get rid of all the <foo>2 stuff when the new JIT is ready.
11100 * - make sure handle_stack_args () is called before the branch is emitted
11101 * - when the new IR is done, get rid of all unused stuff
11102 * - COMPARE/BEQ as separate instructions or unify them ?
11103 * - keeping them separate allows specialized compare instructions like
11104 * compare_imm, compare_membase
11105 * - most back ends unify fp compare+branch, fp compare+ceq
11106 * - integrate mono_save_args into inline_method
11107 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
11108 * - handle long shift opts on 32 bit platforms somehow: they require
11109 * 3 sregs (2 for arg1 and 1 for arg2)
11110 * - make byref a 'normal' type.
11111 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
11112 * variable if needed.
11113 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11114 * like inline_method.
11115 * - remove inlining restrictions
11116 * - fix LNEG and enable cfold of INEG
11117 * - generalize x86 optimizations like ldelema as a peephole optimization
11118 * - add store_mem_imm for amd64
11119 * - optimize the loading of the interruption flag in the managed->native wrappers
11120 * - avoid special handling of OP_NOP in passes
11121 * - move code inserting instructions into one function/macro.
11122 * - try a coalescing phase after liveness analysis
11123 * - add float -> vreg conversion + local optimizations on !x86
11124 * - figure out how to handle decomposed branches during optimizations, ie.
11125 * compare+branch, op_jump_table+op_br etc.
11126 * - promote RuntimeXHandles to vregs
11127 * - vtype cleanups:
11128 * - add a NEW_VARLOADA_VREG macro
11129 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11130 * accessing vtype fields.
11131 * - get rid of I8CONST on 64 bit platforms
11132 * - dealing with the increase in code size due to branches created during opcode
11134 * - use extended basic blocks
11135 * - all parts of the JIT
11136 * - handle_global_vregs () && local regalloc
11137 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11138 * - sources of increase in code size:
11141 * - isinst and castclass
11142 * - lvregs not allocated to global registers even if used multiple times
11143 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11145 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11146 * - add all micro optimizations from the old JIT
11147 * - put tree optimizations into the deadce pass
11148 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11149 * specific function.
11150 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11151 * fcompare + branchCC.
11152 * - create a helper function for allocating a stack slot, taking into account
11153 * MONO_CFG_HAS_SPILLUP.
11155 * - merge the ia64 switch changes.
11156 * - optimize mono_regstate2_alloc_int/float.
11157 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11158 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11159 * parts of the tree could be separated by other instructions, killing the tree
11160 * arguments, or stores killing loads etc. Also, should we fold loads into other
11161 * instructions if the result of the load is used multiple times ?
11162 * - make the REM_IMM optimization in mini-x86.c arch-independent.
11163 * - LAST MERGE: 108395.
11164 * - when returning vtypes in registers, generate IR and append it to the end of the
11165 * last bb instead of doing it in the epilog.
11166 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
11174 - When to decompose opcodes:
11175 - earlier: this makes some optimizations hard to implement, since the low level IR
11176 no longer contains the neccessary information. But it is easier to do.
11177 - later: harder to implement, enables more optimizations.
11178 - Branches inside bblocks:
11179 - created when decomposing complex opcodes.
11180 - branches to another bblock: harmless, but not tracked by the branch
11181 optimizations, so need to branch to a label at the start of the bblock.
11182 - branches to inside the same bblock: very problematic, trips up the local
11183 reg allocator. Can be fixed by spitting the current bblock, but that is a
11184 complex operation, since some local vregs can become global vregs etc.
11185 - Local/global vregs:
11186 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11187 local register allocator.
11188 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11189 structure, created by mono_create_var (). Assigned to hregs or the stack by
11190 the global register allocator.
11191 - When to do optimizations like alu->alu_imm:
11192 - earlier -> saves work later on since the IR will be smaller/simpler
11193 - later -> can work on more instructions
11194 - Handling of valuetypes:
11195 - When a vtype is pushed on the stack, a new temporary is created, an
11196 instruction computing its address (LDADDR) is emitted and pushed on
11197 the stack. Need to optimize cases when the vtype is used immediately as in
11198 argument passing, stloc etc.
11199 - Instead of the to_end stuff in the old JIT, simply call the function handling
11200 the values on the stack before emitting the last instruction of the bb.
11203 #endif /* DISABLE_JIT */