2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
30 #ifdef HAVE_VALGRIND_MEMCHECK_H
31 #include <valgrind/memcheck.h>
34 #include <mono/metadata/assembly.h>
35 #include <mono/metadata/loader.h>
36 #include <mono/metadata/tabledefs.h>
37 #include <mono/metadata/class.h>
38 #include <mono/metadata/object.h>
39 #include <mono/metadata/exception.h>
40 #include <mono/metadata/opcodes.h>
41 #include <mono/metadata/mono-endian.h>
42 #include <mono/metadata/tokentype.h>
43 #include <mono/metadata/tabledefs.h>
44 #include <mono/metadata/marshal.h>
45 #include <mono/metadata/debug-helpers.h>
46 #include <mono/metadata/mono-debug.h>
47 #include <mono/metadata/gc-internal.h>
48 #include <mono/metadata/security-manager.h>
49 #include <mono/metadata/threads-types.h>
50 #include <mono/metadata/security-core-clr.h>
51 #include <mono/metadata/monitor.h>
52 #include <mono/utils/mono-compiler.h>
59 #include "jit-icalls.h"
61 #define BRANCH_COST 100
62 #define INLINE_LENGTH_LIMIT 20
63 #define INLINE_FAILURE do {\
64 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
67 #define CHECK_CFG_EXCEPTION do {\
68 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
71 #define METHOD_ACCESS_FAILURE do { \
72 char *method_fname = mono_method_full_name (method, TRUE); \
73 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
74 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
75 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
76 g_free (method_fname); \
77 g_free (cil_method_fname); \
78 goto exception_exit; \
80 #define FIELD_ACCESS_FAILURE do { \
81 char *method_fname = mono_method_full_name (method, TRUE); \
82 char *field_fname = mono_field_full_name (field); \
83 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
84 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
85 g_free (method_fname); \
86 g_free (field_fname); \
87 goto exception_exit; \
89 #define GENERIC_SHARING_FAILURE(opcode) do { \
90 if (cfg->generic_sharing_context) { \
91 if (cfg->verbose_level > 2) \
92 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
93 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
94 goto exception_exit; \
98 /* Determine whenever 'ins' represents a load of the 'this' argument */
99 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
101 static int ldind_to_load_membase (int opcode);
102 static int stind_to_store_membase (int opcode);
104 int mono_op_to_op_imm (int opcode);
105 int mono_op_to_op_imm_noemul (int opcode);
107 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
108 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
109 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
111 /* helper methods signature */
112 extern MonoMethodSignature *helper_sig_class_init_trampoline;
113 extern MonoMethodSignature *helper_sig_domain_get;
114 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
115 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
116 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
119 * Instruction metadata
127 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
128 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
134 #if SIZEOF_REGISTER == 8
139 /* keep in sync with the enum in mini.h */
142 #include "mini-ops.h"
147 #define MINI_OP(a,b,dest,src1,src2) (((src1) != NONE) + ((src2) != NONE)),
148 #define MINI_OP3(a,b,dest,src1,src2,src3) (((src1) != NONE) + ((src2) != NONE) + ((src3) != NONE)),
149 const gint8 ins_sreg_counts[] = {
150 #include "mini-ops.h"
155 extern GHashTable *jit_icall_name_hash;
157 #define MONO_INIT_VARINFO(vi,id) do { \
158 (vi)->range.first_use.pos.bid = 0xffff; \
/*
 * mono_inst_set_src_registers:
 *   Copy the first three entries of REGS into the instruction's three
 *   source-register fields (sreg1..sreg3). Assumes REGS holds at least
 *   three valid entries -- not checked here.
 */
164 mono_inst_set_src_registers (MonoInst *ins, int *regs)
166 ins->sreg1 = regs [0];
167 ins->sreg2 = regs [1];
168 ins->sreg3 = regs [2];
/* mono_alloc_ireg: allocate a fresh integer virtual register for CFG (thin wrapper over alloc_ireg). */
172 mono_alloc_ireg (MonoCompile *cfg)
174 return alloc_ireg (cfg);
/* mono_alloc_freg: allocate a fresh floating-point virtual register for CFG (thin wrapper over alloc_freg). */
178 mono_alloc_freg (MonoCompile *cfg)
180 return alloc_freg (cfg);
/* mono_alloc_preg: allocate a fresh pointer-sized virtual register for CFG (thin wrapper over alloc_preg). */
184 mono_alloc_preg (MonoCompile *cfg)
186 return alloc_preg (cfg);
/* mono_alloc_dreg: allocate a destination vreg appropriate for the given eval-stack type (wrapper over alloc_dreg). */
190 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
192 return alloc_dreg (cfg, stack_type);
/*
 * mono_type_to_regmove:
 *   Map a MonoType to the register-move classification used by the JIT.
 *   Enums are replaced by their underlying base type and generic instances
 *   by their container class before classifying; unknown types abort via
 *   g_error. NOTE(review): the per-case return statements are not visible in
 *   this copy of the file -- confirm the returned opcodes against upstream.
 */
196 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
202 switch (type->type) {
205 case MONO_TYPE_BOOLEAN:
217 case MONO_TYPE_FNPTR:
219 case MONO_TYPE_CLASS:
220 case MONO_TYPE_STRING:
221 case MONO_TYPE_OBJECT:
222 case MONO_TYPE_SZARRAY:
223 case MONO_TYPE_ARRAY:
227 #if SIZEOF_REGISTER == 8
236 case MONO_TYPE_VALUETYPE:
/* Enums are handled as their underlying integral type. */
237 if (type->data.klass->enumtype) {
238 type = mono_class_enum_basetype (type->data.klass);
/* SIMD value types get special treatment when SIMD intrinsics are enabled. */
241 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
244 case MONO_TYPE_TYPEDBYREF:
/* Generic instances are classified via their container class. */
246 case MONO_TYPE_GENERICINST:
247 type = &type->data.generic_class->container_class->byval_arg;
/* Type variables (VAR/MVAR) are only legal under generic sharing. */
251 g_assert (cfg->generic_sharing_context);
254 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb:
 *   Debug helper: print a basic block's number, its IN and OUT edges
 *   (block number and depth-first number for each neighbour), then every
 *   instruction in the block via mono_print_ins_index. MSG is a caller
 *   supplied prefix for the header line.
 */
260 mono_print_bb (MonoBasicBlock *bb, const char *msg)
265 printf ("\n%s %d: [IN: ", msg, bb->block_num);
266 for (i = 0; i < bb->in_count; ++i)
267 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
269 for (i = 0; i < bb->out_count; ++i)
270 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
272 for (tree = bb->code; tree; tree = tree->next)
273 mono_print_ins_index (-1, tree);
277 * Can't put this at the beginning, since other files reference stuff from this
282 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
284 #define GET_BBLOCK(cfg,tblock,ip) do { \
285 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
287 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
288 NEW_BBLOCK (cfg, (tblock)); \
289 (tblock)->cil_code = (ip); \
290 ADD_BBLOCK (cfg, (tblock)); \
294 #if defined(__i386__) || defined(__x86_64__)
295 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
296 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
297 (dest)->dreg = alloc_preg ((cfg)); \
298 (dest)->sreg1 = (sr1); \
299 (dest)->sreg2 = (sr2); \
300 (dest)->inst_imm = (imm); \
301 (dest)->backend.shift_amount = (shift); \
302 MONO_ADD_INS ((cfg)->cbb, (dest)); \
306 #if SIZEOF_REGISTER == 8
307 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
308 /* FIXME: Need to add many more cases */ \
309 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
311 int dr = alloc_preg (cfg); \
312 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
313 (ins)->sreg2 = widen->dreg; \
317 #define ADD_WIDEN_OP(ins, arg1, arg2)
320 #define ADD_BINOP(op) do { \
321 MONO_INST_NEW (cfg, ins, (op)); \
323 ins->sreg1 = sp [0]->dreg; \
324 ins->sreg2 = sp [1]->dreg; \
325 type_from_op (ins, sp [0], sp [1]); \
327 /* Have to insert a widening op */ \
328 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
329 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
330 MONO_ADD_INS ((cfg)->cbb, (ins)); \
332 mono_decompose_opcode ((cfg), (ins)); \
335 #define ADD_UNOP(op) do { \
336 MONO_INST_NEW (cfg, ins, (op)); \
338 ins->sreg1 = sp [0]->dreg; \
339 type_from_op (ins, sp [0], NULL); \
341 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
342 MONO_ADD_INS ((cfg)->cbb, (ins)); \
344 mono_decompose_opcode (cfg, ins); \
347 #define ADD_BINCOND(next_block) do { \
350 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
351 cmp->sreg1 = sp [0]->dreg; \
352 cmp->sreg2 = sp [1]->dreg; \
353 type_from_op (cmp, sp [0], sp [1]); \
355 type_from_op (ins, sp [0], sp [1]); \
356 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
357 GET_BBLOCK (cfg, tblock, target); \
358 link_bblock (cfg, bblock, tblock); \
359 ins->inst_true_bb = tblock; \
360 if ((next_block)) { \
361 link_bblock (cfg, bblock, (next_block)); \
362 ins->inst_false_bb = (next_block); \
363 start_new_bblock = 1; \
365 GET_BBLOCK (cfg, tblock, ip); \
366 link_bblock (cfg, bblock, tblock); \
367 ins->inst_false_bb = tblock; \
368 start_new_bblock = 2; \
370 if (sp != stack_start) { \
371 handle_stack_args (cfg, stack_start, sp - stack_start); \
372 CHECK_UNVERIFIABLE (cfg); \
374 MONO_ADD_INS (bblock, cmp); \
375 MONO_ADD_INS (bblock, ins); \
379 * link_bblock: Links two basic blocks
381 * links two basic blocks in the control flow graph, the 'from'
382 * argument is the starting block and the 'to' argument is the block
383 * the control flow ends to after 'from'.
/*
 * link_bblock:
 *   Add a CFG edge FROM -> TO. With verbose tracing enabled, prints the
 *   edge using IL offsets (or "entry"/"exit" when cil_code is NULL).
 *   Both adjacency lists are scanned first so duplicate edges are not
 *   added; growth is done by allocating a one-larger array from the
 *   compile mempool and copying the old entries (mempool memory is never
 *   freed individually, so the old array is simply abandoned).
 */
386 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
388 MonoBasicBlock **newa;
392 if (from->cil_code) {
394 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
396 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
399 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
401 printf ("edge from entry to exit\n");
/* Already linked? Then out_bb/in_bb must not be touched again. */
406 for (i = 0; i < from->out_count; ++i) {
407 if (to == from->out_bb [i]) {
/* Grow FROM's successor list by one slot. */
413 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
414 for (i = 0; i < from->out_count; ++i) {
415 newa [i] = from->out_bb [i];
/* Symmetric check and growth for TO's predecessor list. */
423 for (i = 0; i < to->in_count; ++i) {
424 if (from == to->in_bb [i]) {
430 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
431 for (i = 0; i < to->in_count; ++i) {
432 newa [i] = to->in_bb [i];
/* mono_link_bblock: public entry point; delegates to the static link_bblock above. */
441 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
443 link_bblock (cfg, from, to);
447 * mono_find_block_region:
449 * We mark each basic block with a region ID. We use that to avoid BB
450 * optimizations when blocks are in different regions.
453 * A region token that encodes where this region is, and information
454 * about the clause owner for this block.
456 * The region encodes the try/catch/filter clause that owns this block
457 * as well as the type. -1 is a special value that represents a block
458 * that is in none of try/catch/filter.
/*
 * mono_find_block_region:
 *   Compute the region token for the IL OFFSET: ((clause_index + 1) << 8)
 *   combined with a MONO_REGION_* kind and the clause flags. Filter ranges
 *   are checked first (filter_offset..handler_offset), then handler bodies
 *   (finally / fault / catch), then plain try ranges. The first matching
 *   clause wins. NOTE(review): the fall-through "no region" return (-1 per
 *   the comment above this function) is not visible in this copy.
 */
461 mono_find_block_region (MonoCompile *cfg, int offset)
463 MonoMethod *method = cfg->method;
464 MonoMethodHeader *header = mono_method_get_header (method);
465 MonoExceptionClause *clause;
468 for (i = 0; i < header->num_clauses; ++i) {
469 clause = &header->clauses [i];
/* Inside a filter expression? */
470 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
471 (offset < (clause->handler_offset)))
472 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
/* Inside the handler body? Distinguish finally / fault / catch. */
474 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
475 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
476 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
477 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
478 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
480 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Otherwise inside the protected (try) range itself. */
483 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
484 return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block:
 *   Collect (as a GList, appended in clause order) the handler basic blocks
 *   of every exception clause of kind TYPE whose protected range contains
 *   IP but not TARGET -- i.e. the handlers that must run when control
 *   leaves the clause (e.g. finally blocks crossed by a leave).
 */
491 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
493 MonoMethod *method = cfg->method;
494 MonoMethodHeader *header = mono_method_get_header (method);
495 MonoExceptionClause *clause;
496 MonoBasicBlock *handler;
500 for (i = 0; i < header->num_clauses; ++i) {
501 clause = &header->clauses [i];
/* The branch leaves this clause: source inside, target outside. */
502 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
503 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
504 if (clause->flags == type) {
505 handler = cfg->cil_offset_to_bb [clause->handler_offset];
507 res = g_list_append (res, handler);
/*
 * mono_create_spvar_for_region:
 *   Return the stack-pointer spill variable for REGION, creating and
 *   caching it in cfg->spvars on first use. The variable is marked
 *   MONO_INST_INDIRECT so the register allocator keeps it in memory.
 */
515 mono_create_spvar_for_region (MonoCompile *cfg, int region)
519 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
523 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
524 /* prevent it from being register allocated */
525 var->flags |= MONO_INST_INDIRECT;
527 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* mono_find_exvar_for_offset: look up (without creating) the exception-object variable cached for this IL offset. */
531 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
533 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/*
 * mono_create_exvar_for_offset:
 *   Return the exception-object variable for the handler at OFFSET,
 *   creating an object-typed local and caching it in cfg->exvars on first
 *   use. Marked MONO_INST_INDIRECT to force stack allocation.
 */
537 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
541 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
545 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
546 /* prevent it from being register allocated */
547 var->flags |= MONO_INST_INDIRECT;
549 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
555 * Returns the type used in the eval stack when @type is loaded.
556 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/*
 * type_to_eval_stack_type:
 *   Set INST->type (STACK_I4/I8/PTR/R8/OBJ/MP/VTYPE/INV) and INST->klass
 *   from TYPE, i.e. the eval-stack classification of a loaded value.
 *   Byref types become STACK_MP; enums and generic instances are reduced
 *   to their underlying/container type first; VAR/MVAR are only legal
 *   under generic sharing and are treated as references for now.
 */
559 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
563 inst->klass = klass = mono_class_from_mono_type (type);
/* Managed pointers (byref) always classify as STACK_MP. */
565 inst->type = STACK_MP;
570 switch (type->type) {
572 inst->type = STACK_INV;
576 case MONO_TYPE_BOOLEAN:
582 inst->type = STACK_I4;
587 case MONO_TYPE_FNPTR:
588 inst->type = STACK_PTR;
590 case MONO_TYPE_CLASS:
591 case MONO_TYPE_STRING:
592 case MONO_TYPE_OBJECT:
593 case MONO_TYPE_SZARRAY:
594 case MONO_TYPE_ARRAY:
595 inst->type = STACK_OBJ;
599 inst->type = STACK_I8;
603 inst->type = STACK_R8;
605 case MONO_TYPE_VALUETYPE:
/* Enums classify as their underlying integral type. */
606 if (type->data.klass->enumtype) {
607 type = mono_class_enum_basetype (type->data.klass);
611 inst->type = STACK_VTYPE;
614 case MONO_TYPE_TYPEDBYREF:
615 inst->klass = mono_defaults.typed_reference_class;
616 inst->type = STACK_VTYPE;
/* Generic instances: classify via the container class. */
618 case MONO_TYPE_GENERICINST:
619 type = &type->data.generic_class->container_class->byval_arg;
622 case MONO_TYPE_MVAR :
623 /* FIXME: all the arguments must be references for now,
624 * later look inside cfg and see if the arg num is
627 g_assert (cfg->generic_sharing_context);
628 inst->type = STACK_OBJ;
631 g_error ("unknown type 0x%02x in eval stack type", type->type);
636 * The following tables are used to quickly validate the IL code in type_from_op ().
639 bin_num_table [STACK_MAX] [STACK_MAX] = {
640 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
641 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
642 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
643 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
644 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
645 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
646 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
647 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
652 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
655 /* reduce the size of this table */
657 bin_int_table [STACK_MAX] [STACK_MAX] = {
658 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
659 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
660 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
661 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
662 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
663 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
664 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
665 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
669 bin_comp_table [STACK_MAX] [STACK_MAX] = {
670 /* Inv i L p F & O vt */
672 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
673 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
674 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
675 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
676 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
677 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
678 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
681 /* reduce the size of this table */
683 shift_table [STACK_MAX] [STACK_MAX] = {
684 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
685 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
686 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
687 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
688 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
689 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
690 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
691 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
695 * Tables to map from the non-specific opcode to the matching
696 * type-specific opcode.
698 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
700 binops_op_map [STACK_MAX] = {
701 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
704 /* handles from CEE_NEG to CEE_CONV_U8 */
706 unops_op_map [STACK_MAX] = {
707 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
710 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
712 ovfops_op_map [STACK_MAX] = {
713 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
716 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
718 ovf2ops_op_map [STACK_MAX] = {
719 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
722 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
724 ovf3ops_op_map [STACK_MAX] = {
725 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
728 /* handles from CEE_BEQ to CEE_BLT_UN */
730 beqops_op_map [STACK_MAX] = {
731 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
734 /* handles from CEE_CEQ to CEE_CLT_UN */
736 ceqops_op_map [STACK_MAX] = {
737 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
741 * Sets ins->type (the type on the eval stack) according to the
742 * type of the opcode and the arguments to it.
743 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
745 * FIXME: this function sets ins->type unconditionally in some cases, but
746 * it should set it to invalid for some types (a conv.x on an object)
/*
 * type_from_op:
 *   Given a generic opcode in INS and its stack operands SRC1/SRC2, set
 *   INS->type (the eval-stack result type) from the validation tables
 *   above and specialize INS->opcode to the I/L/P/F variant by adding the
 *   per-stack-type offset from the *_op_map tables. Invalid operand
 *   combinations yield STACK_INV, which the caller treats as unverifiable
 *   IL (see the comment block above this function).
 */
749 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
751 switch (ins->opcode) {
/* Arithmetic binops: result type from bin_num_table, opcode from binops_op_map. */
758 /* FIXME: check unverifiable args for STACK_MP */
759 ins->type = bin_num_table [src1->type] [src2->type];
760 ins->opcode += binops_op_map [ins->type];
/* Integer-only binops (and/or/xor/...) use the stricter bin_int_table. */
767 ins->type = bin_int_table [src1->type] [src2->type];
768 ins->opcode += binops_op_map [ins->type];
/* Shifts: result follows the shifted operand per shift_table. */
773 ins->type = shift_table [src1->type] [src2->type];
774 ins->opcode += binops_op_map [ins->type];
/* Compares: pick L/F/I variant by the first operand's width. */
779 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
780 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
781 ins->opcode = OP_LCOMPARE;
782 else if (src1->type == STACK_R8)
783 ins->opcode = OP_FCOMPARE;
785 ins->opcode = OP_ICOMPARE;
787 case OP_ICOMPARE_IMM:
/* NOTE(review): indexes the table with src1->type twice -- there is no
 * second stack operand for the immediate form, so this looks intentional. */
788 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
789 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
790 ins->opcode = OP_LCOMPARE_IMM;
/* Conditional branches (beq..blt.un). */
802 ins->opcode += beqops_op_map [src1->type];
/* ceq/cgt/clt family: result is always an I4 boolean. */
805 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
806 ins->opcode += ceqops_op_map [src1->type];
/* Unsigned compare-set: only comparisons flagged '1' in the table are valid. */
812 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
813 ins->opcode += ceqops_op_map [src1->type];
/* neg: result from neg_table. */
817 ins->type = neg_table [src1->type];
818 ins->opcode += unops_op_map [ins->type];
/* not: only integral/pointer operands are valid. */
821 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
822 ins->type = src1->type;
824 ins->type = STACK_INV;
825 ins->opcode += unops_op_map [ins->type];
/* Narrowing conversions to i1/u1/i2/u2/i4/u4 produce an I4. */
831 ins->type = STACK_I4;
832 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: unsigned int -> float. */
835 ins->type = STACK_R8;
836 switch (src1->type) {
839 ins->opcode = OP_ICONV_TO_R_UN;
842 ins->opcode = OP_LCONV_TO_R_UN;
/* Overflow-checked conversions to 32-bit results. */
846 case CEE_CONV_OVF_I1:
847 case CEE_CONV_OVF_U1:
848 case CEE_CONV_OVF_I2:
849 case CEE_CONV_OVF_U2:
850 case CEE_CONV_OVF_I4:
851 case CEE_CONV_OVF_U4:
852 ins->type = STACK_I4;
853 ins->opcode += ovf3ops_op_map [src1->type];
855 case CEE_CONV_OVF_I_UN:
856 case CEE_CONV_OVF_U_UN:
857 ins->type = STACK_PTR;
858 ins->opcode += ovf2ops_op_map [src1->type];
860 case CEE_CONV_OVF_I1_UN:
861 case CEE_CONV_OVF_I2_UN:
862 case CEE_CONV_OVF_I4_UN:
863 case CEE_CONV_OVF_U1_UN:
864 case CEE_CONV_OVF_U2_UN:
865 case CEE_CONV_OVF_U4_UN:
866 ins->type = STACK_I4;
867 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: pointer-sized unsigned; the exact opcode depends on operand and word size. */
870 ins->type = STACK_PTR;
871 switch (src1->type) {
873 ins->opcode = OP_ICONV_TO_U;
877 #if SIZEOF_REGISTER == 8
878 ins->opcode = OP_LCONV_TO_U;
880 ins->opcode = OP_MOVE;
884 ins->opcode = OP_LCONV_TO_U;
887 ins->opcode = OP_FCONV_TO_U;
/* Conversions producing an I8. */
893 ins->type = STACK_I8;
894 ins->opcode += unops_op_map [src1->type];
896 case CEE_CONV_OVF_I8:
897 case CEE_CONV_OVF_U8:
898 ins->type = STACK_I8;
899 ins->opcode += ovf3ops_op_map [src1->type];
901 case CEE_CONV_OVF_U8_UN:
902 case CEE_CONV_OVF_I8_UN:
903 ins->type = STACK_I8;
904 ins->opcode += ovf2ops_op_map [src1->type];
/* Conversions producing an R8. */
908 ins->type = STACK_R8;
909 ins->opcode += unops_op_map [src1->type];
912 ins->type = STACK_R8;
916 ins->type = STACK_I4;
917 ins->opcode += ovfops_op_map [src1->type];
922 ins->type = STACK_PTR;
923 ins->opcode += ovfops_op_map [src1->type];
/* add/mul/sub.ovf: like numeric binops, but floats are invalid. */
931 ins->type = bin_num_table [src1->type] [src2->type];
932 ins->opcode += ovfops_op_map [src1->type];
933 if (ins->type == STACK_R8)
934 ins->type = STACK_INV;
/* Loads: the result type is fixed by the load opcode itself. */
936 case OP_LOAD_MEMBASE:
937 ins->type = STACK_PTR;
939 case OP_LOADI1_MEMBASE:
940 case OP_LOADU1_MEMBASE:
941 case OP_LOADI2_MEMBASE:
942 case OP_LOADU2_MEMBASE:
943 case OP_LOADI4_MEMBASE:
944 case OP_LOADU4_MEMBASE:
945 ins->type = STACK_PTR;
947 case OP_LOADI8_MEMBASE:
948 ins->type = STACK_I8;
950 case OP_LOADR4_MEMBASE:
951 case OP_LOADR8_MEMBASE:
952 ins->type = STACK_R8;
955 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed-pointer results default their klass to object. */
959 if (ins->type == STACK_MP)
960 ins->klass = mono_defaults.object_class;
965 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
971 param_table [STACK_MAX] [STACK_MAX] = {
/*
 * check_values_to_signature:
 *   Validate the stack-typed arguments in ARGS against SIG: byref-ness
 *   must match, object-typed values must map to reference parameter types,
 *   and R8 values must go to R4/R8 parameters. NOTE(review): many branch
 *   bodies (the failure returns) are not visible in this copy; the
 *   commented-out param_table check at the end was already disabled
 *   upstream.
 */
976 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
980 switch (args->type) {
990 for (i = 0; i < sig->param_count; ++i) {
991 switch (args [i].type) {
/* A managed pointer on the stack requires a byref parameter... */
995 if (!sig->params [i]->byref)
/* ...and a plain value requires a non-byref one. */
999 if (sig->params [i]->byref)
1001 switch (sig->params [i]->type) {
1002 case MONO_TYPE_CLASS:
1003 case MONO_TYPE_STRING:
1004 case MONO_TYPE_OBJECT:
1005 case MONO_TYPE_SZARRAY:
1006 case MONO_TYPE_ARRAY:
1013 if (sig->params [i]->byref)
1015 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1024 /*if (!param_table [args [i].type] [sig->params [i]->type])
1032 * When we need a pointer to the current domain many times in a method, we
1033 * call mono_domain_get() once and we store the result in a local variable.
1034 * This function returns the variable that represents the MonoDomain*.
/*
 * mono_get_domainvar:
 *   Lazily create and return the local variable caching the MonoDomain*
 *   (see the comment above: mono_domain_get() is called once and reused).
 */
1036 inline static MonoInst *
1037 mono_get_domainvar (MonoCompile *cfg)
1039 if (!cfg->domainvar)
1040 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1041 return cfg->domainvar;
1045 * The got_var contains the address of the Global Offset Table when AOT
/*
 * mono_get_got_var:
 *   Lazily create and return the variable holding the GOT address, used
 *   only for AOT compilation on architectures that define
 *   MONO_ARCH_NEED_GOT_VAR. NOTE(review): the non-AOT early return's value
 *   and the #else branch are not visible in this copy.
 */
1048 inline static MonoInst *
1049 mono_get_got_var (MonoCompile *cfg)
1051 #ifdef MONO_ARCH_NEED_GOT_VAR
1052 if (!cfg->compile_aot)
1054 if (!cfg->got_var) {
1055 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1057 return cfg->got_var;
/*
 * mono_get_vtable_var:
 *   Lazily create and return the runtime-generic-context (rgctx) variable.
 *   Only valid under generic sharing (asserted). The variable is forced
 *   onto the stack via MONO_INST_INDIRECT.
 */
1064 mono_get_vtable_var (MonoCompile *cfg)
1066 g_assert (cfg->generic_sharing_context);
1068 if (!cfg->rgctx_var) {
1069 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1070 /* force the var to be stack allocated */
1071 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1074 return cfg->rgctx_var;
/*
 * type_from_stack_type:
 *   Inverse of the stack classification: map an instruction's eval-stack
 *   type back to a representative MonoType* (int32 for I4, the managed
 *   pointer this_arg for MP, the instruction's own klass for VTYPE, etc.).
 *   Unhandled stack types abort via g_error.
 */
1078 type_from_stack_type (MonoInst *ins) {
1079 switch (ins->type) {
1080 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1081 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1082 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1083 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1085 return &ins->klass->this_arg;
1086 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1087 case STACK_VTYPE: return &ins->klass->byval_arg;
1089 g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * type_to_stack_type:
 *   Map a MonoType to its STACK_* classification, first stripping enum
 *   wrappers via mono_type_get_underlying_type. Generic instances are
 *   split by mono_type_generic_inst_is_valuetype. NOTE(review): the
 *   per-case return statements are not visible in this copy.
 */
1094 static G_GNUC_UNUSED int
1095 type_to_stack_type (MonoType *t)
1097 switch (mono_type_get_underlying_type (t)->type) {
1100 case MONO_TYPE_BOOLEAN:
1103 case MONO_TYPE_CHAR:
1110 case MONO_TYPE_FNPTR:
1112 case MONO_TYPE_CLASS:
1113 case MONO_TYPE_STRING:
1114 case MONO_TYPE_OBJECT:
1115 case MONO_TYPE_SZARRAY:
1116 case MONO_TYPE_ARRAY:
1124 case MONO_TYPE_VALUETYPE:
1125 case MONO_TYPE_TYPEDBYREF:
1127 case MONO_TYPE_GENERICINST:
1128 if (mono_type_generic_inst_is_valuetype (t))
1134 g_assert_not_reached ();
/*
 * array_access_to_klass:
 *   Map a CIL ldelem/stelem opcode to the MonoClass of the element it
 *   accesses (byte, int16, object, ...). Unknown opcodes assert.
 */
1141 array_access_to_klass (int opcode)
1145 return mono_defaults.byte_class;
1147 return mono_defaults.uint16_class;
1150 return mono_defaults.int_class;
1153 return mono_defaults.sbyte_class;
1156 return mono_defaults.int16_class;
1159 return mono_defaults.int32_class;
1161 return mono_defaults.uint32_class;
1164 return mono_defaults.int64_class;
1167 return mono_defaults.single_class;
1170 return mono_defaults.double_class;
1171 case CEE_LDELEM_REF:
1172 case CEE_STELEM_REF:
1173 return mono_defaults.object_class;
1175 g_assert_not_reached ();
1181 * We try to share variables when possible
/*
 * mono_compile_get_interface_var:
 *   Return a local variable used to carry the value in stack SLOT across
 *   basic-block boundaries. Variables are shared via cfg->intvars, keyed
 *   by (stack type, slot); slots beyond the method's declared max_stack
 *   (possible with inlining) always get a fresh variable.
 */
1184 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1189 /* inlining can result in deeper stacks */
1190 if (slot >= mono_method_get_header (cfg->method)->max_stack)
1191 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1193 pos = ins->type - 1 + slot * STACK_MAX;
1195 switch (ins->type) {
/* Reuse a previously created variable for this (type, slot) if any. */
1202 if ((vnum = cfg->intvars [pos]))
1203 return cfg->varinfo [vnum];
1204 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1205 cfg->intvars [pos] = res->inst_c0;
1208 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 *   During AOT compilation, record the (image, token) pair under KEY in
 *   cfg->token_info_hash so the AOT compiler can re-resolve the item
 *   later. Skipped when a generic context is active (AOT could not look
 *   the method up from image+token alone) and for table 0 (wrapper refs).
 */
1214 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1217 * Don't use this if a generic_context is set, since that means AOT can't
1218 * look up the method using just the image+token.
1219 * table == 0 means this is a reference made from a wrapper.
1221 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1222 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1223 jump_info_token->image = image;
1224 jump_info_token->token = token;
1225 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1230 * This function is called to handle items that are left on the evaluation stack
1231 * at basic block boundaries. What happens is that we save the values to local variables
1232 * and we reload them later when first entering the target basic block (with the
1233 * handle_loaded_temps () function).
1234 * A single joint point will use the same variables (stored in the array bb->out_stack or
1235 * bb->in_stack, if the basic block is before or after the joint point).
1237 * This function needs to be called _before_ emitting the last instruction of
1238 * the bb (i.e. before emitting a branch).
1239 * If the stack merge fails at a join point, cfg->unverifiable is set.
/*
 * handle_stack_args:
 *   Spill the COUNT values left on the eval stack (SP) into locals shared
 *   with the successor blocks, so they can be reloaded on entry to each
 *   target block. See the larger comment block above this function for the
 *   full contract (must be called before the bb's final branch; a failed
 *   merge sets cfg->unverifiable).
 */
1242 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1245 MonoBasicBlock *bb = cfg->cbb;
1246 MonoBasicBlock *outb;
1247 MonoInst *inst, **locals;
1252 if (cfg->verbose_level > 3)
1253 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bb: decide which variables carry the stack. */
1254 if (!bb->out_scount) {
1255 bb->out_scount = count;
1256 //printf ("bblock %d has out:", bb->block_num);
/* Prefer to adopt an existing in_stack from a successor. */
1258 for (i = 0; i < bb->out_count; ++i) {
1259 outb = bb->out_bb [i];
1260 /* exception handlers are linked, but they should not be considered for stack args */
1261 if (outb->flags & BB_EXCEPTION_HANDLER)
1263 //printf (" %d", outb->block_num);
1264 if (outb->in_stack) {
1266 bb->out_stack = outb->in_stack;
/* No successor had one: allocate fresh out_stack variables. */
1272 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1273 for (i = 0; i < count; ++i) {
1275 * try to reuse temps already allocated for this purpouse, if they occupy the same
1276 * stack slot and if they are of the same type.
1277 * This won't cause conflicts since if 'local' is used to
1278 * store one of the values in the in_stack of a bblock, then
1279 * the same variable will be used for the same outgoing stack
1281 * This doesn't work when inlining methods, since the bblocks
1282 * in the inlined methods do not inherit their in_stack from
1283 * the bblock they are inlined to. See bug #58863 for an
1286 if (cfg->inlined_method)
1287 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1289 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out_stack to successors that have no in_stack yet;
 * a depth mismatch with an existing in_scount is unverifiable IL. */
1294 for (i = 0; i < bb->out_count; ++i) {
1295 outb = bb->out_bb [i];
1296 /* exception handlers are linked, but they should not be considered for stack args */
1297 if (outb->flags & BB_EXCEPTION_HANDLER)
1299 if (outb->in_scount) {
1300 if (outb->in_scount != bb->out_scount) {
1301 cfg->unverifiable = TRUE;
1304 continue; /* check they are the same locals */
1306 outb->in_scount = count;
1307 outb->in_stack = bb->out_stack;
1310 locals = bb->out_stack;
/* Emit the actual stores and replace sp entries with the locals. */
1312 for (i = 0; i < count; ++i) {
1313 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1314 inst->cil_code = sp [i]->cil_code;
1315 sp [i] = locals [i];
1316 if (cfg->verbose_level > 3)
1317 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1321 * It is possible that the out bblocks already have in_stack assigned, and
1322 * the in_stacks differ. In this case, we will store to all the different
1329 /* Find a bblock which has a different in_stack */
1331 while (bindex < bb->out_count) {
1332 outb = bb->out_bb [bindex];
1333 /* exception handlers are linked, but they should not be considered for stack args */
1334 if (outb->flags & BB_EXCEPTION_HANDLER) {
1338 if (outb->in_stack != locals) {
/* Store to this successor's distinct variable set too. */
1339 for (i = 0; i < count; ++i) {
1340 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1341 inst->cil_code = sp [i]->cil_code;
1342 sp [i] = locals [i];
1343 if (cfg->verbose_level > 3)
1344 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1346 locals = outb->in_stack;
1355 /* Emit code which loads interface_offsets [klass->interface_id]
1356 * The array is stored in memory before vtable.
 */
/* Loads into intf_reg the per-interface offset entry for @klass.  The entries
 * live at negative offsets from the vtable pointer held in vtable_reg. */
1359 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1361 	if (cfg->compile_aot) {
/* AOT: the interface id is unknown at compile time; emit a patchable constant
 * (MONO_PATCH_INFO_ADJUSTED_IID) which presumably already encodes the negative
 * byte offset, then add it to the vtable pointer and load indirectly. */
1362 		int ioffset_reg = alloc_preg (cfg);
1363 		int iid_reg = alloc_preg (cfg);
1365 		MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1366 		MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1367 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT: interface_id is a compile-time constant, so load directly at a fixed
 * negative offset from the vtable. */
1370 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
1375 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1376 * stored in "klass_reg" implements the interface "klass".
 */
/* The test reads one byte of the class' interface_bitmap and masks the bit
 * corresponding to klass->interface_id (byte index = iid >> 3, bit = iid & 7). */
1379 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1381 	int ibitmap_reg = alloc_preg (cfg);
1382 	int ibitmap_byte_reg = alloc_preg (cfg);
/* ibitmap_reg <- klass->interface_bitmap (pointer to the bitmap). */
1384 	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap));
1386 	if (cfg->compile_aot) {
/* AOT: the iid is a patched constant, so byte index and bit mask must be
 * computed with emitted instructions instead of being folded at JIT time. */
1387 		int iid_reg = alloc_preg (cfg);
1388 		int shifted_iid_reg = alloc_preg (cfg);
1389 		int ibitmap_byte_address_reg = alloc_preg (cfg);
1390 		int masked_iid_reg = alloc_preg (cfg);
1391 		int iid_one_bit_reg = alloc_preg (cfg);
1392 		int iid_bit_reg = alloc_preg (cfg);
1393 		MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
/* NOTE(review): this function uses OP_SHR_IMM/OP_AND_IMM where the vtable
 * variant below uses OP_ISHR_IMM/OP_IAND_IMM — confirm the mismatch is
 * intentional (iid fits in 32 bits either way). */
1394 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1395 		MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1396 		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1397 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1398 		MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1399 		MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1400 		MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: byte index and bit mask are compile-time constants. */
1402 		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1403 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1408 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1409 * stored in "vtable_reg" implements the interface "klass".
 */
/* Same bit test as mini_emit_load_intf_bit_reg_class, but the interface
 * bitmap is read from the MonoVTable instead of the MonoClass. */
1412 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1414 	int ibitmap_reg = alloc_preg (cfg);
1415 	int ibitmap_byte_reg = alloc_preg (cfg);
/* ibitmap_reg <- vtable->interface_bitmap. */
1417 	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap));
1419 	if (cfg->compile_aot) {
/* AOT path: compute byte index (iid >> 3) and bit mask (1 << (iid & 7))
 * at run time from the patched MONO_PATCH_INFO_IID constant. */
1420 		int iid_reg = alloc_preg (cfg);
1421 		int shifted_iid_reg = alloc_preg (cfg);
1422 		int ibitmap_byte_address_reg = alloc_preg (cfg);
1423 		int masked_iid_reg = alloc_preg (cfg);
1424 		int iid_one_bit_reg = alloc_preg (cfg);
1425 		int iid_bit_reg = alloc_preg (cfg);
1426 		MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1427 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, shifted_iid_reg, iid_reg, 3);
1428 		MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1429 		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1430 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, masked_iid_reg, iid_reg, 7);
1431 		MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1432 		MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1433 		MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT path: both index and mask are folded at compile time. */
1435 		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1436 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1441 * Emit code which checks that the interface id of @klass does not exceed
1442 * the value held in max_iid_reg.
 */
/* On failure, branches to false_target when one is given; otherwise the
 * missing else-arm throws InvalidCastException (presumably guarded by an
 * if (false_target) on a line not visible here — TODO confirm). */
1445 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1446 						  MonoBasicBlock *false_target)
1448 	if (cfg->compile_aot) {
/* AOT: iid is a patched constant, so compare register-to-register. */
1449 		int iid_reg = alloc_preg (cfg);
1450 		MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1451 		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1454 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
/* Unsigned less-than: fails when max_iid < klass->interface_id. */
1456 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1458 		MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1461 /* Same as above, but obtains max_iid from a vtable */
1463 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1464 						  MonoBasicBlock *false_target)
1466 	int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is a 16-bit field, hence the unsigned 2-byte load. */
1468 	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1469 	mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1472 /* Same as above, but obtains max_iid from a klass */
1474 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1475 						  MonoBasicBlock *false_target)
1477 	int max_iid_reg = alloc_preg (cfg);
/* Load MonoClass.max_interface_id (16-bit) and delegate to the common check. */
1479 	MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1480 	mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/* Emit an "is instance" subtype test using the supertypes table of the class
 * in klass_reg: branch to true_target when supertypes[klass->idepth - 1]
 * equals @klass, to false_target when the inheritance depth is too small. */
1484 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1486 	int idepth_reg = alloc_preg (cfg);
1487 	int stypes_reg = alloc_preg (cfg);
1488 	int stype = alloc_preg (cfg);
/* Only need the explicit depth check when the supertable may be truncated. */
1490 	if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1491 		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1492 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1493 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
/* stype <- klass_reg->supertypes [klass->idepth - 1] */
1495 	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1496 	MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1497 	if (cfg->compile_aot) {
/* AOT: compare against a patchable class constant instead of an immediate. */
1498 		int const_reg = alloc_preg (cfg);
1499 		MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1500 		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1502 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1504 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Emit an interface cast check against the vtable in vtable_reg: verify the
 * max interface id, then test the interface bitmap bit for @klass.  With a
 * true_target, branch there on success; otherwise throw on failure. */
1508 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1510 	int intf_reg = alloc_preg (cfg);
1512 	mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1513 	mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1514 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
/* Nonzero bit => the interface is implemented.  The if/else guard around
 * these two lines is not visible in this chunk — presumably if (true_target). */
1516 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1518 		MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1522 * Variant of the above that takes a register to the class, not the vtable.
 */
1525 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1527 	int intf_bit_reg = alloc_preg (cfg);
/* Same shape as mini_emit_iface_cast, but uses the MonoClass-based helpers. */
1529 	mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1530 	mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1531 	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1533 		MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1535 		MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Emit an exact class-identity check: throw InvalidCastException unless the
 * MonoClass* in klass_reg is exactly @klass. */
1539 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1541 	if (cfg->compile_aot) {
/* AOT cannot embed raw pointers: materialize the class via a patchable const. */
1542 		int const_reg = alloc_preg (cfg);
1543 		MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1544 		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1546 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1548 	MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Like mini_emit_class_check, but instead of throwing, branch to @target
 * using the caller-supplied conditional branch opcode @branch_op. */
1552 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1554 	if (cfg->compile_aot) {
1555 		int const_reg = alloc_preg (cfg);
1556 		MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1557 		MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1559 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1561 	MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/* Emit a castclass check for the object in obj_reg whose MonoClass* is in
 * klass_reg.  Handles array casts (rank + element class, with enum/object
 * special cases and recursion for arrays of arrays) and plain class casts
 * via the supertypes table.  Branches to object_is_null where the cast is
 * trivially satisfied; throws InvalidCastException on mismatch. */
1565 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
/* Array branch — the guarding if (klass->rank) is on a line not visible here. */
1568 		int rank_reg = alloc_preg (cfg);
1569 		int eclass_reg = alloc_preg (cfg);
/* The rank must match exactly. */
1571 		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1572 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1573 		MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1574 //		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1575 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
1576 		if (klass->cast_class == mono_defaults.object_class) {
/* Casting to object[]: any reference or boxed-enum element class is fine;
 * non-Enum value types are rejected by the class check. */
1577 			int parent_reg = alloc_preg (cfg);
1578 			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1579 			mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1580 			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1581 		} else if (klass->cast_class == mono_defaults.enum_class->parent) {
/* Casting to ValueType[]: accept ValueType or Enum element classes. */
1582 			mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1583 			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1584 		} else if (klass->cast_class == mono_defaults.enum_class) {
1585 			mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1586 		} else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1587 			mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1589 			// Pass -1 as obj_reg to skip the check below for arrays of arrays
1590 			mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1593 		if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1594 			/* Check that the object is a vector too */
1595 			int bounds_reg = alloc_preg (cfg);
1596 			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1597 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1598 			MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array branch: supertypes-table subtype check, throwing on failure
 * (same structure as mini_emit_isninst_cast but with exceptions). */
1601 		int idepth_reg = alloc_preg (cfg);
1602 		int stypes_reg = alloc_preg (cfg);
1603 		int stype = alloc_preg (cfg);
1605 		if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1606 			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1607 			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1608 			MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1610 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1611 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1612 		mini_emit_class_check (cfg, stype, klass);
/* Emit inline code to set @size bytes at destreg+offset to @val (only 0 is
 * supported), using the widest stores permitted by @align. */
1617 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1621 	g_assert (val == 0);
/* Small, well-aligned sizes: a single immediate store suffices (the switch
 * on size is on lines not visible in this chunk). */
1626 	if ((size <= 4) && (size <= align)) {
1629 			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1632 			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1635 			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1637 #if SIZEOF_REGISTER == 8
1639 			MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General case: materialize the value in a register once, then store it
 * repeatedly in the loops below. */
1645 	val_reg = alloc_preg (cfg);
1647 	if (SIZEOF_REGISTER == 8)
1648 		MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1650 		MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Byte-at-a-time fallback for align < 4 targets. */
1653 		/* This could be optimized further if necessary */
1655 			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1662 #if !NO_UNALIGNED_ACCESS
1663 	if (SIZEOF_REGISTER == 8) {
/* 64-bit: an initial 4-byte store presumably aligns offset to 8 before the
 * 8-byte store loop — the guarding condition is not visible here. */
1665 			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1670 			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* Drain the remainder with progressively narrower stores. */
1678 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1683 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1688 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1694 #endif /* DISABLE_JIT */
/* Emit inline code copying @size bytes from srcreg+soffset to destreg+doffset,
 * using the widest load/store pairs permitted by @align. */
1697 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
/* Byte-at-a-time fallback when alignment is too small for wide accesses. */
1705 		/* This could be optimized further if necessary */
1707 			cur_reg = alloc_preg (cfg);
1708 			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1709 			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1716 #if !NO_UNALIGNED_ACCESS
/* 64-bit: copy 8 bytes per iteration while size permits. */
1717 	if (SIZEOF_REGISTER == 8) {
1719 			cur_reg = alloc_preg (cfg);
1720 			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1721 			MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* Drain the remainder with 4-, 2- and 1-byte copies. */
1730 		cur_reg = alloc_preg (cfg);
1731 		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1732 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1738 		cur_reg = alloc_preg (cfg);
1739 		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1740 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1746 		cur_reg = alloc_preg (cfg);
1747 		MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1748 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/* Map a managed return type to the IR call opcode family: OP_*CALL for a
 * direct call, OP_*CALL_REG for calli, OP_*CALLVIRT for virtual calls.
 * Byref returns and enums/generic insts are normalized before dispatch. */
1758 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
/* Byref returns are pointer-sized — the guarding if (type->byref) is on a
 * line not visible in this chunk. */
1761 		return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
/* Under generic sharing, reduce type variables to their basic representation. */
1764 	type = mini_get_basic_type_from_generic (gsctx, type);
1765 	switch (type->type) {
1766 	case MONO_TYPE_VOID:
1767 		return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1770 	case MONO_TYPE_BOOLEAN:
1773 	case MONO_TYPE_CHAR:
1776 		return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1780 	case MONO_TYPE_FNPTR:
1781 		return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1782 	case MONO_TYPE_CLASS:
1783 	case MONO_TYPE_STRING:
1784 	case MONO_TYPE_OBJECT:
1785 	case MONO_TYPE_SZARRAY:
1786 	case MONO_TYPE_ARRAY:
1787 		return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
/* 64-bit integers use the LCALL family, floats the FCALL family. */
1790 		return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1793 		return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1794 	case MONO_TYPE_VALUETYPE:
1795 		if (type->data.klass->enumtype) {
/* Enums re-dispatch on their underlying integral type (loop back via a
 * goto/retry on lines not visible here — TODO confirm). */
1796 			type = mono_class_enum_basetype (type->data.klass);
1799 		return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1800 	case MONO_TYPE_TYPEDBYREF:
1801 		return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1802 	case MONO_TYPE_GENERICINST:
/* Re-dispatch on the generic container class' byval type. */
1803 		type = &type->data.generic_class->container_class->byval_arg;
1806 		g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1812 * target_type_is_incompatible:
1813 * @cfg: MonoCompile context
1815 * Check that the item @arg on the evaluation stack can be stored
1816 * in the target type (can be a local, or field, etc).
1817 * The cfg arg can be used to check if we need verification or just
1820 * Returns: non-0 value if arg can't be stored on a target.
 */
1823 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1825 	MonoType *simple_type;
1828 	if (target->byref) {
1829 		/* FIXME: check that the pointed to types match */
1830 		if (arg->type == STACK_MP)
1831 			return arg->klass != mono_class_from_mono_type (target);
1832 		if (arg->type == STACK_PTR)
/* Strip enum/custom-modifier wrappers before comparing stack types. */
1837 	simple_type = mono_type_get_underlying_type (target);
1838 	switch (simple_type->type) {
1839 	case MONO_TYPE_VOID:
1843 	case MONO_TYPE_BOOLEAN:
1846 	case MONO_TYPE_CHAR:
/* Small integral targets accept I4 and native-int stack entries. */
1849 		if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1853 		/* STACK_MP is needed when setting pinned locals */
1854 		if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1859 	case MONO_TYPE_FNPTR:
1860 		if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1863 	case MONO_TYPE_CLASS:
1864 	case MONO_TYPE_STRING:
1865 	case MONO_TYPE_OBJECT:
1866 	case MONO_TYPE_SZARRAY:
1867 	case MONO_TYPE_ARRAY:
1868 		if (arg->type != STACK_OBJ)
1870 		/* FIXME: check type compatibility */
/* I8 and R8 targets require the matching stack slot type. */
1874 		if (arg->type != STACK_I8)
1879 		if (arg->type != STACK_R8)
1882 	case MONO_TYPE_VALUETYPE:
1883 		if (arg->type != STACK_VTYPE)
/* Value types must match by exact class identity. */
1885 		klass = mono_class_from_mono_type (simple_type);
1886 		if (klass != arg->klass)
1889 	case MONO_TYPE_TYPEDBYREF:
1890 		if (arg->type != STACK_VTYPE)
1892 		klass = mono_class_from_mono_type (simple_type);
1893 		if (klass != arg->klass)
1896 	case MONO_TYPE_GENERICINST:
1897 		if (mono_type_generic_inst_is_valuetype (simple_type)) {
1898 			if (arg->type != STACK_VTYPE)
1900 			klass = mono_class_from_mono_type (simple_type);
1901 			if (klass != arg->klass)
1905 			if (arg->type != STACK_OBJ)
1907 			/* FIXME: check type compatibility */
1911 	case MONO_TYPE_MVAR:
1912 		/* FIXME: all the arguments must be references for now,
1913 		 * later look inside cfg and see if the arg num is
1914 		 * really a reference
1916 		g_assert (cfg->generic_sharing_context);
1917 		if (arg->type != STACK_OBJ)
1921 		g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1927 * Prepare arguments for passing to a function call.
1928 * Return a non-zero value if the arguments can't be passed to the given
1930 * The type checks are not yet complete and some conversions may need
1931 * casts on 32 or 64 bit architectures.
1933 * FIXME: implement this using target_type_is_incompatible ()
 */
1936 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1938 	MonoType *simple_type;
/* args [0] is the implicit "this" when sig->hasthis — the guard is on a
 * line not visible in this chunk. */
1942 		if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1946 	for (i = 0; i < sig->param_count; ++i) {
1947 		if (sig->params [i]->byref) {
1948 			if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1952 		simple_type = sig->params [i];
/* Normalize shared-generic type variables before the stack-type check. */
1953 		simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1955 		switch (simple_type->type) {
1956 		case MONO_TYPE_VOID:
1961 		case MONO_TYPE_BOOLEAN:
1964 		case MONO_TYPE_CHAR:
1967 			if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
1973 		case MONO_TYPE_FNPTR:
1974 			if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
1977 		case MONO_TYPE_CLASS:
1978 		case MONO_TYPE_STRING:
1979 		case MONO_TYPE_OBJECT:
1980 		case MONO_TYPE_SZARRAY:
1981 		case MONO_TYPE_ARRAY:
1982 			if (args [i]->type != STACK_OBJ)
1987 			if (args [i]->type != STACK_I8)
1992 			if (args [i]->type != STACK_R8)
1995 		case MONO_TYPE_VALUETYPE:
1996 			if (simple_type->data.klass->enumtype) {
/* Enums: re-check against the underlying integral type. */
1997 				simple_type = mono_class_enum_basetype (simple_type->data.klass);
2000 			if (args [i]->type != STACK_VTYPE)
2003 		case MONO_TYPE_TYPEDBYREF:
2004 			if (args [i]->type != STACK_VTYPE)
2007 		case MONO_TYPE_GENERICINST:
2008 			simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2012 			g_error ("unknown type 0x%02x in check_call_signature",
/* Map an OP_*CALLVIRT opcode to the matching direct-call opcode (the other
 * case labels and returns are on lines not visible in this chunk). */
2020 callvirt_to_call (int opcode)
2025 	case OP_VOIDCALLVIRT:
/* Unknown opcode: caller passed a non-CALLVIRT value. */
2034 		g_assert_not_reached ();
/* Map an OP_*CALLVIRT opcode to the matching OP_*CALL_MEMBASE opcode, used
 * when the call target is loaded from memory (vtable/IMT slot). */
2041 callvirt_to_call_membase (int opcode)
2045 		return OP_CALL_MEMBASE;
2046 	case OP_VOIDCALLVIRT:
2047 		return OP_VOIDCALL_MEMBASE;
2049 		return OP_FCALL_MEMBASE;
2051 		return OP_LCALL_MEMBASE;
2053 		return OP_VCALL_MEMBASE;
2055 		g_assert_not_reached ();
#ifdef MONO_ARCH_HAVE_IMT
/* Pass the IMT discriminator (imt_arg if given, otherwise call->method) to an
 * interface call, either in the architecture's dedicated IMT register or via
 * the architecture-specific hook. */
2063 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2065 #ifdef MONO_ARCH_IMT_REG
2066 	int method_reg = alloc_preg (cfg);
/* The if (imt_arg) guard for this move is on a line not visible here. */
2069 		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2070 	} else if (cfg->compile_aot) {
2071 		MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
/* JIT: embed the MonoMethod* directly as a pointer constant. */
2074 		MONO_INST_NEW (cfg, ins, OP_PCONST);
2075 		ins->inst_p0 = call->method;
2076 		ins->dreg = method_reg;
2077 		MONO_ADD_INS (cfg->cbb, ins);
/* Bind the register to MONO_ARCH_IMT_REG for the call's register allocation. */
2080 	mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2082 	mono_arch_emit_imt_argument (cfg, call, imt_arg);
/* Allocate a MonoJumpInfo patch descriptor from the mempool @mp; the ip/type
 * field assignments are on lines not visible in this chunk. */
static MonoJumpInfo *
2088 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2090 	MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2094 	ji->data.target = target;
/* Forward declaration: needed by the MONO_ARCH_SOFT_FLOAT path below. */
inline static MonoInst*
2100 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args);

/* Create and initialize a MonoCallInst for @sig: pick the call opcode, set up
 * valuetype return handling (OP_OUTARG_VTRETADDR), soft-float argument
 * conversion, and the architecture-specific outgoing-argument emission.
 * The instruction is NOT added to a bblock here; callers do that. */
inline static MonoCallInst *
2103 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2104 					 MonoInst **args, int calli, int virtual)
2107 #ifdef MONO_ARCH_SOFT_FLOAT
2111 	MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2114 	call->signature = sig;
2116 	type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
2118 	if (MONO_TYPE_ISSTRUCT (sig->ret)) {
/* Valuetype return: allocate a local to receive the value and record its
 * address via a VTRETADDR pseudo-op (see comment below). */
2119 		MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2122 		temp->backend.is_pinvoke = sig->pinvoke;
2125 		 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2126 		 * address of return value to increase optimization opportunities.
2127 		 * Before vtype decomposition, the dreg of the call ins itself represents the
2128 		 * fact the call modifies the return value. After decomposition, the call will
2129 		 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2130 		 * will be transformed into an LDADDR.
2132 		MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2133 		loada->dreg = alloc_preg (cfg);
2134 		loada->inst_p0 = temp;
2135 		/* We reference the call too since call->dreg could change during optimization */
2136 		loada->inst_p1 = call;
2137 		MONO_ADD_INS (cfg->cbb, loada);
2139 		call->inst.dreg = temp->dreg;
2141 		call->vret_var = loada;
2142 	} else if (!MONO_TYPE_IS_VOID (sig->ret))
2143 		call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2145 #ifdef MONO_ARCH_SOFT_FLOAT
2147 	 * If the call has a float argument, we would need to do an r8->r4 conversion using
2148 	 * an icall, but that cannot be done during the call sequence since it would clobber
2149 	 * the call registers + the stack. So we do it before emitting the call.
2151 	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2153 		MonoInst *in = call->args [i];
/* Index 0 is "this" (pointer-sized) when sig->hasthis. */
2155 		if (i >= sig->hasthis)
2156 			t = sig->params [i - sig->hasthis];
2158 			t = &mono_defaults.int_class->byval_arg;
2159 		t = mono_type_get_underlying_type (t);
2161 		if (!t->byref && t->type == MONO_TYPE_R4) {
2162 			MonoInst *iargs [1];
/* Convert the r8 stack value to an r4 bit pattern via an icall. */
2166 			conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2168 			/* The result will be in an int vreg */
2169 			call->args [i] = conv;
/* Lower the argument list per the target calling convention. */
2174 	mono_arch_emit_call (cfg, call);
2176 	cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2177 	cfg->flags |= MONO_CFG_HAS_CALLS;
/* Emit an indirect call (CIL calli): the target address comes from @addr's
 * destination register. */
inline static MonoInst*
2183 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2185 	MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE);
2187 	call->inst.sreg1 = addr->dreg;
2189 	MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2191 	return (MonoInst*)call;
/* Like mono_emit_calli, but additionally passes the runtime generic context
 * (@rgctx_arg) in the architecture's dedicated RGCTX register.  Only valid
 * on architectures defining MONO_ARCH_RGCTX_REG. */
inline static MonoInst*
2195 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2197 #ifdef MONO_ARCH_RGCTX_REG
/* Copy the rgctx value into a fresh vreg bound to MONO_ARCH_RGCTX_REG. */
2202 	rgctx_reg = mono_alloc_preg (cfg);
2203 	MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2205 	call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2207 	mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2208 	cfg->uses_rgctx_reg = TRUE;
2209 	call->rgctx_reg = TRUE;
2211 	return (MonoInst*)call;
/* No RGCTX register on this architecture: this path must never be reached. */
2213 	g_assert_not_reached ();
/* Emit a managed call to @method.  Chooses among: remoting wrappers for
 * MarshalByRef targets, a delegate-Invoke fast path, a direct call when the
 * method is non-virtual or final, and otherwise a virtual dispatch through
 * the vtable or (for interfaces) the IMT.  @this being non-NULL selects the
 * virtual calling convention; @imt_arg optionally carries the IMT
 * discriminator for generic-virtual/interface calls. */
2219 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2220 					   MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2222 	gboolean virtual = this != NULL;
2223 	gboolean enable_for_aot = TRUE;
2226 	if (method->string_ctor) {
2227 		/* Create the real signature */
2228 		/* FIXME: Cache these */
/* String ctors are declared void but actually return the new string. */
2229 		MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2230 		ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2235 	call = mono_emit_call_args (cfg, sig, args, FALSE, virtual);
2237 	if (this && sig->hasthis &&
2238 	    (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2239 	    !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this)) {
/* Possibly-remote receiver: route through the remoting check wrapper. */
2240 		call->method = mono_marshal_get_remoting_invoke_with_check (method);
2242 		call->method = method;
2244 	call->inst.flags |= MONO_INST_HAS_METHOD;
2245 	call->inst.inst_left = this;
2248 		int vtable_reg, slot_reg, this_reg;
2250 		this_reg = this->dreg;
2252 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2253 		if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2254 			/* Make a call to delegate->invoke_impl */
/* Fast path: jump through the per-delegate invoke_impl slot directly. */
2255 			call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2256 			call->inst.inst_basereg = this_reg;
2257 			call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2258 			MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2260 			return (MonoInst*)call;
2264 		if ((!cfg->compile_aot || enable_for_aot) &&
2265 		    (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2266 			 (MONO_METHOD_IS_FINAL (method) &&
2267 			  method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK))) {
2269 			 * the method is not virtual, we just need to ensure this is not null
2270 			 * and then we can call the method directly.
2272 			if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2273 				method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2276 			if (!method->string_ctor) {
/* Null-check "this" before the direct call (string ctors have no receiver). */
2277 				cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2278 				MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2279 				MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2282 			call->inst.opcode = callvirt_to_call (call->inst.opcode);
2284 			MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2286 			return (MonoInst*)call;
2289 		if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2291 			 * the method is virtual, but we can statically dispatch since either
2292 			 * it's class or the method itself are sealed.
2293 			 * But first we need to ensure it's not a null reference.
2295 			cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2296 			MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2297 			MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2299 			call->inst.opcode = callvirt_to_call (call->inst.opcode);
2300 			MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2302 			return (MonoInst*)call;
/* True virtual dispatch: load the vtable and call through a slot. */
2305 		call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2307 		vtable_reg = alloc_preg (cfg);
2308 		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2309 		if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2311 #ifdef MONO_ARCH_HAVE_IMT
/* Interface + IMT: the slot lives at a negative offset from the vtable. */
2313 				guint32 imt_slot = mono_method_get_imt_slot (method);
2314 				emit_imt_argument (cfg, call, imt_arg);
2315 				slot_reg = vtable_reg;
2316 				call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
/* No IMT: resolve the interface offsets table entry at run time. */
2319 			if (slot_reg == -1) {
2320 				slot_reg = alloc_preg (cfg);
2321 				mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2322 				call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
/* Class method: plain vtable slot indexed by the method's vtable index. */
2325 			slot_reg = vtable_reg;
2326 			call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2327 				(mono_method_get_vtable_index (method) * SIZEOF_VOID_P);
2328 #ifdef MONO_ARCH_HAVE_IMT
/* Generic virtual methods also dispatch through the IMT argument. */
2330 				g_assert (mono_method_signature (method)->generic_param_count);
2331 				emit_imt_argument (cfg, call, imt_arg);
2336 		call->inst.sreg1 = slot_reg;
2337 		call->virtual = TRUE;
2340 	MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2342 	return (MonoInst*)call;
/* Like mono_emit_method_call_full, but also passes @vtable_arg (the runtime
 * generic context) in MONO_ARCH_RGCTX_REG when the architecture has one. */
2346 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2347 		MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
2354 #ifdef MONO_ARCH_RGCTX_REG
2355 		rgctx_reg = mono_alloc_preg (cfg);
2356 		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2361 	ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2363 	call = (MonoCallInst*)ins;
2365 #ifdef MONO_ARCH_RGCTX_REG
/* Bind the rgctx vreg to the dedicated register for this call. */
2366 		mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2367 		cfg->uses_rgctx_reg = TRUE;
2368 		call->rgctx_reg = TRUE;
/* Convenience wrapper: call @method with its own signature and no IMT arg. */
static inline MonoInst*
2378 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2380 	return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
/* Emit a direct call to the native function @func with signature @sig.
 * The fptr assignment is on a line not visible in this chunk. */
2384 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2391 	call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE);
2394 	MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2396 	return (MonoInst*)call;
/* Emit a call to a registered JIT icall, resolving @func to its icall info
 * and calling through the icall wrapper. */
inline static MonoInst*
2400 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2402 	MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2406 	return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2410 * mono_emit_abs_call:
2412 *   Emit a call to the runtime function described by PATCH_TYPE and DATA.
 */
inline static MonoInst*
2415 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2416 					MonoMethodSignature *sig, MonoInst **args)
2418 	MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2422 	 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2425 	if (cfg->abs_patches == NULL)
2426 		cfg->abs_patches = g_hash_table_new (NULL, NULL);
/* Register ji so the backend can recognize the fake address and patch it. */
2427 	g_hash_table_insert (cfg->abs_patches, ji, ji);
2428 	ins = mono_emit_native_call (cfg, ji, sig, args);
2429 	((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/* Lazily look up and cache the managed String.memcpy(dest, src, size) helper
 * used for valuetype copies too large to inline. */
2434 get_memcpy_method (void)
2436 	static MonoMethod *memcpy_method = NULL;
2437 	if (!memcpy_method) {
2438 		memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
/* The helper lives in corlib; a missing method means a corlib mismatch. */
2440 			g_error ("Old corlib found. Install a new one");
2442 	return memcpy_method;
2446 * Emit code to copy a valuetype of type @klass whose address is stored in
2447 * @src->dreg to memory whose address is stored at @dest->dreg.
 */
/* @native selects the native (marshalled) layout size; when write barriers
 * are enabled, reference-carrying structs are copied via mono_value_copy. */
2450 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2452 	MonoInst *iargs [3];
2455 	MonoMethod *memcpy_method;
2459 	 * This check breaks with spilled vars... need to handle it during verification anyway.
2460 	 * g_assert (klass && klass == src->klass && klass == dest->klass);
2464 		n = mono_class_native_size (klass, &align);
2466 		n = mono_class_value_size (klass, &align);
2468 #if HAVE_WRITE_BARRIERS
2469 	/* if native is true there should be no references in the struct */
2470 	if (klass->has_references && !native) {
2471 		/* Avoid barriers when storing to the stack */
2472 		if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2473 			  (dest->opcode == OP_LDADDR))) {
/* GC-aware copy: the icall performs the stores through write barriers. */
2476 			EMIT_NEW_PCONST (cfg, iargs [2], klass);
2478 			mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* Small structs: expand to inline loads/stores instead of a call. */
2483 	if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2484 		/* FIXME: Optimize the case when src/dest is OP_LDADDR */
2485 		mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
/* Large structs: call the managed memcpy helper with (dest, src, n). */
2489 		EMIT_NEW_ICONST (cfg, iargs [2], n);
2491 		memcpy_method = get_memcpy_method ();
2492 		mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/* Lazily look up and cache the managed String.memset(ptr, val, size) helper
 * used by mini_emit_initobj for large valuetypes. */
2497 get_memset_method (void)
2499 	static MonoMethod *memset_method = NULL;
2500 	if (!memset_method) {
2501 		memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
/* Missing helper indicates an incompatible (old) corlib. */
2503 			g_error ("Old corlib found. Install a new one");
2505 	return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit IR to zero-initialize a valuetype of type @klass whose address is
 * in @dest->dreg (CIL 'initobj' semantics).  Small types are zeroed inline;
 * larger ones go through the corlib memset helper.
 */
2509 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2511 MonoInst *iargs [3];
2514 MonoMethod *memset_method;
2516 /* FIXME: Optimize this for the case when dest is an LDADDR */
2518 mono_class_init (klass);
2519 n = mono_class_value_size (klass, &align);
/* Small struct: zero it with inline stores */
2521 if (n <= sizeof (gpointer) * 5) {
2522 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
/* Otherwise call memset (dest, 0, n) */
2525 memset_method = get_memset_method ();
2527 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2528 EMIT_NEW_ICONST (cfg, iargs [2], n);
2529 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR to load the runtime generic context for generic-shared code.
 * The source depends on the kind of method:
 *  - method-inflated methods load the MRGCTX from the vtable var;
 *  - static/valuetype methods load the vtable (possibly indirecting
 *    through MonoMethodRuntimeGenericContext.class_vtable);
 *  - otherwise the context is reached through 'this''s vtable.
 * Only valid when cfg->generic_sharing_context is set.
 */
2534 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2536 MonoInst *this = NULL;
2538 g_assert (cfg->generic_sharing_context);
/* Load 'this' (arg 0) when the context comes from the receiver object */
2540 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2541 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2542 !method->klass->valuetype)
2543 EMIT_NEW_ARGLOAD (cfg, this, 0);
2545 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2546 MonoInst *mrgctx_loc, *mrgctx_var;
2549 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2551 mrgctx_loc = mono_get_vtable_var (cfg);
2552 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2555 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2556 MonoInst *vtable_loc, *vtable_var;
2560 vtable_loc = mono_get_vtable_var (cfg);
2561 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2563 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
/* The var actually holds an MRGCTX; fetch the class vtable out of it */
2564 MonoInst *mrgctx_var = vtable_var;
2567 vtable_reg = alloc_preg (cfg);
2568 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2569 vtable_var->type = STACK_PTR;
/* Instance method on a reference type: read the vtable from 'this' */
2575 int vtable_reg, res_reg;
2577 vtable_reg = alloc_preg (cfg);
2578 res_reg = alloc_preg (cfg);
2579 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate (from mempool @mp) an RGCTX-fetch descriptor wrapping a patch
 * of @patch_type/@patch_data for @method, to be resolved lazily by the
 * rgctx fetch trampoline.  @in_mrgctx selects MRGCTX vs plain RGCTX lookup.
 */
2584 static MonoJumpInfoRgctxEntry *
2585 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2587 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2588 res->method = method;
2589 res->in_mrgctx = in_mrgctx;
/* Embed the patch describing what to look up in the context */
2590 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2591 res->data->type = patch_type;
2592 res->data->data.target = patch_data;
2593 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *
 *   Emit a call to the lazy rgctx fetch trampoline which resolves @entry
 * against the runtime generic context instruction @rgctx and returns the
 * looked-up value.
 */
2598 static inline MonoInst*
2599 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2601 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR that fetches class-related data (@rgctx_type, e.g. a vtable)
 * for @klass from the runtime generic context of the current method.
 */
2605 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2606 MonoClass *klass, int rgctx_type)
2608 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2609 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2611 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_method:
 *
 *   Emit IR that fetches method-related data (@rgctx_type, e.g. generic
 * method code) for @cmethod from the runtime generic context.
 */
2615 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2616 MonoMethod *cmethod, int rgctx_type)
2618 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2619 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2621 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR that fetches field-related data (@rgctx_type) for @field from
 * the runtime generic context of the current method.
 */
2625 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2626 MonoClassField *field, int rgctx_type)
2628 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2629 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2631 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * mini_emit_check_array_type:
 *
 *   Emit IR that verifies @obj is an instance of exactly @array_class,
 * throwing ArrayTypeMismatchException otherwise (used for stelem checks).
 * The comparison strategy depends on the compilation mode:
 *  - MONO_OPT_SHARED: compare MonoClass pointers;
 *  - generic sharing: fetch the expected vtable from the rgctx;
 *  - otherwise: compare vtable pointers directly (AOT uses a patch const).
 */
2635 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
2637 int vtable_reg = alloc_preg (cfg);
2638 int context_used = 0;
2640 if (cfg->generic_sharing_context)
2641 context_used = mono_class_check_context_used (array_class);
2643 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
2645 if (cfg->opt & MONO_OPT_SHARED) {
/* Shared code can't bake in vtable pointers: compare the klass instead */
2646 int class_reg = alloc_preg (cfg);
2647 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2648 if (cfg->compile_aot) {
2649 int klass_reg = alloc_preg (cfg);
2650 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
2651 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
2653 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
2655 } else if (context_used) {
2656 MonoInst *vtable_ins;
/* Expected vtable is only known at runtime: fetch it from the rgctx */
2658 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
2659 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
2661 if (cfg->compile_aot) {
2662 int vt_reg = alloc_preg (cfg);
2663 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, mono_class_vtable (cfg->domain, array_class));
2664 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
2666 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, mono_class_vtable (cfg->domain, array_class));
2670 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
/*
 * save_cast_details:
 *
 *   When --debug=casts is enabled, emit IR that records the source class
 * (taken from @obj_reg's vtable) and the target @klass into the thread's
 * MonoJitTlsData, so a failed cast can report both types.  No-op otherwise.
 */
2674 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
2676 if (mini_get_debug_options ()->better_cast_details) {
2677 int to_klass_reg = alloc_preg (cfg);
2678 int vtable_reg = alloc_preg (cfg);
2679 int klass_reg = alloc_preg (cfg);
2680 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
/* Platform has no TLS intrinsic: the feature cannot work here */
2683 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
2687 MONO_ADD_INS (cfg->cbb, tls_get);
2688 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2689 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2691 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
2692 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
2693 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * reset_cast_details:
 *
 *   Counterpart of save_cast_details (): after a cast has succeeded, clear
 * the recorded details in the thread's MonoJitTlsData.  No-op unless
 * --debug=casts is enabled.
 */
2698 reset_cast_details (MonoCompile *cfg)
2700 /* Reset the variables holding the cast details */
2701 if (mini_get_debug_options ()->better_cast_details) {
2702 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2704 MONO_ADD_INS (cfg->cbb, tls_get);
2705 /* It is enough to reset the from field */
2706 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
2711 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
2712 * generic code is generated.
2715 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
2717 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
2720 MonoInst *rgctx, *addr;
2722 /* FIXME: What if the class is shared? We might not
2723 have to get the address of the method from the
/* Shared code: the concrete Unbox method isn't known at compile time,
 * so fetch its code address from the rgctx and call indirectly. */
2725 addr = emit_get_rgctx_method (cfg, context_used, method,
2726 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2728 rgctx = emit_get_rgctx (cfg, method, context_used);
2730 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
/* Non-shared code: direct call to Nullable<T>.Unbox */
2732 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *
 *   Emit IR for the CIL 'unbox' opcode: type-check the boxed object on the
 * stack against @klass (throwing InvalidCastException on mismatch) and
 * produce a managed pointer to the value payload (obj + sizeof (MonoObject)).
 */
2737 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
2741 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
2742 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
2743 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
2744 int rank_reg = alloc_dreg (cfg ,STACK_I4);
2746 obj_reg = sp [0]->dreg;
2747 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2748 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2750 /* FIXME: generics */
2751 g_assert (klass->rank == 0);
/* A boxed valuetype is never an array: reject any non-zero rank */
2754 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
2755 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2757 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2758 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
/* Shared code: compare against the element class fetched from the rgctx */
2761 MonoInst *element_class;
2763 /* This assertion is from the unboxcast insn */
2764 g_assert (klass->rank == 0);
2766 element_class = emit_get_rgctx_klass (cfg, context_used,
2767 klass->element_class, MONO_RGCTX_INFO_KLASS);
2769 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
2770 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-shared: record cast details for diagnostics, then check the class */
2772 save_cast_details (cfg, klass->element_class, obj_reg);
2773 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
2774 reset_cast_details (cfg);
/* Result: pointer past the MonoObject header, i.e. the unboxed value */
2777 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
2778 MONO_ADD_INS (cfg->cbb, add);
2779 add->type = STACK_MP;
/*
 * handle_alloc:
 *
 *   Emit IR that allocates a new object of @klass and returns it.
 * @for_box: the allocation is for a box operation (may select a different
 * managed allocator).  Chooses between several strategies: the generic
 * mono_object_new (shared code), a corlib-token helper (AOT out-of-line
 * code), a GC-provided managed allocator, or a specialized allocation
 * function from mono_class_get_allocation_ftn ().
 */
2786 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box)
2788 MonoInst *iargs [2];
2791 if (cfg->opt & MONO_OPT_SHARED) {
/* Shared code: call mono_object_new (domain, klass) */
2792 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2793 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
2795 alloc_ftn = mono_object_new;
2796 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
2797 /* This happens often in argument checking code, eg. throw new FooException... */
2798 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
2799 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
2800 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
2802 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2803 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2806 if (managed_alloc) {
/* GC provides a managed fast-path allocator: call it directly */
2807 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2808 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
2810 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocators take the instance size in words as first argument */
2812 guint32 lw = vtable->klass->instance_size;
2813 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
2814 EMIT_NEW_ICONST (cfg, iargs [0], lw);
2815 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
2818 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2822 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_alloc_from_inst:
 *
 *   Like handle_alloc (), but for generic-shared code where the vtable is
 * only known at runtime: @data_inst is an IR value holding it.  Falls back
 * to mono_object_new / mono_object_new_specific icalls since a managed
 * allocator can't be chosen for an open class.
 */
2826 handle_alloc_from_inst (MonoCompile *cfg, MonoClass *klass, MonoInst *data_inst,
2829 MonoInst *iargs [2];
2830 MonoMethod *managed_alloc = NULL;
2834 FIXME: we cannot get managed_alloc here because we can't get
2835 the class's vtable (because it's not a closed class)
2837 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2838 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2841 if (cfg->opt & MONO_OPT_SHARED) {
/* Shared code: mono_object_new (domain, runtime-supplied klass) */
2842 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2843 iargs [1] = data_inst;
2844 alloc_ftn = mono_object_new;
2846 if (managed_alloc) {
2847 iargs [0] = data_inst;
2848 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
/* Default: allocate from the runtime-supplied vtable */
2851 iargs [0] = data_inst;
2852 alloc_ftn = mono_object_new_specific;
2855 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_box:
 *
 *   Emit IR for the CIL 'box' opcode: allocate a boxed instance of @klass
 * and store @val into its payload.  Nullable<T> is boxed through its own
 * Box method, which implements null-propagation semantics.
 */
2859 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass)
2861 MonoInst *alloc, *ins;
2863 if (mono_class_is_nullable (klass)) {
2864 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
2865 return mono_emit_method_call (cfg, method, &val, NULL);
2868 alloc = handle_alloc (cfg, klass, TRUE);
/* Store the value right after the MonoObject header */
2870 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * handle_box_from_inst:
 *
 *   Generic-sharing variant of handle_box (): @data_inst carries the
 * runtime vtable for the (open) @klass.  Nullable<T> goes through an
 * indirect call to its Box method fetched from the rgctx.
 */
2876 handle_box_from_inst (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoInst *data_inst)
2878 MonoInst *alloc, *ins;
2880 if (mono_class_is_nullable (klass)) {
2881 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
2882 /* FIXME: What if the class is shared? We might not
2883 have to get the method address from the RGCTX. */
2884 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
2885 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2886 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2888 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
2890 alloc = handle_alloc_from_inst (cfg, klass, data_inst, TRUE);
/* Store the value right after the MonoObject header */
2892 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * handle_castclass:
 *
 *   Emit IR for the CIL 'castclass' opcode: verify @src is null or an
 * instance of @klass, throwing InvalidCastException otherwise, and pass
 * the reference through.  Null skips all checks via is_null_bb.
 */
2899 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
2901 MonoBasicBlock *is_null_bb;
2902 int obj_reg = src->dreg;
2903 int vtable_reg = alloc_preg (cfg);
2905 NEW_BBLOCK (cfg, is_null_bb);
/* null always casts successfully */
2907 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
2908 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
2910 save_cast_details (cfg, klass, obj_reg);
2912 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2913 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2914 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
2916 int klass_reg = alloc_preg (cfg);
2918 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array class in JIT mode: a single vtable compare suffices */
2920 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
2921 /* the remoting code is broken, access the class for now */
2923 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
2924 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
2926 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2927 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
2929 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* General case: walk the class hierarchy */
2931 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2932 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, is_null_bb);
2936 MONO_START_BB (cfg, is_null_bb);
2938 reset_cast_details (cfg);
/*
 * handle_isinst:
 *
 *   Emit IR for the CIL 'isinst' opcode: returns @src if it is null or an
 * instance of @klass, NULL otherwise.  Control flow: is_null_bb keeps the
 * (pre-copied) input as result, false_bb overwrites the result with NULL,
 * end_bb joins both paths.
 */
2944 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
2947 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
2948 int obj_reg = src->dreg;
2949 int vtable_reg = alloc_preg (cfg);
2950 int res_reg = alloc_preg (cfg);
2952 NEW_BBLOCK (cfg, is_null_bb);
2953 NEW_BBLOCK (cfg, false_bb);
2954 NEW_BBLOCK (cfg, end_bb);
2956 /* Do the assignment at the beginning, so the other assignment can be if converted */
2957 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
2958 ins->type = STACK_OBJ;
/* null input: result stays null (== the copied input) */
2961 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
2962 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
2964 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2965 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2966 /* the is_null_bb target simply copies the input register to the output */
2967 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
2969 int klass_reg = alloc_preg (cfg);
2971 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Array case: check rank, then the element (cast) class */
2974 int rank_reg = alloc_preg (cfg);
2975 int eclass_reg = alloc_preg (cfg);
2977 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2978 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
2979 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
2980 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2981 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Special-case cast classes with enum-related variance rules */
2982 if (klass->cast_class == mono_defaults.object_class) {
2983 int parent_reg = alloc_preg (cfg);
2984 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
2985 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
2986 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2987 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2988 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
2989 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
2990 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2991 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2992 } else if (klass->cast_class == mono_defaults.enum_class) {
2993 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
2994 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
2995 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
2996 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
2998 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
2999 /* Check that the object is a vector too */
3000 int bounds_reg = alloc_preg (cfg);
3001 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3002 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3003 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3006 /* the is_null_bb target simply copies the input register to the output */
3007 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3009 } else if (mono_class_is_nullable (klass)) {
3010 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3011 /* the is_null_bb target simply copies the input register to the output */
3012 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed class in JIT mode: a single vtable compare suffices */
3014 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3015 /* the remoting code is broken, access the class for now */
3017 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3018 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3020 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3021 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3023 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3024 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3026 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3027 /* the is_null_bb target simply copies the input register to the output */
3028 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, is_null_bb);
/* Failure path: result is NULL */
3033 MONO_START_BB (cfg, false_bb);
3035 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3036 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3038 MONO_START_BB (cfg, is_null_bb);
3040 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the remoting-aware isinst variant (see inline comment for
 * the 0/1/2 result encoding).  Transparent proxies whose type can't be
 * decided at JIT time yield 2 so the caller can do a runtime check.
 */
3046 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3048 /* This opcode takes as input an object reference and a class, and returns:
3049 0) if the object is an instance of the class,
3050 1) if the object is not instance of the class,
3051 2) if the object is a proxy whose type cannot be determined */
3054 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3055 int obj_reg = src->dreg;
3056 int dreg = alloc_ireg (cfg);
3058 int klass_reg = alloc_preg (cfg);
3060 NEW_BBLOCK (cfg, true_bb);
3061 NEW_BBLOCK (cfg, false_bb);
3062 NEW_BBLOCK (cfg, false2_bb);
3063 NEW_BBLOCK (cfg, end_bb);
3064 NEW_BBLOCK (cfg, no_proxy_bb);
/* null is not an instance: result 1 */
3066 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3067 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3069 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3070 NEW_BBLOCK (cfg, interface_fail_bb);
3072 tmp_reg = alloc_preg (cfg);
3073 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3074 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
/* Interface check failed: maybe it's a proxy with custom type info */
3075 MONO_START_BB (cfg, interface_fail_bb);
3076 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3078 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3080 tmp_reg = alloc_preg (cfg);
3081 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3082 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3083 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
/* Non-interface: distinguish proxy vs. ordinary object */
3085 tmp_reg = alloc_preg (cfg);
3086 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3087 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3089 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3090 tmp_reg = alloc_preg (cfg);
3091 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3092 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class))_PLACEHOLDER;
3094 tmp_reg = alloc_preg (cfg);
3095 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3096 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3097 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
/* Proxy with custom type info: unresolved mismatch yields 2 */
3099 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3100 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3102 MONO_START_BB (cfg, no_proxy_bb);
3104 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
3107 MONO_START_BB (cfg, false_bb);
3109 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3110 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3112 MONO_START_BB (cfg, false2_bb);
3114 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3115 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3117 MONO_START_BB (cfg, true_bb);
3119 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3121 MONO_START_BB (cfg, end_bb);
/* Materialize the result as an I4 on the evaluation stack */
3124 MONO_INST_NEW (cfg, ins, OP_ICONST);
3126 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the remoting-aware castclass variant (see inline comment
 * for the 0/1 result encoding).  Unlike handle_cisinst (), a definite
 * mismatch throws InvalidCastException instead of returning a code.
 */
3132 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3134 /* This opcode takes as input an object reference and a class, and returns:
3135 0) if the object is an instance of the class,
3136 1) if the object is a proxy whose type cannot be determined
3137 an InvalidCastException exception is thrown otherwhise*/
3140 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3141 int obj_reg = src->dreg;
3142 int dreg = alloc_ireg (cfg);
3143 int tmp_reg = alloc_preg (cfg);
3144 int klass_reg = alloc_preg (cfg);
3146 NEW_BBLOCK (cfg, end_bb);
3147 NEW_BBLOCK (cfg, ok_result_bb);
/* null always casts successfully: result 0 */
3149 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3150 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
3152 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3153 NEW_BBLOCK (cfg, interface_fail_bb);
3155 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3156 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
/* Interface check failed: only a proxy with custom type info survives */
3157 MONO_START_BB (cfg, interface_fail_bb);
3158 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3160 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3162 tmp_reg = alloc_preg (cfg);
3163 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3164 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3165 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Undecidable proxy: result 1, runtime must check */
3167 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3168 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3171 NEW_BBLOCK (cfg, no_proxy_bb);
3173 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3174 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3175 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3177 tmp_reg = alloc_preg (cfg);
3178 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3179 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3181 tmp_reg = alloc_preg (cfg);
3182 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3183 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3184 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3186 NEW_BBLOCK (cfg, fail_1_bb);
3188 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3190 MONO_START_BB (cfg, fail_1_bb);
3192 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3193 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Ordinary object: a normal castclass check (throws on mismatch) */
3195 MONO_START_BB (cfg, no_proxy_bb);
3197 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3200 MONO_START_BB (cfg, ok_result_bb);
3202 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3204 MONO_START_BB (cfg, end_bb);
/* Materialize the result as an I4 on the evaluation stack */
3207 MONO_INST_NEW (cfg, ins, OP_ICONST);
3209 ins->type = STACK_I4;
/*
 * handle_delegate_ctor:
 *
 *   Emit inlined IR equivalent to mono_delegate_ctor (): allocate a
 * delegate of @klass, fill in its target, method, per-domain method_code
 * slot (JIT only) and invoke_impl trampoline.  Returns the new delegate.
 */
3214 static G_GNUC_UNUSED MonoInst*
3215 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method)
3217 gpointer *trampoline;
3218 MonoInst *obj, *method_ins, *tramp_ins;
3222 obj = handle_alloc (cfg, klass, FALSE);
3224 /* Inline the contents of mono_delegate_ctor */
3226 /* Set target field */
3227 /* Optimize away setting of NULL target */
3228 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
3229 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3231 /* Set method field */
3232 EMIT_NEW_METHODCONST (cfg, method_ins, method);
3233 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3236 * To avoid looking up the compiled code belonging to the target method
3237 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3238 * store it, and we fill it after the method has been compiled.
3240 if (!cfg->compile_aot && !method->dynamic) {
3241 MonoInst *code_slot_ins;
/* Find or create the per-method code slot under the domain lock */
3243 domain = mono_domain_get ();
3244 mono_domain_lock (domain);
3245 if (!domain_jit_info (domain)->method_code_hash)
3246 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3247 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3249 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3250 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3252 mono_domain_unlock (domain);
3254 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3255 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3258 /* Set invoke_impl field */
3259 if (cfg->compile_aot) {
3260 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3262 trampoline = mono_create_delegate_trampoline (klass);
3263 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3265 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3267 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit IR for multi-dimensional array creation (newobj on an array
 * ctor): call the rank-specific vararg array-new icall wrapper with the
 * dimension arguments in @sp.
 */
3273 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3275 MonoJitICallInfo *info;
3277 /* Need to register the icall so it gets an icall wrapper */
3278 info = mono_get_array_new_va_icall (rank);
/* The icall is vararg, so the method needs vararg call support */
3280 cfg->flags |= MONO_CFG_HAS_VARARGS;
3282 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3283 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   If the method uses a GOT variable that hasn't been materialized yet,
 * insert an OP_LOAD_GOTADDR at the very start of the entry bblock and add
 * a dummy use in the exit bblock so liveness keeps the var alive for the
 * whole method.  Idempotent: returns early once allocated.
 */
3287 mono_emit_load_got_addr (MonoCompile *cfg)
3289 MonoInst *getaddr, *dummy_use;
3291 if (!cfg->got_var || cfg->got_var_allocated)
3294 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3295 getaddr->dreg = cfg->got_var->dreg;
3297 /* Add it to the start of the first bblock */
3298 if (cfg->bb_entry->code) {
3299 getaddr->next = cfg->bb_entry->code;
3300 cfg->bb_entry->code = getaddr;
3303 MONO_ADD_INS (cfg->bb_entry, getaddr);
3305 cfg->got_var_allocated = TRUE;
3308 * Add a dummy use to keep the got_var alive, since real uses might
3309 * only be generated by the back ends.
3310 * Add it to end_bblock, so the variable's lifetime covers the whole
3312 * It would be better to make the usage of the got var explicit in all
3313 * cases when the backend needs it (i.e. calls, throw etc.), so this
3314 * wouldn't be needed.
3316 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3317 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Inlining size cutoff (in IL bytes), read once from the MONO_INLINELIMIT
 * env var (default INLINE_LENGTH_LIMIT) by mono_method_check_inlining (). */
3320 static int inline_limit;
3321 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether @method may be inlined into the method being compiled.
 * Rejects: generic-shared compilation, runtime/icall/noinline/synchronized
 * implementations, pinvokes, MarshalByRef classes, methods with exception
 * clauses, bodies >= inline_limit IL bytes, classes whose cctor can't be
 * run eagerly, methods with declarative security (CAS), and (on soft-float
 * targets) any R4 in the signature.
 */
3324 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3326 MonoMethodHeader *header;
3328 #ifdef MONO_ARCH_SOFT_FLOAT
3329 MonoMethodSignature *sig = mono_method_signature (method);
3333 if (cfg->generic_sharing_context)
3336 #ifdef MONO_ARCH_HAVE_LMF_OPS
/* With LMF ops, small icall/pinvoke methods can be treated specially */
3337 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3338 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3339 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3343 if (method->is_inflated)
3344 /* Avoid inflating the header */
3345 header = mono_method_get_header (((MonoMethodInflated*)method)->declaring);
3347 header = mono_method_get_header (method);
/* Categories of methods that can never be inlined */
3349 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME) ||
3350 (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3351 (method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3352 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3353 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) ||
3354 (method->klass->marshalbyref) ||
3355 !header || header->num_clauses)
3358 /* also consider num_locals? */
3359 /* Do the size check early to avoid creating vtables */
3360 if (!inline_limit_inited) {
3361 if (getenv ("MONO_INLINELIMIT"))
3362 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3364 inline_limit = INLINE_LENGTH_LIMIT;
3365 inline_limit_inited = TRUE;
3367 if (header->code_size >= inline_limit)
3371 * if we can initialize the class of the method right away, we do,
3372 * otherwise we don't allow inlining if the class needs initialization,
3373 * since it would mean inserting a call to mono_runtime_class_init()
3374 * inside the inlined code
3376 if (!(cfg->opt & MONO_OPT_SHARED)) {
3377 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3378 if (cfg->run_cctors && method->klass->has_cctor) {
3379 if (!method->klass->runtime_info)
3380 /* No vtable created yet */
3382 vtable = mono_class_vtable (cfg->domain, method->klass);
3385 /* This makes so that inline cannot trigger */
3386 /* .cctors: too many apps depend on them */
3387 /* running with a specific order... */
3388 if (! vtable->initialized)
3390 mono_runtime_class_init (vtable);
3392 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3393 if (!method->klass->runtime_info)
3394 /* No vtable created yet */
3396 vtable = mono_class_vtable (cfg->domain, method->klass);
3399 if (!vtable->initialized)
3404 * If we're compiling for shared code
3405 * the cctor will need to be run at aot method load time, for example,
3406 * or at the end of the compilation of the inlining method.
3408 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3413 * CAS - do not inline methods with declarative security
3414 * Note: this has to be before any possible return TRUE;
3416 if (mono_method_has_declsec (method))
3419 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float targets can't inline methods handling R4 values */
3421 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3423 for (i = 0; i < sig->param_count; ++i)
3424 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
3432 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
3434 if (vtable->initialized && !cfg->compile_aot)
3437 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3440 if (!mono_class_needs_cctor_run (vtable->klass, method))
3443 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3444 /* The initialization is already done before the method is called */
3451 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index)
3455 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3457 mono_class_init (klass);
3458 size = mono_class_array_element_size (klass);
3460 mult_reg = alloc_preg (cfg);
3461 array_reg = arr->dreg;
3462 index_reg = index->dreg;
3464 #if SIZEOF_REGISTER == 8
3465 /* The array reg is 64 bits but the index reg is only 32 */
3466 index2_reg = alloc_preg (cfg);
3467 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
3469 if (index->type == STACK_I8) {
3470 index2_reg = alloc_preg (cfg);
3471 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
3473 index2_reg = index_reg;
3477 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3479 #if defined(__i386__) || defined(__x86_64__)
3480 if (size == 1 || size == 2 || size == 4 || size == 8) {
3481 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3483 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3484 ins->type = STACK_PTR;
3490 add_reg = alloc_preg (cfg);
3492 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3493 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3494 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3495 ins->type = STACK_PTR;
3496 MONO_ADD_INS (cfg->cbb, ins);
3501 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3503 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
3505 int bounds_reg = alloc_preg (cfg);
3506 int add_reg = alloc_preg (cfg);
3507 int mult_reg = alloc_preg (cfg);
3508 int mult2_reg = alloc_preg (cfg);
3509 int low1_reg = alloc_preg (cfg);
3510 int low2_reg = alloc_preg (cfg);
3511 int high1_reg = alloc_preg (cfg);
3512 int high2_reg = alloc_preg (cfg);
3513 int realidx1_reg = alloc_preg (cfg);
3514 int realidx2_reg = alloc_preg (cfg);
3515 int sum_reg = alloc_preg (cfg);
3520 mono_class_init (klass);
3521 size = mono_class_array_element_size (klass);
3523 index1 = index_ins1->dreg;
3524 index2 = index_ins2->dreg;
3526 /* range checking */
3527 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
3528 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
3530 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
3531 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3532 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
3533 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
3534 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
3535 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
3536 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
3538 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
3539 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3540 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
3541 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
3542 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
3543 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
3544 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
3546 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
3547 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
3548 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
3549 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
3550 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3552 ins->type = STACK_MP;
3554 MONO_ADD_INS (cfg->cbb, ins);
3561 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
3565 MonoMethod *addr_method;
3568 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3571 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1]);
3573 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3574 /* emit_ldelema_2 depends on OP_LMUL */
3575 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3576 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
3580 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3581 addr_method = mono_marshal_get_array_address (rank, element_size);
3582 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
3588 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
3590 MonoInst *ins = NULL;
3592 static MonoClass *runtime_helpers_class = NULL;
3593 if (! runtime_helpers_class)
3594 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
3595 "System.Runtime.CompilerServices", "RuntimeHelpers");
3597 if (cmethod->klass == mono_defaults.string_class) {
3598 if (strcmp (cmethod->name, "get_Chars") == 0) {
3599 int dreg = alloc_ireg (cfg);
3600 int index_reg = alloc_preg (cfg);
3601 int mult_reg = alloc_preg (cfg);
3602 int add_reg = alloc_preg (cfg);
3604 #if SIZEOF_REGISTER == 8
3605 /* The array reg is 64 bits but the index reg is only 32 */
3606 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
3608 index_reg = args [1]->dreg;
3610 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
3612 #if defined(__i386__) || defined(__x86_64__)
3613 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
3614 add_reg = ins->dreg;
3615 /* Avoid a warning */
3617 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3620 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
3621 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3622 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3623 add_reg, G_STRUCT_OFFSET (MonoString, chars));
3625 type_from_op (ins, NULL, NULL);
3627 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3628 int dreg = alloc_ireg (cfg);
3629 /* Decompose later to allow more optimizations */
3630 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
3631 ins->type = STACK_I4;
3632 cfg->cbb->has_array_access = TRUE;
3633 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
3636 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
3637 int mult_reg = alloc_preg (cfg);
3638 int add_reg = alloc_preg (cfg);
3640 /* The corlib functions check for oob already. */
3641 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
3642 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3643 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
3646 } else if (cmethod->klass == mono_defaults.object_class) {
3648 if (strcmp (cmethod->name, "GetType") == 0) {
3649 int dreg = alloc_preg (cfg);
3650 int vt_reg = alloc_preg (cfg);
3651 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3652 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
3653 type_from_op (ins, NULL, NULL);
3656 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
3657 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
3658 int dreg = alloc_ireg (cfg);
3659 int t1 = alloc_ireg (cfg);
3661 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
3662 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
3663 ins->type = STACK_I4;
3667 } else if (strcmp (cmethod->name, ".ctor") == 0) {
3668 MONO_INST_NEW (cfg, ins, OP_NOP);
3669 MONO_ADD_INS (cfg->cbb, ins);
3673 } else if (cmethod->klass == mono_defaults.array_class) {
3674 if (cmethod->name [0] != 'g')
3677 if (strcmp (cmethod->name, "get_Rank") == 0) {
3678 int dreg = alloc_ireg (cfg);
3679 int vtable_reg = alloc_preg (cfg);
3680 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, vtable_reg,
3681 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3682 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
3683 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3684 type_from_op (ins, NULL, NULL);
3687 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3688 int dreg = alloc_ireg (cfg);
3690 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
3691 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
3692 type_from_op (ins, NULL, NULL);
3697 } else if (cmethod->klass == runtime_helpers_class) {
3699 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
3700 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
3704 } else if (cmethod->klass == mono_defaults.thread_class) {
3705 if (strcmp (cmethod->name, "get_CurrentThread") == 0 && (ins = mono_arch_get_thread_intrinsic (cfg))) {
3706 ins->dreg = alloc_preg (cfg);
3707 ins->type = STACK_OBJ;
3708 MONO_ADD_INS (cfg->cbb, ins);
3710 } else if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
3711 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
3712 MONO_ADD_INS (cfg->cbb, ins);
3714 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
3715 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
3716 MONO_ADD_INS (cfg->cbb, ins);
3719 } else if (cmethod->klass == mono_defaults.monitor_class) {
3720 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
3721 if (strcmp (cmethod->name, "Enter") == 0) {
3724 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
3725 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
3726 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
3727 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
3729 return (MonoInst*)call;
3730 } else if (strcmp (cmethod->name, "Exit") == 0) {
3733 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
3734 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
3735 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
3736 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
3738 return (MonoInst*)call;
3740 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
3741 MonoMethod *fast_method = NULL;
3743 /* Avoid infinite recursion */
3744 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
3745 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
3746 strcmp (cfg->method->name, "FastMonitorExit") == 0))
3749 if (strcmp (cmethod->name, "Enter") == 0 ||
3750 strcmp (cmethod->name, "Exit") == 0)
3751 fast_method = mono_monitor_get_fast_path (cmethod);
3755 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
3757 } else if (mini_class_is_system_array (cmethod->klass) &&
3758 strcmp (cmethod->name, "GetGenericValueImpl") == 0) {
3759 MonoInst *addr, *store, *load;
3760 MonoClass *eklass = mono_class_from_mono_type (fsig->params [1]);
3762 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1]);
3763 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
3764 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
3766 } else if (cmethod->klass->image == mono_defaults.corlib &&
3767 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
3768 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
3771 #if SIZEOF_REGISTER == 8
3772 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
3773 /* 64 bit reads are already atomic */
3774 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
3775 ins->dreg = mono_alloc_preg (cfg);
3776 ins->inst_basereg = args [0]->dreg;
3777 ins->inst_offset = 0;
3778 MONO_ADD_INS (cfg->cbb, ins);
3782 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
3783 if (strcmp (cmethod->name, "Increment") == 0) {
3784 MonoInst *ins_iconst;
3787 if (fsig->params [0]->type == MONO_TYPE_I4)
3788 opcode = OP_ATOMIC_ADD_NEW_I4;
3789 #if SIZEOF_REGISTER == 8
3790 else if (fsig->params [0]->type == MONO_TYPE_I8)
3791 opcode = OP_ATOMIC_ADD_NEW_I8;
3794 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3795 ins_iconst->inst_c0 = 1;
3796 ins_iconst->dreg = mono_alloc_ireg (cfg);
3797 MONO_ADD_INS (cfg->cbb, ins_iconst);
3799 MONO_INST_NEW (cfg, ins, opcode);
3800 ins->dreg = mono_alloc_ireg (cfg);
3801 ins->inst_basereg = args [0]->dreg;
3802 ins->inst_offset = 0;
3803 ins->sreg2 = ins_iconst->dreg;
3804 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3805 MONO_ADD_INS (cfg->cbb, ins);
3807 } else if (strcmp (cmethod->name, "Decrement") == 0) {
3808 MonoInst *ins_iconst;
3811 if (fsig->params [0]->type == MONO_TYPE_I4)
3812 opcode = OP_ATOMIC_ADD_NEW_I4;
3813 #if SIZEOF_REGISTER == 8
3814 else if (fsig->params [0]->type == MONO_TYPE_I8)
3815 opcode = OP_ATOMIC_ADD_NEW_I8;
3818 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3819 ins_iconst->inst_c0 = -1;
3820 ins_iconst->dreg = mono_alloc_ireg (cfg);
3821 MONO_ADD_INS (cfg->cbb, ins_iconst);
3823 MONO_INST_NEW (cfg, ins, opcode);
3824 ins->dreg = mono_alloc_ireg (cfg);
3825 ins->inst_basereg = args [0]->dreg;
3826 ins->inst_offset = 0;
3827 ins->sreg2 = ins_iconst->dreg;
3828 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3829 MONO_ADD_INS (cfg->cbb, ins);
3831 } else if (strcmp (cmethod->name, "Add") == 0) {
3834 if (fsig->params [0]->type == MONO_TYPE_I4)
3835 opcode = OP_ATOMIC_ADD_NEW_I4;
3836 #if SIZEOF_REGISTER == 8
3837 else if (fsig->params [0]->type == MONO_TYPE_I8)
3838 opcode = OP_ATOMIC_ADD_NEW_I8;
3842 MONO_INST_NEW (cfg, ins, opcode);
3843 ins->dreg = mono_alloc_ireg (cfg);
3844 ins->inst_basereg = args [0]->dreg;
3845 ins->inst_offset = 0;
3846 ins->sreg2 = args [1]->dreg;
3847 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3848 MONO_ADD_INS (cfg->cbb, ins);
3851 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
3853 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
3854 if (strcmp (cmethod->name, "Exchange") == 0) {
3857 if (fsig->params [0]->type == MONO_TYPE_I4)
3858 opcode = OP_ATOMIC_EXCHANGE_I4;
3859 #if SIZEOF_REGISTER == 8
3860 else if ((fsig->params [0]->type == MONO_TYPE_I8) ||
3861 (fsig->params [0]->type == MONO_TYPE_I) ||
3862 (fsig->params [0]->type == MONO_TYPE_OBJECT))
3863 opcode = OP_ATOMIC_EXCHANGE_I8;
3865 else if ((fsig->params [0]->type == MONO_TYPE_I) ||
3866 (fsig->params [0]->type == MONO_TYPE_OBJECT))
3867 opcode = OP_ATOMIC_EXCHANGE_I4;
3872 MONO_INST_NEW (cfg, ins, opcode);
3873 ins->dreg = mono_alloc_ireg (cfg);
3874 ins->inst_basereg = args [0]->dreg;
3875 ins->inst_offset = 0;
3876 ins->sreg2 = args [1]->dreg;
3877 MONO_ADD_INS (cfg->cbb, ins);
3879 switch (fsig->params [0]->type) {
3881 ins->type = STACK_I4;
3885 ins->type = STACK_I8;
3887 case MONO_TYPE_OBJECT:
3888 ins->type = STACK_OBJ;
3891 g_assert_not_reached ();
3894 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
3896 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
3897 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
3899 if (fsig->params [1]->type == MONO_TYPE_I4)
3901 else if (fsig->params [1]->type == MONO_TYPE_I || MONO_TYPE_IS_REFERENCE (fsig->params [1]))
3902 size = sizeof (gpointer);
3903 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I4)
3906 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
3907 ins->dreg = alloc_ireg (cfg);
3908 ins->sreg1 = args [0]->dreg;
3909 ins->sreg2 = args [1]->dreg;
3910 ins->sreg3 = args [2]->dreg;
3911 ins->type = STACK_I4;
3912 MONO_ADD_INS (cfg->cbb, ins);
3913 } else if (size == 8) {
3914 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
3915 ins->dreg = alloc_ireg (cfg);
3916 ins->sreg1 = args [0]->dreg;
3917 ins->sreg2 = args [1]->dreg;
3918 ins->sreg3 = args [2]->dreg;
3919 ins->type = STACK_I8;
3920 MONO_ADD_INS (cfg->cbb, ins);
3922 /* g_assert_not_reached (); */
3925 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
3929 } else if (cmethod->klass->image == mono_defaults.corlib) {
3930 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
3931 && strcmp (cmethod->klass->name, "Debugger") == 0) {
3932 MONO_INST_NEW (cfg, ins, OP_BREAK);
3933 MONO_ADD_INS (cfg->cbb, ins);
3936 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
3937 && strcmp (cmethod->klass->name, "Environment") == 0) {
3938 #ifdef PLATFORM_WIN32
3939 EMIT_NEW_ICONST (cfg, ins, 1);
3941 EMIT_NEW_ICONST (cfg, ins, 0);
3945 } else if (cmethod->klass == mono_defaults.math_class) {
3947 * There is general branches code for Min/Max, but it does not work for
3949 * http://everything2.com/?node_id=1051618
3953 #ifdef MONO_ARCH_SIMD_INTRINSICS
3954 if (cfg->opt & MONO_OPT_SIMD) {
3955 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
3961 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
3965 * This entry point could be used later for arbitrary method
3968 inline static MonoInst*
3969 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
3970 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
3972 if (method->klass == mono_defaults.string_class) {
3973 /* managed string allocation support */
3974 if (strcmp (method->name, "InternalAllocateStr") == 0) {
3975 MonoInst *iargs [2];
3976 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3977 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
3980 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3981 iargs [1] = args [0];
3982 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
3989 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
3991 MonoInst *store, *temp;
3994 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3995 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
3998 * FIXME: We should use *args++ = sp [0], but that would mean the arg
3999 * would be different than the MonoInst's used to represent arguments, and
4000 * the ldelema implementation can't deal with that.
4001 * Solution: When ldelema is used on an inline argument, create a var for
4002 * it, emit ldelema on that var, and emit the saving code below in
4003 * inline_method () if needed.
4005 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4006 cfg->args [i] = temp;
4007 /* This uses cfg->args [i] which is set by the preceeding line */
4008 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4009 store->cil_code = sp [0]->cil_code;
4014 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4015 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4017 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4019 check_inline_called_method_name_limit (MonoMethod *called_method)
4022 static char *limit = NULL;
4024 if (limit == NULL) {
4025 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4027 if (limit_string != NULL)
4028 limit = limit_string;
4030 limit = (char *) "";
4033 if (limit [0] != '\0') {
4034 char *called_method_name = mono_method_full_name (called_method, TRUE);
4036 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4037 g_free (called_method_name);
4039 //return (strncmp_result <= 0);
4040 return (strncmp_result == 0);
4047 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4049 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4052 static char *limit = NULL;
4054 if (limit == NULL) {
4055 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4056 if (limit_string != NULL) {
4057 limit = limit_string;
4059 limit = (char *) "";
4063 if (limit [0] != '\0') {
4064 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
4066 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4067 g_free (caller_method_name);
4069 //return (strncmp_result <= 0);
4070 return (strncmp_result == 0);
4078 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4079 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
4081 MonoInst *ins, *rvar = NULL;
4082 MonoMethodHeader *cheader;
4083 MonoBasicBlock *ebblock, *sbblock;
4085 MonoMethod *prev_inlined_method;
4086 MonoInst **prev_locals, **prev_args;
4087 MonoType **prev_arg_types;
4088 guint prev_real_offset;
4089 GHashTable *prev_cbb_hash;
4090 MonoBasicBlock **prev_cil_offset_to_bb;
4091 MonoBasicBlock *prev_cbb;
4092 unsigned char* prev_cil_start;
4093 guint32 prev_cil_offset_to_bb_len;
4094 MonoMethod *prev_current_method;
4095 MonoGenericContext *prev_generic_context;
4096 gboolean ret_var_set, prev_ret_var_set;
4098 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
4100 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4101 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
4104 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4105 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
4109 if (cfg->verbose_level > 2)
4110 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4112 if (!cmethod->inline_info) {
4113 mono_jit_stats.inlineable_methods++;
4114 cmethod->inline_info = 1;
4116 /* allocate space to store the return value */
4117 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4118 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
4121 /* allocate local variables */
4122 cheader = mono_method_get_header (cmethod);
4123 prev_locals = cfg->locals;
4124 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4125 for (i = 0; i < cheader->num_locals; ++i)
4126 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4128 /* allocate start and end blocks */
4129 /* This is needed so if the inline is aborted, we can clean up */
4130 NEW_BBLOCK (cfg, sbblock);
4131 sbblock->real_offset = real_offset;
4133 NEW_BBLOCK (cfg, ebblock);
4134 ebblock->block_num = cfg->num_bblocks++;
4135 ebblock->real_offset = real_offset;
4137 prev_args = cfg->args;
4138 prev_arg_types = cfg->arg_types;
4139 prev_inlined_method = cfg->inlined_method;
4140 cfg->inlined_method = cmethod;
4141 cfg->ret_var_set = FALSE;
4142 prev_real_offset = cfg->real_offset;
4143 prev_cbb_hash = cfg->cbb_hash;
4144 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4145 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4146 prev_cil_start = cfg->cil_start;
4147 prev_cbb = cfg->cbb;
4148 prev_current_method = cfg->current_method;
4149 prev_generic_context = cfg->generic_context;
4150 prev_ret_var_set = cfg->ret_var_set;
4152 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4154 ret_var_set = cfg->ret_var_set;
4156 cfg->inlined_method = prev_inlined_method;
4157 cfg->real_offset = prev_real_offset;
4158 cfg->cbb_hash = prev_cbb_hash;
4159 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4160 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4161 cfg->cil_start = prev_cil_start;
4162 cfg->locals = prev_locals;
4163 cfg->args = prev_args;
4164 cfg->arg_types = prev_arg_types;
4165 cfg->current_method = prev_current_method;
4166 cfg->generic_context = prev_generic_context;
4167 cfg->ret_var_set = prev_ret_var_set;
4169 if ((costs >= 0 && costs < 60) || inline_allways) {
4170 if (cfg->verbose_level > 2)
4171 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4173 mono_jit_stats.inlined_methods++;
4175 /* always add some code to avoid block split failures */
4176 MONO_INST_NEW (cfg, ins, OP_NOP);
4177 MONO_ADD_INS (prev_cbb, ins);
4179 prev_cbb->next_bb = sbblock;
4180 link_bblock (cfg, prev_cbb, sbblock);
4183 * Get rid of the begin and end bblocks if possible to aid local
4186 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4188 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4189 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4191 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4192 MonoBasicBlock *prev = ebblock->in_bb [0];
4193 mono_merge_basic_blocks (cfg, prev, ebblock);
4195 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4196 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4197 cfg->cbb = prev_cbb;
4205 * If the inlined method contains only a throw, then the ret var is not
4206 * set, so set it to a dummy value.
4209 static double r8_0 = 0.0;
4211 switch (rvar->type) {
4213 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4216 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4221 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4224 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4225 ins->type = STACK_R8;
4226 ins->inst_p0 = (void*)&r8_0;
4227 ins->dreg = rvar->dreg;
4228 MONO_ADD_INS (cfg->cbb, ins);
4231 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4234 g_assert_not_reached ();
4238 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
4243 if (cfg->verbose_level > 2)
4244 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4245 cfg->exception_type = MONO_EXCEPTION_NONE;
4246 mono_loader_clear_error ();
4248 /* This gets rid of the newly added bblocks */
4249 cfg->cbb = prev_cbb;
4255 * Some of these comments may well be out-of-date.
4256 * Design decisions: we do a single pass over the IL code (and we do bblock
4257 * splitting/merging in the few cases when it's required: a back jump to an IL
4258 * address that was not already seen as bblock starting point).
4259 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4260 * Complex operations are decomposed in simpler ones right away. We need to let the
4261 * arch-specific code peek and poke inside this process somehow (except when the
4262 * optimizations can take advantage of the full semantic info of coarse opcodes).
4263 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4264 * MonoInst->opcode initially is the IL opcode or some simplification of that
4265 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4266 * opcode with value bigger than OP_LAST.
4267 * At this point the IR can be handed over to an interpreter, a dumb code generator
4268 * or to the optimizing code generator that will translate it to SSA form.
4270 * Profiling directed optimizations.
4271 * We may compile by default with few or no optimizations and instrument the code
4272 * or the user may indicate what methods to optimize the most either in a config file
4273 * or through repeated runs where the compiler applies offline the optimizations to
4274 * each method and then decides if it was worth it.
4277 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4278 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4279 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
4280 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4281 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4282 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4283 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
4284 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
4286 /* offset from br.s -> br like opcodes */
4287 #define BIG_BRANCH_OFFSET 13
4290 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4292 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4294 return b == NULL || b == bb;
4298 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4300 unsigned char *ip = start;
4301 unsigned char *target;
4304 MonoBasicBlock *bblock;
4305 const MonoOpcode *opcode;
4308 cli_addr = ip - start;
4309 i = mono_opcode_value ((const guint8 **)&ip, end);
4312 opcode = &mono_opcodes [i];
4313 switch (opcode->argument) {
4314 case MonoInlineNone:
4317 case MonoInlineString:
4318 case MonoInlineType:
4319 case MonoInlineField:
4320 case MonoInlineMethod:
4323 case MonoShortInlineR:
4330 case MonoShortInlineVar:
4331 case MonoShortInlineI:
4334 case MonoShortInlineBrTarget:
4335 target = start + cli_addr + 2 + (signed char)ip [1];
4336 GET_BBLOCK (cfg, bblock, target);
4339 GET_BBLOCK (cfg, bblock, ip);
4341 case MonoInlineBrTarget:
4342 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4343 GET_BBLOCK (cfg, bblock, target);
4346 GET_BBLOCK (cfg, bblock, ip);
4348 case MonoInlineSwitch: {
4349 guint32 n = read32 (ip + 1);
4352 cli_addr += 5 + 4 * n;
4353 target = start + cli_addr;
4354 GET_BBLOCK (cfg, bblock, target);
4356 for (j = 0; j < n; ++j) {
4357 target = start + cli_addr + (gint32)read32 (ip);
4358 GET_BBLOCK (cfg, bblock, target);
4368 g_assert_not_reached ();
4371 if (i == CEE_THROW) {
4372 unsigned char *bb_start = ip - 1;
4374 /* Find the start of the bblock containing the throw */
4376 while ((bb_start >= start) && !bblock) {
4377 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
4381 bblock->out_of_line = 1;
4390 static inline MonoMethod *
4391 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4395 if (m->wrapper_type != MONO_WRAPPER_NONE)
4396 return mono_method_get_wrapper_data (m, token);
4398 method = mono_get_method_full (m->klass->image, token, klass, context);
4403 static inline MonoMethod *
4404 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4406 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
4408 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
4414 static inline MonoClass*
4415 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4419 if (method->wrapper_type != MONO_WRAPPER_NONE)
4420 klass = mono_method_get_wrapper_data (method, token);
4422 klass = mono_class_get_full (method->klass->image, token, context);
4424 mono_class_init (klass);
4429 * Returns TRUE if the JIT should abort inlining because "callee"
4430 * is influenced by security attributes.
4433 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
4437 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
4441 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
4442 if (result == MONO_JIT_SECURITY_OK)
4445 if (result == MONO_JIT_LINKDEMAND_ECMA) {
4446 /* Generate code to throw a SecurityException before the actual call/link */
4447 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4450 NEW_ICONST (cfg, args [0], 4);
4451 NEW_METHODCONST (cfg, args [1], caller);
4452 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
4453 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
4454 /* don't hide previous results */
4455 cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
4456 cfg->exception_data = result;
4464 method_access_exception (void)
4466 static MonoMethod *method = NULL;
4469 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4470 method = mono_class_get_method_from_name (secman->securitymanager,
4471 "MethodAccessException", 2);
/*
 * emit_throw_method_access_exception:
 *
 *   Emit a call to SecurityManager.MethodAccessException (caller, callee) at
 * the current emission point, so a MethodAccessException is raised at runtime
 * instead of the disallowed call being performed.
 */
4478 emit_throw_method_access_exception (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4479 MonoBasicBlock *bblock, unsigned char *ip)
4481 MonoMethod *thrower = method_access_exception ();
4484 EMIT_NEW_METHODCONST (cfg, args [0], caller);
4485 EMIT_NEW_METHODCONST (cfg, args [1], callee);
4486 mono_emit_method_call (cfg, thrower, args, NULL);
/*
 * field_access_exception:
 *
 *   Lazily look up and cache the 2-argument
 * SecurityManager.FieldAccessException helper used to raise field-access
 * violations from JITted code.
 */
4490 field_access_exception (void)
/* cached across calls; lookup is done only once */
4492 static MonoMethod *method = NULL;
4495 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4496 method = mono_class_get_method_from_name (secman->securitymanager,
4497 "FieldAccessException", 2);
/*
 * emit_throw_field_access_exception:
 *
 *   Emit a call to SecurityManager.FieldAccessException (caller, field) at
 * the current emission point.
 * NOTE(review): args [1] is a MonoClassField* emitted via
 * EMIT_NEW_METHODCONST — verify this is intentional (the const kinds may
 * share a representation) rather than a copy/paste of the method variant.
 */
4504 emit_throw_field_access_exception (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
4505 MonoBasicBlock *bblock, unsigned char *ip)
4507 MonoInst *thrower = field_access_exception ();
4510 EMIT_NEW_METHODCONST (cfg, args [0], caller);
4511 EMIT_NEW_METHODCONST (cfg, args [1], field);
4512 mono_emit_method_call (cfg, thrower, args, NULL);
/*
4516 * Return the original method is a wrapper is specified. We can only access
4517 * the custom attributes from the original method.
 * (i.e. unwrap marshal wrappers so CoreCLR security-level lookups, which read
 * custom attributes, see the real method.)
 */
4520 get_original_method (MonoMethod *method)
/* not a wrapper: already the original */
4522 if (method->wrapper_type == MONO_WRAPPER_NONE)
4525 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
4526 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
4529 /* in other cases we need to find the original method */
4530 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: if FIELD belongs to a [SecurityCritical] class
 * and the (unwrapped) CALLER is Transparent, emit code throwing a
 * FieldAccessException instead of allowing the access.
 */
4534 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
4535 MonoBasicBlock *bblock, unsigned char *ip)
4537 /* there's no restriction to access Transparent or SafeCritical fields, so we only check calls to Critical methods */
4538 if (mono_security_core_clr_class_level (mono_field_get_parent (field)) != MONO_SECURITY_CORE_CLR_CRITICAL)
4541 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4542 caller = get_original_method (caller);
4546 /* caller is Critical! only SafeCritical and Critical callers can access the field, so we throw if caller is Transparent */
4547 if (mono_security_core_clr_method_level (caller, TRUE) == MONO_SECURITY_CORE_CLR_TRANSPARENT)
4548 emit_throw_field_access_exception (cfg, caller, field, bblock, ip);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check, method flavor: if CALLEE is [SecurityCritical]
 * and the (unwrapped) CALLER is Transparent, emit code throwing a
 * MethodAccessException instead of allowing the call.
 */
4552 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4553 MonoBasicBlock *bblock, unsigned char *ip)
4555 /* there's no restriction to call Transparent or SafeCritical code, so we only check calls to Critical methods */
4556 if (mono_security_core_clr_method_level (callee, TRUE) != MONO_SECURITY_CORE_CLR_CRITICAL)
4559 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4560 caller = get_original_method (caller);
4564 /* caller is Critical! only SafeCritical and Critical callers can call it, so we throw if the caller is Transparent */
4565 if (mono_security_core_clr_method_level (caller, TRUE) == MONO_SECURITY_CORE_CLR_TRANSPARENT)
4566 emit_throw_method_access_exception (cfg, caller, callee, bblock, ip);
/*
4570 * Check that the IL instructions at ip are the array initialization
4571 * sequence and return the pointer to the data and the size.
 *
 *   Pattern-matches the C# compiler's idiom for array literals:
 * dup; ldtoken <fld>; call RuntimeHelpers::InitializeArray — so the JIT can
 * replace the call with a direct copy from the field's RVA data.
 * On success *out_size and *out_field_token are filled in; for AOT the RVA
 * itself is returned (lookup deferred to load time). Returns NULL when the
 * pattern does not match or the element type is unsupported.
 * NOTE(review): several interior lines (size table, returns) are elided in
 * this chunk.
 */
4574 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
4577 * newarr[System.Int32]
4579 * ldtoken field valuetype ...
4580 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* byte layout: dup(1) ldtoken(1)+token(4) call(1)+token(4); ip[5]==0x4 checks
 * the ldtoken token's table byte (presumably TypeDef/Field encoding — confirm) */
4582 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
4583 guint32 token = read32 (ip + 7);
4584 guint32 field_token = read32 (ip + 2);
4585 guint32 field_index = field_token & 0xffffff;
4587 const char *data_ptr;
4589 MonoMethod *cmethod;
4590 MonoClass *dummy_class;
4591 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
4597 *out_field_token = field_token;
4599 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* only the real RuntimeHelpers.InitializeArray from corlib qualifies */
4602 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
4604 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
4605 case MONO_TYPE_BOOLEAN:
4609 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
4610 #if G_BYTE_ORDER == G_LITTLE_ENDIAN
4611 case MONO_TYPE_CHAR:
4621 return NULL; /* stupid ARM FP swapped format */
/* the blob must be at least as large as the array contents */
4631 if (size > mono_type_size (field->type, &dummy_align))
4634 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
4635 if (!method->klass->image->dynamic) {
4636 field_index = read32 (ip + 2) & 0xffffff;
4637 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
4638 data_ptr = mono_image_rva_map (method->klass->image, rva);
4639 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
4640 /* for aot code we do the lookup on load */
4641 if (aot && data_ptr)
4642 return GUINT_TO_POINTER (rva);
4644 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* dynamic (Reflection.Emit) image: the data lives on the field itself */
4646 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Flag the compile as failed with InvalidProgramException, embedding the
 * method name and a disassembly of the offending IL instruction (or a note
 * that the body is empty) in the exception message.
 */
4654 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
4656 char *method_fname = mono_method_full_name (method, TRUE);
4659 if (mono_method_get_header (method)->code_size == 0)
4660 method_code = g_strdup ("method body is empty.");
4662 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
4663 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
4664 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
4665 g_free (method_fname);
4666 g_free (method_code);
/*
 * set_exception_object:
 *
 *   Abort the compile with a pre-built managed exception object. The
 * exception_ptr slot is registered as a GC root first so the object is kept
 * alive while the cfg holds it.
 */
4670 set_exception_object (MonoCompile *cfg, MonoException *exception)
4672 cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
4673 MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
4674 cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 *
 *   TRUE if KLASS is a reference type, resolving generic type variables to
 * their sharing-time representation first when compiling shared code (a type
 * variable may stand for a reference type there).
 */
4678 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4682 if (cfg->generic_sharing_context)
4683 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
4685 type = &klass->byval_arg;
4686 return MONO_TYPE_IS_REFERENCE (type);
/*
4690 * mono_decompose_array_access_opts:
4692 * Decompose array access opcodes.
4693 * This should be in decompose.c, but it emits calls so it has to stay here until
4694 * the old JIT is gone.
 *
 *   Lowers OP_*ARRAYLENGTH / bounds checks / array-new / string-length style
 * opcodes into loads, checks and icalls. New code is emitted into a scratch
 * bblock and then spliced over the original instruction by mono_replace_ins.
 * NOTE(review): the case labels for several branches are elided in this
 * chunk; the lowered opcodes listed above are inferred from the visible
 * bodies.
 */
4697 mono_decompose_array_access_opts (MonoCompile *cfg)
4699 MonoBasicBlock *bb, *first_bb;
4702 * Unlike decompose_long_opts, this pass does not alter the CFG of the method so it
4703 * can be executed anytime. It should be run before decompose_long
4707 * Create a dummy bblock and emit code into it so we can use the normal
4708 * code generation macros.
4710 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4711 first_bb = cfg->cbb;
4713 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4715 MonoInst *prev = NULL;
4717 MonoInst *iargs [3];
/* skip blocks with nothing to decompose */
4720 if (!bb->has_array_access)
4723 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE DECOMPOSE-ARRAY-ACCESS-OPTS ");
4725 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4731 for (ins = bb->code; ins; ins = ins->next) {
4732 switch (ins->opcode) {
/* array length: load MonoArray::max_length */
4734 NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg, ins->sreg1,
4735 G_STRUCT_OFFSET (MonoArray, max_length));
4736 MONO_ADD_INS (cfg->cbb, dest);
4738 case OP_BOUNDS_CHECK:
4739 MONO_ARCH_EMIT_BOUNDS_CHECK (cfg, ins->sreg1, ins->inst_imm, ins->sreg2);
/* array allocation: shared code uses the domain-aware icall... */
4742 if (cfg->opt & MONO_OPT_SHARED) {
4743 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4744 EMIT_NEW_CLASSCONST (cfg, iargs [1], ins->inst_newa_class);
4745 MONO_INST_NEW (cfg, iargs [2], OP_MOVE);
4746 iargs [2]->dreg = ins->sreg1;
4748 dest = mono_emit_jit_icall (cfg, mono_array_new, iargs);
4749 dest->dreg = ins->dreg;
/* ...non-shared code can bake in the vtable directly */
4751 MonoVTable *vtable = mono_class_vtable (cfg->domain, mono_array_class_get (ins->inst_newa_class, 1));
4754 NEW_VTABLECONST (cfg, iargs [0], vtable);
4755 MONO_ADD_INS (cfg->cbb, iargs [0]);
4756 MONO_INST_NEW (cfg, iargs [1], OP_MOVE);
4757 iargs [1]->dreg = ins->sreg1;
4759 dest = mono_emit_jit_icall (cfg, mono_array_new_specific, iargs);
4760 dest->dreg = ins->dreg;
/* string length: load MonoString::length */
4764 NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg,
4765 ins->sreg1, G_STRUCT_OFFSET (MonoString, length));
4766 MONO_ADD_INS (cfg->cbb, dest);
4772 g_assert (cfg->cbb == first_bb);
4774 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
4775 /* Replace the original instruction with the new code sequence */
4777 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
/* reset the scratch bblock for the next instruction */
4778 first_bb->code = first_bb->last_ins = NULL;
4779 first_bb->in_count = first_bb->out_count = 0;
4780 cfg->cbb = first_bb;
4787 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER DECOMPOSE-ARRAY-ACCESS-OPTS ");
4797 #ifdef MONO_ARCH_SOFT_FLOAT
/*
4800 * mono_decompose_soft_float:
4802 * Soft float support on ARM. We store each double value in a pair of integer vregs,
4803 * similar to long support on 32 bit platforms. 32 bit float values require special
4804 * handling when used as locals, arguments, and in calls.
4805 * One big problem with soft-float is that there are few r4 test cases in our test suite.
 *
 *   Rewrites every fp opcode into integer/long opcodes plus icalls, using the
 * same scratch-bblock + mono_replace_ins splice scheme as the array pass.
 * After this pass no instruction may have an 'f' register slot (asserted at
 * the bottom of the loop).
 * NOTE(review): many case labels are elided in this chunk; the per-case
 * comments below describe only what the visible bodies show.
 */
4808 mono_decompose_soft_float (MonoCompile *cfg)
4810 MonoBasicBlock *bb, *first_bb;
4813 * This pass creates long opcodes, so it should be run before decompose_long_opts ().
4817 * Create a dummy bblock and emit code into it so we can use the normal
4818 * code generation macros.
4820 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4821 first_bb = cfg->cbb;
4823 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4825 MonoInst *prev = NULL;
4828 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE HANDLE-SOFT-FLOAT ");
4830 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4836 for (ins = bb->code; ins; ins = ins->next) {
4837 const char *spec = INS_INFO (ins->opcode);
4839 /* Most fp operations are handled automatically by opcode emulation */
4841 switch (ins->opcode) {
/* r8 constant -> i8 constant via a double/long union */
4844 d.vald = *(double*)ins->inst_p0;
4845 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
4850 /* We load the r8 value */
4851 d.vald = *(float*)ins->inst_p0;
4852 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
/* fp moves become long/int moves over the vreg pair */
4856 ins->opcode = OP_LMOVE;
4859 ins->opcode = OP_MOVE;
4860 ins->sreg1 = ins->sreg1 + 1;
4863 ins->opcode = OP_MOVE;
4864 ins->sreg1 = ins->sreg1 + 2;
/* fp return -> SETLRET on the two halves of the pair */
4867 int reg = ins->sreg1;
4869 ins->opcode = OP_SETLRET;
4871 ins->sreg1 = reg + 1;
4872 ins->sreg2 = reg + 2;
4875 case OP_LOADR8_MEMBASE:
4876 ins->opcode = OP_LOADI8_MEMBASE;
4878 case OP_STORER8_MEMBASE_REG:
4879 ins->opcode = OP_STOREI8_MEMBASE_REG;
4881 case OP_STORER4_MEMBASE_REG: {
4882 MonoInst *iargs [2];
/* r4 store needs a narrowing conversion, done by the mono_fstore_r4 icall */
4885 /* Arg 1 is the double value */
4886 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4887 iargs [0]->dreg = ins->sreg1;
4889 /* Arg 2 is the address to store to */
4890 addr_reg = mono_alloc_preg (cfg);
4891 EMIT_NEW_BIALU_IMM (cfg, iargs [1], OP_PADD_IMM, addr_reg, ins->inst_destbasereg, ins->inst_offset);
4892 mono_emit_jit_icall (cfg, mono_fstore_r4, iargs);
4896 case OP_LOADR4_MEMBASE: {
4897 MonoInst *iargs [1];
/* r4 load widens to r8 via the mono_fload_r4 icall */
4901 addr_reg = mono_alloc_preg (cfg);
4902 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, addr_reg, ins->inst_basereg, ins->inst_offset);
4903 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
4904 conv->dreg = ins->dreg;
4909 case OP_FCALL_MEMBASE: {
4910 MonoCallInst *call = (MonoCallInst*)ins;
4911 if (call->signature->ret->type == MONO_TYPE_R4) {
4912 MonoCallInst *call2;
4913 MonoInst *iargs [1];
4916 /* Convert the call into a call returning an int */
4917 MONO_INST_NEW_CALL (cfg, call2, OP_CALL);
4918 memcpy (call2, call, sizeof (MonoCallInst));
4919 switch (ins->opcode) {
4921 call2->inst.opcode = OP_CALL;
4924 call2->inst.opcode = OP_CALL_REG;
4926 case OP_FCALL_MEMBASE:
4927 call2->inst.opcode = OP_CALL_MEMBASE;
4930 g_assert_not_reached ();
4932 call2->inst.dreg = mono_alloc_ireg (cfg);
4933 MONO_ADD_INS (cfg->cbb, (MonoInst*)call2);
4935 /* FIXME: Optimize this */
4937 /* Emit an r4->r8 conversion */
4938 EMIT_NEW_VARLOADA_VREG (cfg, iargs [0], call2->inst.dreg, &mono_defaults.int32_class->byval_arg);
4939 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
4940 conv->dreg = ins->dreg;
/* r8-returning calls just become long calls */
4942 switch (ins->opcode) {
4944 ins->opcode = OP_LCALL;
4947 ins->opcode = OP_LCALL_REG;
4949 case OP_FCALL_MEMBASE:
4950 ins->opcode = OP_LCALL_MEMBASE;
4953 g_assert_not_reached ();
4959 MonoJitICallInfo *info;
4960 MonoInst *iargs [2];
4961 MonoInst *call, *cmp, *br;
4963 /* Convert fcompare+fbcc to icall+icompare+beq */
/* look up the emulation icall for the *branch* opcode following the compare */
4965 info = mono_find_jit_opcode_emulation (ins->next->opcode);
4968 /* Create dummy MonoInst's for the arguments */
4969 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4970 iargs [0]->dreg = ins->sreg1;
4971 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
4972 iargs [1]->dreg = ins->sreg2;
4974 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
4976 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
4977 cmp->sreg1 = call->dreg;
4979 MONO_ADD_INS (cfg->cbb, cmp);
4981 MONO_INST_NEW (cfg, br, OP_IBNE_UN);
4982 br->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * 2);
4983 br->inst_true_bb = ins->next->inst_true_bb;
4984 br->inst_false_bb = ins->next->inst_false_bb;
4985 MONO_ADD_INS (cfg->cbb, br);
4987 /* The call sequence might include fp ins */
4990 /* Skip fbcc or fccc */
4991 NULLIFY_INS (ins->next);
4999 MonoJitICallInfo *info;
5000 MonoInst *iargs [2];
5003 /* Convert fccc to icall+icompare+iceq */
5005 info = mono_find_jit_opcode_emulation (ins->opcode);
5008 /* Create dummy MonoInst's for the arguments */
5009 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
5010 iargs [0]->dreg = ins->sreg1;
5011 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
5012 iargs [1]->dreg = ins->sreg2;
5014 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
5016 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, call->dreg, 1);
5017 MONO_EMIT_NEW_UNALU (cfg, OP_ICEQ, ins->dreg, -1);
5019 /* The call sequence might include fp ins */
5024 MonoInst *iargs [2];
5025 MonoInst *call, *cmp;
5027 /* Convert to icall+icompare+cond_exc+move */
5029 /* Create dummy MonoInst's for the arguments */
5030 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
5031 iargs [0]->dreg = ins->sreg1;
5033 call = mono_emit_jit_icall (cfg, mono_isfinite, iargs);
5035 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
5036 cmp->sreg1 = call->dreg;
5038 MONO_ADD_INS (cfg->cbb, cmp);
5040 MONO_EMIT_NEW_COND_EXC (cfg, INE_UN, "ArithmeticException");
5042 /* Do the assignment if the value is finite */
5043 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, ins->dreg, ins->sreg1);
/* sanity: after decomposition no fp vregs may survive */
5049 if (spec [MONO_INST_SRC1] == 'f' || spec [MONO_INST_SRC2] == 'f' || spec [MONO_INST_DEST] == 'f') {
5050 mono_print_ins (ins);
5051 g_assert_not_reached ();
5056 g_assert (cfg->cbb == first_bb);
5058 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
5059 /* Replace the original instruction with the new code sequence */
5061 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
/* reset the scratch bblock for the next instruction */
5062 first_bb->code = first_bb->last_ins = NULL;
5063 first_bb->in_count = first_bb->out_count = 0;
5064 cfg->cbb = first_bb;
5071 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER HANDLE-SOFT-FLOAT ");
/* run now: this pass may have created long opcodes (see note at the top) */
5074 mono_decompose_long_opts (cfg);
/*
 * emit_stloc_ir:
 *
 *   Emit a store of the stack top (*sp) into local N. When the store would be
 * a plain reg-reg move and the source is the immediately preceding
 * ICONST/I8CONST, retarget that constant's dreg to the local instead of
 * emitting a move.
 */
5080 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5083 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5084 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5085 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5086 /* Optimize reg-reg moves away */
5088 * Can't optimize other opcodes, since sp[0] might point to
5089 * the last ins of a decomposed opcode.
5091 sp [0]->dreg = (cfg)->locals [n]->dreg;
5093 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
/*
5098 * ldloca inhibits many optimizations so try to get rid of it in common
 * cases: when ldloca <n> is immediately followed by initobj on that local,
 * replace the pair with a direct PCONST-NULL (reference types) or VZERO
 * (value types) on the local, so no address is ever taken.
 * Returns the new ip on success (NULL / unchanged otherwise — the elided
 * tail is not visible in this chunk).
 */
5101 static inline unsigned char *
5102 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
5111 local = read16 (ip + 2);
/* look ahead for PREFIX1+INITOBJ in the same bblock */
5115 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5116 gboolean skip = FALSE;
5118 /* From the INITOBJ case */
5119 token = read32 (ip + 2);
5120 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5121 CHECK_TYPELOAD (klass);
5122 if (generic_class_is_reference_type (cfg, klass)) {
5123 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5124 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5125 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5126 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5127 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/*
 * is_exception_class:
 *
 *   TRUE if CLASS derives from (or is) System.Exception, by walking the
 * parent chain. (Loop construct is elided in this chunk.)
 */
5140 is_exception_class (MonoClass *class)
5143 if (class == mono_defaults.exception_class)
5145 class = class->parent;
5151 * mono_method_to_ir:
5153 * Translate the .net IL into linear IR.
5156 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5157 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5158 guint inline_offset, gboolean is_virtual_call)
5160 MonoInst *ins, **sp, **stack_start;
5161 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5162 MonoMethod *cmethod, *method_definition;
5163 MonoInst **arg_array;
5164 MonoMethodHeader *header;
5166 guint32 token, ins_flag;
5168 MonoClass *constrained_call = NULL;
5169 unsigned char *ip, *end, *target, *err_pos;
5170 static double r8_0 = 0.0;
5171 MonoMethodSignature *sig;
5172 MonoGenericContext *generic_context = NULL;
5173 MonoGenericContainer *generic_container = NULL;
5174 MonoType **param_types;
5175 int i, n, start_new_bblock, dreg;
5176 int num_calls = 0, inline_costs = 0;
5177 int breakpoint_id = 0;
5179 MonoBoolean security, pinvoke;
5180 MonoSecurityManager* secman = NULL;
5181 MonoDeclSecurityActions actions;
5182 GSList *class_inits = NULL;
5183 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5186 /* serialization and xdomain stuff may need access to private fields and methods */
5187 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5188 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5189 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5190 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5191 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5192 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5194 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5196 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5197 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5198 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5199 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5201 image = method->klass->image;
5202 header = mono_method_get_header (method);
5203 generic_container = mono_method_get_generic_container (method);
5204 sig = mono_method_signature (method);
5205 num_args = sig->hasthis + sig->param_count;
5206 ip = (unsigned char*)header->code;
5207 cfg->cil_start = ip;
5208 end = ip + header->code_size;
5209 mono_jit_stats.cil_code_size += header->code_size;
5211 method_definition = method;
5212 while (method_definition->is_inflated) {
5213 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5214 method_definition = imethod->declaring;
5217 /* SkipVerification is not allowed if core-clr is enabled */
5218 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5220 dont_verify_stloc = TRUE;
5223 if (!dont_verify && mini_method_verify (cfg, method_definition))
5224 goto exception_exit;
5226 if (mono_debug_using_mono_debugger ())
5227 cfg->keep_cil_nops = TRUE;
5229 if (sig->is_inflated)
5230 generic_context = mono_method_get_context (method);
5231 else if (generic_container)
5232 generic_context = &generic_container->context;
5233 cfg->generic_context = generic_context;
5235 if (!cfg->generic_sharing_context)
5236 g_assert (!sig->has_type_parameters);
5238 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5239 g_assert (method->is_inflated);
5240 g_assert (mono_method_get_context (method)->method_inst);
5242 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5243 g_assert (sig->generic_param_count);
5245 if (cfg->method == method) {
5246 cfg->real_offset = 0;
5248 cfg->real_offset = inline_offset;
5251 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5252 cfg->cil_offset_to_bb_len = header->code_size;
5254 cfg->current_method = method;
5256 if (cfg->verbose_level > 2)
5257 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5259 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5261 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5262 for (n = 0; n < sig->param_count; ++n)
5263 param_types [n + sig->hasthis] = sig->params [n];
5264 cfg->arg_types = param_types;
5266 dont_inline = g_list_prepend (dont_inline, method);
5267 if (cfg->method == method) {
5269 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5270 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5273 NEW_BBLOCK (cfg, start_bblock);
5274 cfg->bb_entry = start_bblock;
5275 start_bblock->cil_code = NULL;
5276 start_bblock->cil_length = 0;
5279 NEW_BBLOCK (cfg, end_bblock);
5280 cfg->bb_exit = end_bblock;
5281 end_bblock->cil_code = NULL;
5282 end_bblock->cil_length = 0;
5283 g_assert (cfg->num_bblocks == 2);
5285 arg_array = cfg->args;
5287 if (header->num_clauses) {
5288 cfg->spvars = g_hash_table_new (NULL, NULL);
5289 cfg->exvars = g_hash_table_new (NULL, NULL);
5291 /* handle exception clauses */
5292 for (i = 0; i < header->num_clauses; ++i) {
5293 MonoBasicBlock *try_bb;
5294 MonoExceptionClause *clause = &header->clauses [i];
5295 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5296 try_bb->real_offset = clause->try_offset;
5297 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5298 tblock->real_offset = clause->handler_offset;
5299 tblock->flags |= BB_EXCEPTION_HANDLER;
5301 link_bblock (cfg, try_bb, tblock);
5303 if (*(ip + clause->handler_offset) == CEE_POP)
5304 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5306 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5307 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5308 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5309 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5310 MONO_ADD_INS (tblock, ins);
5312 /* todo: is a fault block unsafe to optimize? */
5313 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5314 tblock->flags |= BB_EXCEPTION_UNSAFE;
5318 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5320 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5322 /* catch and filter blocks get the exception object on the stack */
5323 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5324 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5325 MonoInst *dummy_use;
5327 /* mostly like handle_stack_args (), but just sets the input args */
5328 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5329 tblock->in_scount = 1;
5330 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5331 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5334 * Add a dummy use for the exvar so its liveness info will be
5338 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5340 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5341 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5342 tblock->flags |= BB_EXCEPTION_HANDLER;
5343 tblock->real_offset = clause->data.filter_offset;
5344 tblock->in_scount = 1;
5345 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5346 /* The filter block shares the exvar with the handler block */
5347 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5348 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5349 MONO_ADD_INS (tblock, ins);
5353 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5354 clause->data.catch_class &&
5355 cfg->generic_sharing_context &&
5356 mono_class_check_context_used (clause->data.catch_class)) {
5358 * In shared generic code with catch
5359 * clauses containing type variables
5360 * the exception handling code has to
5361 * be able to get to the rgctx.
5362 * Therefore we have to make sure that
5363 * the vtable/mrgctx argument (for
5364 * static or generic methods) or the
5365 * "this" argument (for non-static
5366 * methods) are live.
5368 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5369 mini_method_get_context (method)->method_inst ||
5370 method->klass->valuetype) {
5371 mono_get_vtable_var (cfg);
5373 MonoInst *dummy_use;
5375 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5380 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5381 cfg->cbb = start_bblock;
5382 cfg->args = arg_array;
5383 mono_save_args (cfg, sig, inline_args);
5386 /* FIRST CODE BLOCK */
5387 NEW_BBLOCK (cfg, bblock);
5388 bblock->cil_code = ip;
5392 ADD_BBLOCK (cfg, bblock);
5394 if (cfg->method == method) {
5395 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5396 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5397 MONO_INST_NEW (cfg, ins, OP_BREAK);
5398 MONO_ADD_INS (bblock, ins);
5402 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5403 secman = mono_security_manager_get_methods ();
5405 security = (secman && mono_method_has_declsec (method));
5406 /* at this point having security doesn't mean we have any code to generate */
5407 if (security && (cfg->method == method)) {
5408 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5409 * And we do not want to enter the next section (with allocation) if we
5410 * have nothing to generate */
5411 security = mono_declsec_get_demands (method, &actions);
5414 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5415 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5417 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5418 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5419 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5421 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5422 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5426 mono_custom_attrs_free (custom);
5429 custom = mono_custom_attrs_from_class (wrapped->klass);
5430 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5434 mono_custom_attrs_free (custom);
5437 /* not a P/Invoke after all */
5442 if ((header->init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5443 /* we use a separate basic block for the initialization code */
5444 NEW_BBLOCK (cfg, init_localsbb);
5445 cfg->bb_init = init_localsbb;
5446 init_localsbb->real_offset = cfg->real_offset;
5447 start_bblock->next_bb = init_localsbb;
5448 init_localsbb->next_bb = bblock;
5449 link_bblock (cfg, start_bblock, init_localsbb);
5450 link_bblock (cfg, init_localsbb, bblock);
5452 cfg->cbb = init_localsbb;
5454 start_bblock->next_bb = bblock;
5455 link_bblock (cfg, start_bblock, bblock);
5458 /* at this point we know, if security is TRUE, that some code needs to be generated */
5459 if (security && (cfg->method == method)) {
5462 mono_jit_stats.cas_demand_generation++;
5464 if (actions.demand.blob) {
5465 /* Add code for SecurityAction.Demand */
5466 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5467 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5468 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5469 mono_emit_method_call (cfg, secman->demand, args, NULL);
5471 if (actions.noncasdemand.blob) {
5472 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5473 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5474 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5475 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5476 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5477 mono_emit_method_call (cfg, secman->demand, args, NULL);
5479 if (actions.demandchoice.blob) {
5480 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5481 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5482 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5483 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5484 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5488 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5490 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5493 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5494 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5495 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5496 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5497 if (!(method->klass && method->klass->image &&
5498 mono_security_core_clr_is_platform_image (method->klass->image))) {
5499 emit_throw_method_access_exception (cfg, method, wrapped, bblock, ip);
5505 if (header->code_size == 0)
5508 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5513 if (cfg->method == method)
5514 mono_debug_init_method (cfg, bblock, breakpoint_id);
5516 for (n = 0; n < header->num_locals; ++n) {
5517 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5522 /* We force the vtable variable here for all shared methods
5523 for the possibility that they might show up in a stack
5524 trace where their exact instantiation is needed. */
5525 if (cfg->generic_sharing_context && method == cfg->method) {
5526 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5527 mini_method_get_context (method)->method_inst ||
5528 method->klass->valuetype) {
5529 mono_get_vtable_var (cfg);
5531 /* FIXME: Is there a better way to do this?
5532 We need the variable live for the duration
5533 of the whole method. */
5534 cfg->args [0]->flags |= MONO_INST_INDIRECT;
5538 /* add a check for this != NULL to inlined methods */
5539 if (is_virtual_call) {
5542 NEW_ARGLOAD (cfg, arg_ins, 0);
5543 MONO_ADD_INS (cfg->cbb, arg_ins);
5544 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
5545 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, arg_ins->dreg);
5546 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, arg_ins->dreg);
5549 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5550 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5553 start_new_bblock = 0;
5557 if (cfg->method == method)
5558 cfg->real_offset = ip - header->code;
5560 cfg->real_offset = inline_offset;
5565 if (start_new_bblock) {
5566 bblock->cil_length = ip - bblock->cil_code;
5567 if (start_new_bblock == 2) {
5568 g_assert (ip == tblock->cil_code);
5570 GET_BBLOCK (cfg, tblock, ip);
5572 bblock->next_bb = tblock;
5575 start_new_bblock = 0;
5576 for (i = 0; i < bblock->in_scount; ++i) {
5577 if (cfg->verbose_level > 3)
5578 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5579 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5583 g_slist_free (class_inits);
5586 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5587 link_bblock (cfg, bblock, tblock);
5588 if (sp != stack_start) {
5589 handle_stack_args (cfg, stack_start, sp - stack_start);
5591 CHECK_UNVERIFIABLE (cfg);
5593 bblock->next_bb = tblock;
5596 for (i = 0; i < bblock->in_scount; ++i) {
5597 if (cfg->verbose_level > 3)
5598 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5599 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5602 g_slist_free (class_inits);
5607 bblock->real_offset = cfg->real_offset;
5609 if ((cfg->method == method) && cfg->coverage_info) {
5610 guint32 cil_offset = ip - header->code;
5611 cfg->coverage_info->data [cil_offset].cil_code = ip;
5613 /* TODO: Use an increment here */
5614 #if defined(__i386__)
5615 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5616 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5618 MONO_ADD_INS (cfg->cbb, ins);
5620 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5621 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5625 if (cfg->verbose_level > 3)
5626 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5630 if (cfg->keep_cil_nops)
5631 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
5633 MONO_INST_NEW (cfg, ins, OP_NOP);
5635 MONO_ADD_INS (bblock, ins);
5638 MONO_INST_NEW (cfg, ins, OP_BREAK);
5640 MONO_ADD_INS (bblock, ins);
5646 CHECK_STACK_OVF (1);
5647 n = (*ip)-CEE_LDARG_0;
5649 EMIT_NEW_ARGLOAD (cfg, ins, n);
5657 CHECK_STACK_OVF (1);
5658 n = (*ip)-CEE_LDLOC_0;
5660 EMIT_NEW_LOCLOAD (cfg, ins, n);
5669 n = (*ip)-CEE_STLOC_0;
5672 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5674 emit_stloc_ir (cfg, sp, header, n);
5681 CHECK_STACK_OVF (1);
5684 EMIT_NEW_ARGLOAD (cfg, ins, n);
5690 CHECK_STACK_OVF (1);
5693 NEW_ARGLOADA (cfg, ins, n);
5694 MONO_ADD_INS (cfg->cbb, ins);
5704 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5706 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5711 CHECK_STACK_OVF (1);
5714 EMIT_NEW_LOCLOAD (cfg, ins, n);
5718 case CEE_LDLOCA_S: {
5719 unsigned char *tmp_ip;
5721 CHECK_STACK_OVF (1);
5722 CHECK_LOCAL (ip [1]);
5724 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
5730 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5739 CHECK_LOCAL (ip [1]);
5740 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5742 emit_stloc_ir (cfg, sp, header, ip [1]);
5747 CHECK_STACK_OVF (1);
5748 EMIT_NEW_PCONST (cfg, ins, NULL);
5749 ins->type = STACK_OBJ;
5754 CHECK_STACK_OVF (1);
5755 EMIT_NEW_ICONST (cfg, ins, -1);
5768 CHECK_STACK_OVF (1);
5769 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
5775 CHECK_STACK_OVF (1);
5777 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
5783 CHECK_STACK_OVF (1);
5784 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
5790 CHECK_STACK_OVF (1);
5791 MONO_INST_NEW (cfg, ins, OP_I8CONST);
5792 ins->type = STACK_I8;
5793 ins->dreg = alloc_dreg (cfg, STACK_I8);
5795 ins->inst_l = (gint64)read64 (ip);
5796 MONO_ADD_INS (bblock, ins);
5802 /* FIXME: we should really allocate this only late in the compilation process */
5803 f = mono_domain_alloc (cfg->domain, sizeof (float));
5805 CHECK_STACK_OVF (1);
5806 MONO_INST_NEW (cfg, ins, OP_R4CONST);
5807 ins->type = STACK_R8;
5808 ins->dreg = alloc_dreg (cfg, STACK_R8);
5812 MONO_ADD_INS (bblock, ins);
5820 /* FIXME: we should really allocate this only late in the compilation process */
5821 d = mono_domain_alloc (cfg->domain, sizeof (double));
5823 CHECK_STACK_OVF (1);
5824 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5825 ins->type = STACK_R8;
5826 ins->dreg = alloc_dreg (cfg, STACK_R8);
5830 MONO_ADD_INS (bblock, ins);
5837 MonoInst *temp, *store;
5839 CHECK_STACK_OVF (1);
5843 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
5844 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
5846 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5849 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5862 if (sp [0]->type == STACK_R8)
5863 /* we need to pop the value from the x86 FP stack */
5864 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
5873 if (stack_start != sp)
5875 token = read32 (ip + 1);
5876 /* FIXME: check the signature matches */
5877 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5882 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
5883 GENERIC_SHARING_FAILURE (CEE_JMP);
5885 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5886 CHECK_CFG_EXCEPTION;
5890 MonoMethodSignature *fsig = mono_method_signature (cmethod);
5893 /* Handle tail calls similarly to calls */
5894 n = fsig->param_count + fsig->hasthis;
5896 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
5897 call->method = cmethod;
5898 call->tail_call = TRUE;
5899 call->signature = mono_method_signature (cmethod);
5900 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
5901 call->inst.inst_p0 = cmethod;
5902 for (i = 0; i < n; ++i)
5903 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
5905 mono_arch_emit_call (cfg, call);
5906 MONO_ADD_INS (bblock, (MonoInst*)call);
5909 for (i = 0; i < num_args; ++i)
5910 /* Prevent arguments from being optimized away */
5911 arg_array [i]->flags |= MONO_INST_VOLATILE;
5913 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
5914 ins = (MonoInst*)call;
5915 ins->inst_p0 = cmethod;
5916 MONO_ADD_INS (bblock, ins);
5920 start_new_bblock = 1;
5925 case CEE_CALLVIRT: {
5926 MonoInst *addr = NULL;
5927 MonoMethodSignature *fsig = NULL;
5929 int virtual = *ip == CEE_CALLVIRT;
5930 int calli = *ip == CEE_CALLI;
5931 gboolean pass_imt_from_rgctx = FALSE;
5932 MonoInst *imt_arg = NULL;
5933 gboolean pass_vtable = FALSE;
5934 gboolean pass_mrgctx = FALSE;
5935 MonoInst *vtable_arg = NULL;
5936 gboolean check_this = FALSE;
5939 token = read32 (ip + 1);
5946 if (method->wrapper_type != MONO_WRAPPER_NONE)
5947 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
5949 fsig = mono_metadata_parse_signature (image, token);
5951 n = fsig->param_count + fsig->hasthis;
5953 MonoMethod *cil_method;
5955 if (method->wrapper_type != MONO_WRAPPER_NONE) {
5956 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
5957 cil_method = cmethod;
5958 } else if (constrained_call) {
5959 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
5961 * This is needed since get_method_constrained can't find
5962 * the method in klass representing a type var.
5963 * The type var is guaranteed to be a reference type in this
5966 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5967 cil_method = cmethod;
5968 g_assert (!cmethod->klass->valuetype);
5970 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
5973 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5974 cil_method = cmethod;
5979 if (!dont_verify && !cfg->skip_visibility) {
5980 MonoMethod *target_method = cil_method;
5981 if (method->is_inflated) {
5982 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
5984 if (!mono_method_can_access_method (method_definition, target_method) &&
5985 !mono_method_can_access_method (method, cil_method))
5986 METHOD_ACCESS_FAILURE;
5989 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
5990 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
5992 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
5993 /* MS.NET seems to silently convert this to a callvirt */
5996 if (!cmethod->klass->inited)
5997 if (!mono_class_init (cmethod->klass))
6000 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6001 mini_class_is_system_array (cmethod->klass)) {
6002 array_rank = cmethod->klass->rank;
6003 fsig = mono_method_signature (cmethod);
6005 if (mono_method_signature (cmethod)->pinvoke) {
6006 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6007 check_for_pending_exc, FALSE);
6008 fsig = mono_method_signature (wrapper);
6009 } else if (constrained_call) {
6010 fsig = mono_method_signature (cmethod);
6012 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6016 mono_save_token_info (cfg, image, token, cil_method);
6018 n = fsig->param_count + fsig->hasthis;
6020 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6021 if (check_linkdemand (cfg, method, cmethod))
6023 CHECK_CFG_EXCEPTION;
6026 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6027 g_assert_not_reached ();
6030 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6033 if (!cfg->generic_sharing_context && cmethod)
6034 g_assert (!mono_method_check_context_used (cmethod));
6038 //g_assert (!virtual || fsig->hasthis);
6042 if (constrained_call) {
6044 * We have the `constrained.' prefix opcode.
6046 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6050 * The type parameter is instantiated as a valuetype,
6051 * but that type doesn't override the method we're
6052 * calling, so we need to box `this'.
6054 dreg = alloc_dreg (cfg, STACK_VTYPE);
6055 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADV_MEMBASE, dreg, sp [0]->dreg, 0);
6056 ins->klass = constrained_call;
6057 sp [0] = handle_box (cfg, ins, constrained_call);
6058 } else if (!constrained_call->valuetype) {
6059 int dreg = alloc_preg (cfg);
6062 * The type parameter is instantiated as a reference
6063 * type. We have a managed pointer on the stack, so
6064 * we need to dereference it here.
6066 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6067 ins->type = STACK_OBJ;
6069 } else if (cmethod->klass->valuetype)
6071 constrained_call = NULL;
6074 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6078 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6079 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6080 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6081 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6082 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6085 * Pass vtable iff target method might
6086 * be shared, which means that sharing
6087 * is enabled for its class and its
6088 * context is sharable (and it's not a
6091 if (sharing_enabled && context_sharable &&
6092 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6096 if (cmethod && mini_method_get_context (cmethod) &&
6097 mini_method_get_context (cmethod)->method_inst) {
6098 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6099 MonoGenericContext *context = mini_method_get_context (cmethod);
6100 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6102 g_assert (!pass_vtable);
6104 if (sharing_enabled && context_sharable)
6108 if (cfg->generic_sharing_context && cmethod) {
6109 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6111 context_used = mono_method_check_context_used (cmethod);
6113 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6114 /* Generic method interface
6115 calls are resolved via a
6116 helper function and don't
6118 if (!cmethod_context || !cmethod_context->method_inst)
6119 pass_imt_from_rgctx = TRUE;
6123 * If a shared method calls another
6124 * shared method then the caller must
6125 * have a generic sharing context
6126 * because the magic trampoline
6127 * requires it. FIXME: We shouldn't
6128 * have to force the vtable/mrgctx
6129 * variable here. Instead there
6130 * should be a flag in the cfg to
6131 * request a generic sharing context.
6134 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6135 mono_get_vtable_var (cfg);
6140 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6142 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6144 CHECK_TYPELOAD (cmethod->klass);
6145 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6150 g_assert (!vtable_arg);
6153 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6155 EMIT_NEW_METHOD_RGCTX_CONST (cfg, vtable_arg, cmethod);
6158 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6159 MONO_METHOD_IS_FINAL (cmethod)) {
6166 if (pass_imt_from_rgctx) {
6167 g_assert (!pass_vtable);
6170 imt_arg = emit_get_rgctx_method (cfg, context_used,
6171 cmethod, MONO_RGCTX_INFO_METHOD);
6177 MONO_INST_NEW (cfg, check, OP_CHECK_THIS);
6178 check->sreg1 = sp [0]->dreg;
6179 MONO_ADD_INS (cfg->cbb, check);
6182 /* Calling virtual generic methods */
6183 if (cmethod && virtual &&
6184 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6185 !(MONO_METHOD_IS_FINAL (cmethod) &&
6186 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6187 mono_method_signature (cmethod)->generic_param_count) {
6188 MonoInst *this_temp, *this_arg_temp, *store;
6189 MonoInst *iargs [4];
6191 g_assert (mono_method_signature (cmethod)->is_inflated);
6193 /* Prevent inlining of methods that contain indirect calls */
6196 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK
6197 if (cmethod->wrapper_type == MONO_WRAPPER_NONE) {
6198 g_assert (!imt_arg);
6200 imt_arg = emit_get_rgctx_method (cfg, context_used,
6201 cmethod, MONO_RGCTX_INFO_METHOD);
6204 g_assert (cmethod->is_inflated);
6205 EMIT_NEW_METHODCONST (cfg, imt_arg, cmethod);
6207 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6211 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6212 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6213 MONO_ADD_INS (bblock, store);
6215 /* FIXME: This should be a managed pointer */
6216 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6218 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6220 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6221 cmethod, MONO_RGCTX_INFO_METHOD);
6222 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6223 addr = mono_emit_jit_icall (cfg,
6224 mono_helper_compile_generic_method, iargs);
6226 EMIT_NEW_METHODCONST (cfg, iargs [1], cmethod);
6227 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6228 addr = mono_emit_jit_icall (cfg, mono_helper_compile_generic_method, iargs);
6231 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6233 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6236 if (!MONO_TYPE_IS_VOID (fsig->ret))
6245 /* FIXME: runtime generic context pointer for jumps? */
6246 /* FIXME: handle this for generic sharing eventually */
6247 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) &&
6248 (mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod))) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret)) {
6251 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6254 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6255 call->tail_call = TRUE;
6256 call->method = cmethod;
6257 call->signature = mono_method_signature (cmethod);
6260 /* Handle tail calls similarly to calls */
6261 call->inst.opcode = OP_TAILCALL;
6263 mono_arch_emit_call (cfg, call);
6266 * We implement tail calls by storing the actual arguments into the
6267 * argument variables, then emitting a CEE_JMP.
6269 for (i = 0; i < n; ++i) {
6270 /* Prevent argument from being register allocated */
6271 arg_array [i]->flags |= MONO_INST_VOLATILE;
6272 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6276 ins = (MonoInst*)call;
6277 ins->inst_p0 = cmethod;
6278 ins->inst_p1 = arg_array [0];
6279 MONO_ADD_INS (bblock, ins);
6280 link_bblock (cfg, bblock, end_bblock);
6281 start_new_bblock = 1;
6282 /* skip CEE_RET as well */
6288 /* Conversion to a JIT intrinsic */
6289 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6290 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6291 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6302 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6303 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6304 mono_method_check_inlining (cfg, cmethod) &&
6305 !g_list_find (dont_inline, cmethod)) {
6307 gboolean allways = FALSE;
6309 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6310 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6311 /* Prevent inlining of methods that call wrappers */
6313 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6317 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6319 cfg->real_offset += 5;
6322 if (!MONO_TYPE_IS_VOID (fsig->ret))
6323 /* *sp is already set by inline_method */
6326 inline_costs += costs;
6332 inline_costs += 10 * num_calls++;
6334 /* Tail recursion elimination */
6335 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6336 gboolean has_vtargs = FALSE;
6339 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6342 /* keep it simple */
6343 for (i = fsig->param_count - 1; i >= 0; i--) {
6344 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6349 for (i = 0; i < n; ++i)
6350 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6351 MONO_INST_NEW (cfg, ins, OP_BR);
6352 MONO_ADD_INS (bblock, ins);
6353 tblock = start_bblock->out_bb [0];
6354 link_bblock (cfg, bblock, tblock);
6355 ins->inst_target_bb = tblock;
6356 start_new_bblock = 1;
6358 /* skip the CEE_RET, too */
6359 if (ip_in_bb (cfg, bblock, ip + 5))
6369 /* Generic sharing */
6370 /* FIXME: only do this for generic methods if
6371 they are not shared! */
6372 if (context_used && !imt_arg && !array_rank &&
6373 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6374 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6375 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6376 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6379 g_assert (cfg->generic_sharing_context && cmethod);
6383 * We are compiling a call to a
6384 * generic method from shared code,
6385 * which means that we have to look up
6386 * the method in the rgctx and do an
6389 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6392 /* Indirect calls */
6394 g_assert (!imt_arg);
6396 if (*ip == CEE_CALL)
6397 g_assert (context_used);
6398 else if (*ip == CEE_CALLI)
6399 g_assert (!vtable_arg);
6401 /* FIXME: what the hell is this??? */
6402 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6403 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6405 /* Prevent inlining of methods with indirect calls */
6409 #ifdef MONO_ARCH_RGCTX_REG
6411 int rgctx_reg = mono_alloc_preg (cfg);
6413 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6414 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6415 call = (MonoCallInst*)ins;
6416 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6417 cfg->uses_rgctx_reg = TRUE;
6418 call->rgctx_reg = TRUE;
6423 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6425 * Instead of emitting an indirect call, emit a direct call
6426 * with the contents of the aotconst as the patch info.
6428 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6431 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6434 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6435 if (fsig->pinvoke && !fsig->ret->byref) {
6439 * Native code might return non register sized integers
6440 * without initializing the upper bits.
6442 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
6443 case OP_LOADI1_MEMBASE:
6444 widen_op = OP_ICONV_TO_I1;
6446 case OP_LOADU1_MEMBASE:
6447 widen_op = OP_ICONV_TO_U1;
6449 case OP_LOADI2_MEMBASE:
6450 widen_op = OP_ICONV_TO_I2;
6452 case OP_LOADU2_MEMBASE:
6453 widen_op = OP_ICONV_TO_U2;
6459 if (widen_op != -1) {
6460 int dreg = alloc_preg (cfg);
6463 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
6464 widen->type = ins->type;
6481 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6482 if (sp [fsig->param_count]->type == STACK_OBJ) {
6483 MonoInst *iargs [2];
6486 iargs [1] = sp [fsig->param_count];
6488 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6491 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6492 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6493 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6494 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6496 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6499 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6500 if (!cmethod->klass->element_class->valuetype && !readonly)
6501 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6504 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6507 g_assert_not_reached ();
6515 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6517 if (!MONO_TYPE_IS_VOID (fsig->ret))
6528 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6530 } else if (imt_arg) {
6531 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6533 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6536 if (!MONO_TYPE_IS_VOID (fsig->ret))
6544 if (cfg->method != method) {
6545 /* return from inlined method */
6547 * If in_count == 0, that means the ret is unreachable due to
 6548			 * being preceded by a throw. In that case, inline_method () will
6549 * handle setting the return value
6550 * (test case: test_0_inline_throw ()).
6552 if (return_var && cfg->cbb->in_count) {
6556 //g_assert (returnvar != -1);
6557 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6558 cfg->ret_var_set = TRUE;
6562 MonoType *ret_type = mono_method_signature (method)->ret;
6564 g_assert (!return_var);
6567 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6570 if (!cfg->vret_addr) {
6573 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6575 EMIT_NEW_RETLOADA (cfg, ret_addr);
6577 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6578 ins->klass = mono_class_from_mono_type (ret_type);
6581 #ifdef MONO_ARCH_SOFT_FLOAT
6582 if (!ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6583 MonoInst *iargs [1];
6587 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6588 mono_arch_emit_setret (cfg, method, conv);
6590 mono_arch_emit_setret (cfg, method, *sp);
6593 mono_arch_emit_setret (cfg, method, *sp);
6598 if (sp != stack_start)
6600 MONO_INST_NEW (cfg, ins, OP_BR);
6602 ins->inst_target_bb = end_bblock;
6603 MONO_ADD_INS (bblock, ins);
6604 link_bblock (cfg, bblock, end_bblock);
6605 start_new_bblock = 1;
6609 MONO_INST_NEW (cfg, ins, OP_BR);
6611 target = ip + 1 + (signed char)(*ip);
6613 GET_BBLOCK (cfg, tblock, target);
6614 link_bblock (cfg, bblock, tblock);
6615 ins->inst_target_bb = tblock;
6616 if (sp != stack_start) {
6617 handle_stack_args (cfg, stack_start, sp - stack_start);
6619 CHECK_UNVERIFIABLE (cfg);
6621 MONO_ADD_INS (bblock, ins);
6622 start_new_bblock = 1;
6623 inline_costs += BRANCH_COST;
6637 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6639 target = ip + 1 + *(signed char*)ip;
6645 inline_costs += BRANCH_COST;
6649 MONO_INST_NEW (cfg, ins, OP_BR);
6652 target = ip + 4 + (gint32)read32(ip);
6654 GET_BBLOCK (cfg, tblock, target);
6655 link_bblock (cfg, bblock, tblock);
6656 ins->inst_target_bb = tblock;
6657 if (sp != stack_start) {
6658 handle_stack_args (cfg, stack_start, sp - stack_start);
6660 CHECK_UNVERIFIABLE (cfg);
6663 MONO_ADD_INS (bblock, ins);
6665 start_new_bblock = 1;
6666 inline_costs += BRANCH_COST;
6673 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6674 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6675 guint32 opsize = is_short ? 1 : 4;
6677 CHECK_OPSIZE (opsize);
6679 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6682 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6687 GET_BBLOCK (cfg, tblock, target);
6688 link_bblock (cfg, bblock, tblock);
6689 GET_BBLOCK (cfg, tblock, ip);
6690 link_bblock (cfg, bblock, tblock);
6692 if (sp != stack_start) {
6693 handle_stack_args (cfg, stack_start, sp - stack_start);
6694 CHECK_UNVERIFIABLE (cfg);
6697 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6698 cmp->sreg1 = sp [0]->dreg;
6699 type_from_op (cmp, sp [0], NULL);
6702 #if SIZEOF_REGISTER == 4
6703 if (cmp->opcode == OP_LCOMPARE_IMM) {
6704 /* Convert it to OP_LCOMPARE */
6705 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6706 ins->type = STACK_I8;
6707 ins->dreg = alloc_dreg (cfg, STACK_I8);
6709 MONO_ADD_INS (bblock, ins);
6710 cmp->opcode = OP_LCOMPARE;
6711 cmp->sreg2 = ins->dreg;
6714 MONO_ADD_INS (bblock, cmp);
6716 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6717 type_from_op (ins, sp [0], NULL);
6718 MONO_ADD_INS (bblock, ins);
6719 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
6720 GET_BBLOCK (cfg, tblock, target);
6721 ins->inst_true_bb = tblock;
6722 GET_BBLOCK (cfg, tblock, ip);
6723 ins->inst_false_bb = tblock;
6724 start_new_bblock = 2;
6727 inline_costs += BRANCH_COST;
6742 MONO_INST_NEW (cfg, ins, *ip);
6744 target = ip + 4 + (gint32)read32(ip);
6750 inline_costs += BRANCH_COST;
6754 MonoBasicBlock **targets;
6755 MonoBasicBlock *default_bblock;
6756 MonoJumpInfoBBTable *table;
6757 int offset_reg = alloc_preg (cfg);
6758 int target_reg = alloc_preg (cfg);
6759 int table_reg = alloc_preg (cfg);
6760 int sum_reg = alloc_preg (cfg);
6761 gboolean use_op_switch;
6765 n = read32 (ip + 1);
6768 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
6772 CHECK_OPSIZE (n * sizeof (guint32));
6773 target = ip + n * sizeof (guint32);
6775 GET_BBLOCK (cfg, default_bblock, target);
6777 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
6778 for (i = 0; i < n; ++i) {
6779 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
6780 targets [i] = tblock;
6784 if (sp != stack_start) {
6786 * Link the current bb with the targets as well, so handle_stack_args
6787 * will set their in_stack correctly.
6789 link_bblock (cfg, bblock, default_bblock);
6790 for (i = 0; i < n; ++i)
6791 link_bblock (cfg, bblock, targets [i]);
6793 handle_stack_args (cfg, stack_start, sp - stack_start);
6795 CHECK_UNVERIFIABLE (cfg);
6798 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
6799 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
6802 for (i = 0; i < n; ++i)
6803 link_bblock (cfg, bblock, targets [i]);
6805 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
6806 table->table = targets;
6807 table->table_size = n;
6809 use_op_switch = FALSE;
6811 /* ARM implements SWITCH statements differently */
6812 /* FIXME: Make it use the generic implementation */
6813 if (!cfg->compile_aot)
6814 use_op_switch = TRUE;
6817 if (COMPILE_LLVM (cfg))
6818 use_op_switch = TRUE;
6820 if (use_op_switch) {
6821 MONO_INST_NEW (cfg, ins, OP_SWITCH);
6822 ins->sreg1 = src1->dreg;
6823 ins->inst_p0 = table;
6824 ins->inst_many_bb = targets;
6825 ins->klass = GUINT_TO_POINTER (n);
6826 MONO_ADD_INS (cfg->cbb, ins);
6828 if (sizeof (gpointer) == 8)
6829 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
6831 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
6833 #if SIZEOF_REGISTER == 8
6834 /* The upper word might not be zero, and we add it to a 64 bit address later */
6835 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
6838 if (cfg->compile_aot) {
6839 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
6841 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
6842 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
6843 ins->inst_p0 = table;
6844 ins->dreg = table_reg;
6845 MONO_ADD_INS (cfg->cbb, ins);
6848 /* FIXME: Use load_memindex */
6849 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
6850 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
6851 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
6853 start_new_bblock = 1;
6854 inline_costs += (BRANCH_COST * 2);
6874 dreg = alloc_freg (cfg);
6877 dreg = alloc_lreg (cfg);
6880 dreg = alloc_preg (cfg);
6883 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
6884 ins->type = ldind_type [*ip - CEE_LDIND_I1];
6885 ins->flags |= ins_flag;
6887 MONO_ADD_INS (bblock, ins);
6902 #if HAVE_WRITE_BARRIERS
6903 if (*ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0))) {
6904 /* insert call to write barrier */
6905 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
6906 mono_emit_method_call (cfg, write_barrier, sp, NULL);
6913 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
6914 ins->flags |= ins_flag;
6916 MONO_ADD_INS (bblock, ins);
6924 MONO_INST_NEW (cfg, ins, (*ip));
6926 ins->sreg1 = sp [0]->dreg;
6927 ins->sreg2 = sp [1]->dreg;
6928 type_from_op (ins, sp [0], sp [1]);
6930 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6932 /* Use the immediate opcodes if possible */
6933 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
6934 int imm_opcode = mono_op_to_op_imm (ins->opcode);
6935 if (imm_opcode != -1) {
6936 ins->opcode = imm_opcode;
6937 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
6940 sp [1]->opcode = OP_NOP;
6944 MONO_ADD_INS ((cfg)->cbb, (ins));
6947 mono_decompose_opcode (cfg, ins);
6964 MONO_INST_NEW (cfg, ins, (*ip));
6966 ins->sreg1 = sp [0]->dreg;
6967 ins->sreg2 = sp [1]->dreg;
6968 type_from_op (ins, sp [0], sp [1]);
6970 ADD_WIDEN_OP (ins, sp [0], sp [1]);
6971 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6973 /* FIXME: Pass opcode to is_inst_imm */
6975 /* Use the immediate opcodes if possible */
6976 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
6979 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
6980 if (imm_opcode != -1) {
6981 ins->opcode = imm_opcode;
6982 if (sp [1]->opcode == OP_I8CONST) {
6983 #if SIZEOF_REGISTER == 8
6984 ins->inst_imm = sp [1]->inst_l;
6986 ins->inst_ls_word = sp [1]->inst_ls_word;
6987 ins->inst_ms_word = sp [1]->inst_ms_word;
6991 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
6994 /* Might be followed by an instruction added by ADD_WIDEN_OP */
6995 if (sp [1]->next == NULL)
6996 sp [1]->opcode = OP_NOP;
6999 MONO_ADD_INS ((cfg)->cbb, (ins));
7002 mono_decompose_opcode (cfg, ins);
7015 case CEE_CONV_OVF_I8:
7016 case CEE_CONV_OVF_U8:
7020 /* Special case this earlier so we have long constants in the IR */
7021 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7022 int data = sp [-1]->inst_c0;
7023 sp [-1]->opcode = OP_I8CONST;
7024 sp [-1]->type = STACK_I8;
7025 #if SIZEOF_REGISTER == 8
7026 if ((*ip) == CEE_CONV_U8)
7027 sp [-1]->inst_c0 = (guint32)data;
7029 sp [-1]->inst_c0 = data;
7031 sp [-1]->inst_ls_word = data;
7032 if ((*ip) == CEE_CONV_U8)
7033 sp [-1]->inst_ms_word = 0;
7035 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7037 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7044 case CEE_CONV_OVF_I4:
7045 case CEE_CONV_OVF_I1:
7046 case CEE_CONV_OVF_I2:
7047 case CEE_CONV_OVF_I:
7048 case CEE_CONV_OVF_U:
7051 if (sp [-1]->type == STACK_R8) {
7052 ADD_UNOP (CEE_CONV_OVF_I8);
7059 case CEE_CONV_OVF_U1:
7060 case CEE_CONV_OVF_U2:
7061 case CEE_CONV_OVF_U4:
7064 if (sp [-1]->type == STACK_R8) {
7065 ADD_UNOP (CEE_CONV_OVF_U8);
7072 case CEE_CONV_OVF_I1_UN:
7073 case CEE_CONV_OVF_I2_UN:
7074 case CEE_CONV_OVF_I4_UN:
7075 case CEE_CONV_OVF_I8_UN:
7076 case CEE_CONV_OVF_U1_UN:
7077 case CEE_CONV_OVF_U2_UN:
7078 case CEE_CONV_OVF_U4_UN:
7079 case CEE_CONV_OVF_U8_UN:
7080 case CEE_CONV_OVF_I_UN:
7081 case CEE_CONV_OVF_U_UN:
7091 case CEE_ADD_OVF_UN:
7093 case CEE_MUL_OVF_UN:
7095 case CEE_SUB_OVF_UN:
7103 token = read32 (ip + 1);
7104 klass = mini_get_class (method, token, generic_context);
7105 CHECK_TYPELOAD (klass);
7107 if (generic_class_is_reference_type (cfg, klass)) {
7108 MonoInst *store, *load;
7109 int dreg = alloc_preg (cfg);
7111 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7112 load->flags |= ins_flag;
7113 MONO_ADD_INS (cfg->cbb, load);
7115 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7116 store->flags |= ins_flag;
7117 MONO_ADD_INS (cfg->cbb, store);
7119 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7131 token = read32 (ip + 1);
7132 klass = mini_get_class (method, token, generic_context);
7133 CHECK_TYPELOAD (klass);
7135 /* Optimize the common ldobj+stloc combination */
7145 loc_index = ip [5] - CEE_STLOC_0;
7152 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7153 CHECK_LOCAL (loc_index);
7155 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7156 ins->dreg = cfg->locals [loc_index]->dreg;
7162 /* Optimize the ldobj+stobj combination */
7163 /* The reference case ends up being a load+store anyway */
7164 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 9) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7169 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7176 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7185 CHECK_STACK_OVF (1);
7187 n = read32 (ip + 1);
7189 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7190 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7191 ins->type = STACK_OBJ;
7194 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7195 MonoInst *iargs [1];
7197 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7198 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7200 if (cfg->opt & MONO_OPT_SHARED) {
7201 MonoInst *iargs [3];
7203 if (cfg->compile_aot) {
7204 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7206 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7207 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7208 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7209 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7210 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7212 if (bblock->out_of_line) {
7213 MonoInst *iargs [2];
7215 if (image == mono_defaults.corlib) {
7217 * Avoid relocations in AOT and save some space by using a
7218 * version of helper_ldstr specialized to mscorlib.
7220 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7221 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7223 /* Avoid creating the string object */
7224 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7225 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7226 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7230 if (cfg->compile_aot) {
7231 NEW_LDSTRCONST (cfg, ins, image, n);
7233 MONO_ADD_INS (bblock, ins);
7236 NEW_PCONST (cfg, ins, NULL);
7237 ins->type = STACK_OBJ;
7238 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7240 MONO_ADD_INS (bblock, ins);
7249 MonoInst *iargs [2];
7250 MonoMethodSignature *fsig;
7253 MonoInst *vtable_arg = NULL;
7256 token = read32 (ip + 1);
7257 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7260 fsig = mono_method_get_signature (cmethod, image, token);
7262 mono_save_token_info (cfg, image, token, cmethod);
7264 if (!mono_class_init (cmethod->klass))
7267 if (cfg->generic_sharing_context)
7268 context_used = mono_method_check_context_used (cmethod);
7270 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7271 if (check_linkdemand (cfg, method, cmethod))
7273 CHECK_CFG_EXCEPTION;
7274 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7275 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7278 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7279 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7280 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7282 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7283 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7285 EMIT_NEW_METHOD_RGCTX_CONST (cfg, vtable_arg, cmethod);
7289 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7290 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7292 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7294 CHECK_TYPELOAD (cmethod->klass);
7295 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7300 n = fsig->param_count;
7304 * Generate smaller code for the common newobj <exception> instruction in
7305 * argument checking code.
7307 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7308 is_exception_class (cmethod->klass) && n <= 2 &&
7309 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7310 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7311 MonoInst *iargs [3];
7313 g_assert (!vtable_arg);
7317 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7320 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7324 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7329 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7332 g_assert_not_reached ();
7340 /* move the args to allow room for 'this' in the first position */
7346 /* check_call_signature () requires sp[0] to be set */
7347 this_ins.type = STACK_OBJ;
7349 if (check_call_signature (cfg, fsig, sp))
7354 if (mini_class_is_system_array (cmethod->klass)) {
7355 g_assert (!vtable_arg);
7358 *sp = emit_get_rgctx_method (cfg, context_used,
7359 cmethod, MONO_RGCTX_INFO_METHOD);
7361 EMIT_NEW_METHODCONST (cfg, *sp, cmethod);
7364 /* Avoid varargs in the common case */
7365 if (fsig->param_count == 1)
7366 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7367 else if (fsig->param_count == 2)
7368 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7370 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7371 } else if (cmethod->string_ctor) {
7372 g_assert (!context_used);
7373 g_assert (!vtable_arg);
7374 /* we simply pass a null pointer */
7375 EMIT_NEW_PCONST (cfg, *sp, NULL);
7376 /* now call the string ctor */
7377 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7379 MonoInst* callvirt_this_arg = NULL;
7381 if (cmethod->klass->valuetype) {
7382 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7383 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7384 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7389 * The code generated by mini_emit_virtual_call () expects
7390 * iargs [0] to be a boxed instance, but luckily the vcall
7391 * will be transformed into a normal call there.
7393 } else if (context_used) {
7397 if (cfg->opt & MONO_OPT_SHARED)
7398 rgctx_info = MONO_RGCTX_INFO_KLASS;
7400 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7401 data = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, rgctx_info);
7403 alloc = handle_alloc_from_inst (cfg, cmethod->klass, data, FALSE);
7406 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7408 CHECK_TYPELOAD (cmethod->klass);
7411 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7412 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7413 * As a workaround, we call class cctors before allocating objects.
7415 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7416 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7417 if (cfg->verbose_level > 2)
7418 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7419 class_inits = g_slist_prepend (class_inits, vtable);
7422 alloc = handle_alloc (cfg, cmethod->klass, FALSE);
7427 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7429 /* Now call the actual ctor */
7430 /* Avoid virtual calls to ctors if possible */
7431 if (cmethod->klass->marshalbyref)
7432 callvirt_this_arg = sp [0];
7434 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7435 mono_method_check_inlining (cfg, cmethod) &&
7436 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7437 !g_list_find (dont_inline, cmethod)) {
7440 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7441 cfg->real_offset += 5;
7444 inline_costs += costs - 5;
7447 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7449 } else if (context_used &&
7450 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7451 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7452 MonoInst *cmethod_addr;
7454 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7455 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7457 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7460 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7461 callvirt_this_arg, NULL, vtable_arg);
7462 if (mono_method_is_generic_sharable_impl (cmethod, TRUE) && ((MonoCallInst*)ins)->method->wrapper_type == MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)
7463 GENERIC_SHARING_FAILURE (*ip);
7467 if (alloc == NULL) {
7469 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7470 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7484 token = read32 (ip + 1);
7485 klass = mini_get_class (method, token, generic_context);
7486 CHECK_TYPELOAD (klass);
7487 if (sp [0]->type != STACK_OBJ)
7490 if (cfg->generic_sharing_context)
7491 context_used = mono_class_check_context_used (klass);
7500 args [1] = emit_get_rgctx_klass (cfg, context_used,
7501 klass, MONO_RGCTX_INFO_KLASS);
7503 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7507 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7508 MonoMethod *mono_castclass;
7509 MonoInst *iargs [1];
7512 mono_castclass = mono_marshal_get_castclass (klass);
7515 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7516 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7517 g_assert (costs > 0);
7520 cfg->real_offset += 5;
7525 inline_costs += costs;
7528 ins = handle_castclass (cfg, klass, *sp);
7538 token = read32 (ip + 1);
7539 klass = mini_get_class (method, token, generic_context);
7540 CHECK_TYPELOAD (klass);
7541 if (sp [0]->type != STACK_OBJ)
7544 if (cfg->generic_sharing_context)
7545 context_used = mono_class_check_context_used (klass);
7554 args [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7556 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7560 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7561 MonoMethod *mono_isinst;
7562 MonoInst *iargs [1];
7565 mono_isinst = mono_marshal_get_isinst (klass);
7568 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7569 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7570 g_assert (costs > 0);
7573 cfg->real_offset += 5;
7578 inline_costs += costs;
7581 ins = handle_isinst (cfg, klass, *sp);
7588 case CEE_UNBOX_ANY: {
7592 token = read32 (ip + 1);
7593 klass = mini_get_class (method, token, generic_context);
7594 CHECK_TYPELOAD (klass);
7596 mono_save_token_info (cfg, image, token, klass);
7598 if (cfg->generic_sharing_context)
7599 context_used = mono_class_check_context_used (klass);
7601 if (generic_class_is_reference_type (cfg, klass)) {
7604 MonoInst *iargs [2];
7609 iargs [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7610 ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
7614 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7615 MonoMethod *mono_castclass;
7616 MonoInst *iargs [1];
7619 mono_castclass = mono_marshal_get_castclass (klass);
7622 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7623 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7625 g_assert (costs > 0);
7628 cfg->real_offset += 5;
7632 inline_costs += costs;
7634 ins = handle_castclass (cfg, klass, *sp);
7642 if (mono_class_is_nullable (klass)) {
7643 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
7650 ins = handle_unbox (cfg, klass, sp, context_used);
7656 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7669 token = read32 (ip + 1);
7670 klass = mini_get_class (method, token, generic_context);
7671 CHECK_TYPELOAD (klass);
7673 mono_save_token_info (cfg, image, token, klass);
7675 if (cfg->generic_sharing_context)
7676 context_used = mono_class_check_context_used (klass);
7678 if (generic_class_is_reference_type (cfg, klass)) {
7684 if (klass == mono_defaults.void_class)
7686 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7688 /* frequent check in generic code: box (struct), brtrue */
7689 if (!mono_class_is_nullable (klass) &&
7690 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
7691 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7693 MONO_INST_NEW (cfg, ins, OP_BR);
7694 if (*ip == CEE_BRTRUE_S) {
7697 target = ip + 1 + (signed char)(*ip);
7702 target = ip + 4 + (gint)(read32 (ip));
7705 GET_BBLOCK (cfg, tblock, target);
7706 link_bblock (cfg, bblock, tblock);
7707 ins->inst_target_bb = tblock;
7708 GET_BBLOCK (cfg, tblock, ip);
7710 * This leads to some inconsistency, since the two bblocks are
7711 * not really connected, but it is needed for handling stack
7712 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
7713 * FIXME: This should only be needed if sp != stack_start, but that
7714 * doesn't work for some reason (test failure in mcs/tests on x86).
7716 link_bblock (cfg, bblock, tblock);
7717 if (sp != stack_start) {
7718 handle_stack_args (cfg, stack_start, sp - stack_start);
7720 CHECK_UNVERIFIABLE (cfg);
7722 MONO_ADD_INS (bblock, ins);
7723 start_new_bblock = 1;
7731 if (cfg->opt & MONO_OPT_SHARED)
7732 rgctx_info = MONO_RGCTX_INFO_KLASS;
7734 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7735 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
7736 *sp++ = handle_box_from_inst (cfg, val, klass, context_used, data);
7738 *sp++ = handle_box (cfg, val, klass);
7749 token = read32 (ip + 1);
7750 klass = mini_get_class (method, token, generic_context);
7751 CHECK_TYPELOAD (klass);
7753 mono_save_token_info (cfg, image, token, klass);
7755 if (cfg->generic_sharing_context)
7756 context_used = mono_class_check_context_used (klass);
7758 if (mono_class_is_nullable (klass)) {
7761 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
7762 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
7766 ins = handle_unbox (cfg, klass, sp, context_used);
7776 MonoClassField *field;
7780 if (*ip == CEE_STFLD) {
7787 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
7789 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
7792 token = read32 (ip + 1);
7793 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7794 field = mono_method_get_wrapper_data (method, token);
7795 klass = field->parent;
7798 field = mono_field_from_token (image, token, &klass, generic_context);
7802 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7803 FIELD_ACCESS_FAILURE;
7804 mono_class_init (klass);
7806 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
7807 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
7808 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
7809 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
7812 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
7813 if (*ip == CEE_STFLD) {
7814 if (target_type_is_incompatible (cfg, field->type, sp [1]))
7816 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7817 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
7818 MonoInst *iargs [5];
7821 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7822 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7823 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
7827 if (cfg->opt & MONO_OPT_INLINE) {
7828 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
7829 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7830 g_assert (costs > 0);
7832 cfg->real_offset += 5;
7835 inline_costs += costs;
7837 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
7842 #if HAVE_WRITE_BARRIERS
7843 if (mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
7844 /* insert call to write barrier */
7845 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7846 MonoInst *iargs [2];
7849 dreg = alloc_preg (cfg);
7850 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7852 mono_emit_method_call (cfg, write_barrier, iargs, NULL);
7856 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
7858 store->flags |= ins_flag;
7865 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7866 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
7867 MonoInst *iargs [4];
7870 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7871 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7872 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
7873 if ((cfg->opt & MONO_OPT_INLINE) && !MONO_TYPE_ISSTRUCT (mono_method_signature (wrapper)->ret)) {
7874 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
7875 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7877 g_assert (costs > 0);
7879 cfg->real_offset += 5;
7883 inline_costs += costs;
7885 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
7889 if (sp [0]->type == STACK_VTYPE) {
7892 /* Have to compute the address of the variable */
7894 var = get_vreg_to_inst (cfg, sp [0]->dreg);
7896 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
7898 g_assert (var->klass == klass);
7900 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
7904 if (*ip == CEE_LDFLDA) {
7905 dreg = alloc_preg (cfg);
7907 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7908 ins->klass = mono_class_from_mono_type (field->type);
7909 ins->type = STACK_MP;
7914 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
7915 load->flags |= ins_flag;
7926 MonoClassField *field;
7927 gpointer addr = NULL;
7928 gboolean is_special_static;
7931 token = read32 (ip + 1);
7933 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7934 field = mono_method_get_wrapper_data (method, token);
7935 klass = field->parent;
7938 field = mono_field_from_token (image, token, &klass, generic_context);
7941 mono_class_init (klass);
7942 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7943 FIELD_ACCESS_FAILURE;
7945 /* if the class is Critical then transparent code cannot access it's fields */
7946 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
7947 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
7950 * We can only support shared generic static
7951 * field access on architectures where the
7952 * trampoline code has been extended to handle
7953 * the generic class init.
7955 #ifndef MONO_ARCH_VTABLE_REG
7956 GENERIC_SHARING_FAILURE (*ip);
7959 if (cfg->generic_sharing_context)
7960 context_used = mono_class_check_context_used (klass);
7962 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
7964 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
7965 * to be called here.
7967 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
7968 mono_class_vtable (cfg->domain, klass);
7969 CHECK_TYPELOAD (klass);
7971 mono_domain_lock (cfg->domain);
7972 if (cfg->domain->special_static_fields)
7973 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
7974 mono_domain_unlock (cfg->domain);
7976 is_special_static = mono_class_field_is_special_static (field);
7978 /* Generate IR to compute the field address */
7980 if ((cfg->opt & MONO_OPT_SHARED) ||
7981 (cfg->compile_aot && is_special_static) ||
7982 (context_used && is_special_static)) {
7983 MonoInst *iargs [2];
7985 g_assert (field->parent);
7986 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7988 iargs [1] = emit_get_rgctx_field (cfg, context_used,
7989 field, MONO_RGCTX_INFO_CLASS_FIELD);
7991 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
7993 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
7994 } else if (context_used) {
7995 MonoInst *static_data;
7998 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
7999 method->klass->name_space, method->klass->name, method->name,
8000 depth, field->offset);
8003 if (mono_class_needs_cctor_run (klass, method)) {
8007 vtable = emit_get_rgctx_klass (cfg, context_used,
8008 klass, MONO_RGCTX_INFO_VTABLE);
8010 // FIXME: This doesn't work since it tries to pass the argument
8011 // in the normal way, instead of using MONO_ARCH_VTABLE_REG
8013 * The vtable pointer is always passed in a register regardless of
8014 * the calling convention, so assign it manually, and make a call
8015 * using a signature without parameters.
8017 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable);
8018 #ifdef MONO_ARCH_VTABLE_REG
8019 mono_call_inst_add_outarg_reg (cfg, call, vtable->dreg, MONO_ARCH_VTABLE_REG, FALSE);
8020 cfg->uses_vtable_reg = TRUE;
8027 * The pointer we're computing here is
8029 * super_info.static_data + field->offset
8031 static_data = emit_get_rgctx_klass (cfg, context_used,
8032 klass, MONO_RGCTX_INFO_STATIC_DATA);
8034 if (field->offset == 0) {
8037 int addr_reg = mono_alloc_preg (cfg);
8038 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8040 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8041 MonoInst *iargs [2];
8043 g_assert (field->parent);
8044 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8045 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8046 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8048 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8050 CHECK_TYPELOAD (klass);
8052 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8053 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8054 if (cfg->verbose_level > 2)
8055 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8056 class_inits = g_slist_prepend (class_inits, vtable);
8058 if (cfg->run_cctors) {
8060 /* This makes so that inline cannot trigger */
8061 /* .cctors: too many apps depend on them */
8062 /* running with a specific order... */
8063 if (! vtable->initialized)
8065 ex = mono_runtime_class_init_full (vtable, FALSE);
8067 set_exception_object (cfg, ex);
8068 goto exception_exit;
8072 addr = (char*)vtable->data + field->offset;
8074 if (cfg->compile_aot)
8075 EMIT_NEW_SFLDACONST (cfg, ins, field);
8077 EMIT_NEW_PCONST (cfg, ins, addr);
8080 * insert call to mono_threads_get_static_data (GPOINTER_TO_UINT (addr))
8081 * This could be later optimized to do just a couple of
8082 * memory dereferences with constant offsets.
8084 MonoInst *iargs [1];
8085 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8086 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8090 /* Generate IR to do the actual load/store operation */
8092 if (*ip == CEE_LDSFLDA) {
8093 ins->klass = mono_class_from_mono_type (field->type);
8094 ins->type = STACK_PTR;
8096 } else if (*ip == CEE_STSFLD) {
8101 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
8102 store->flags |= ins_flag;
8104 gboolean is_const = FALSE;
8105 MonoVTable *vtable = NULL;
8107 if (!context_used) {
8108 vtable = mono_class_vtable (cfg->domain, klass);
8109 CHECK_TYPELOAD (klass);
8111 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8112 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8113 gpointer addr = (char*)vtable->data + field->offset;
8114 int ro_type = field->type->type;
8115 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
8116 ro_type = mono_class_enum_basetype (field->type->data.klass)->type;
8118 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8121 case MONO_TYPE_BOOLEAN:
8123 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8127 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8130 case MONO_TYPE_CHAR:
8132 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8136 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8141 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8145 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8148 #ifndef HAVE_MOVING_COLLECTOR
8151 case MONO_TYPE_STRING:
8152 case MONO_TYPE_OBJECT:
8153 case MONO_TYPE_CLASS:
8154 case MONO_TYPE_SZARRAY:
8156 case MONO_TYPE_FNPTR:
8157 case MONO_TYPE_ARRAY:
8158 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8159 type_to_eval_stack_type ((cfg), field->type, *sp);
8165 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8170 case MONO_TYPE_VALUETYPE:
8180 CHECK_STACK_OVF (1);
8182 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8183 load->flags |= ins_flag;
8196 token = read32 (ip + 1);
8197 klass = mini_get_class (method, token, generic_context);
8198 CHECK_TYPELOAD (klass);
8199 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8200 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8211 const char *data_ptr;
8213 guint32 field_token;
8219 token = read32 (ip + 1);
8221 klass = mini_get_class (method, token, generic_context);
8222 CHECK_TYPELOAD (klass);
8224 if (cfg->generic_sharing_context)
8225 context_used = mono_class_check_context_used (klass);
8230 /* FIXME: Decompose later to help abcrem */
8233 args [0] = emit_get_rgctx_klass (cfg, context_used,
8234 mono_array_class_get (klass, 1), MONO_RGCTX_INFO_VTABLE);
8239 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8241 if (cfg->opt & MONO_OPT_SHARED) {
8242 /* Decompose now to avoid problems with references to the domainvar */
8243 MonoInst *iargs [3];
8245 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8246 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8249 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8251 /* Decompose later since it is needed by abcrem */
8252 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8253 ins->dreg = alloc_preg (cfg);
8254 ins->sreg1 = sp [0]->dreg;
8255 ins->inst_newa_class = klass;
8256 ins->type = STACK_OBJ;
8258 MONO_ADD_INS (cfg->cbb, ins);
8259 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8260 cfg->cbb->has_array_access = TRUE;
8262 /* Needed so mono_emit_load_get_addr () gets called */
8263 mono_get_got_var (cfg);
8273 * we inline/optimize the initialization sequence if possible.
8274 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8275 * for small sizes open code the memcpy
8276 * ensure the rva field is big enough
8278 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8279 MonoMethod *memcpy_method = get_memcpy_method ();
8280 MonoInst *iargs [3];
8281 int add_reg = alloc_preg (cfg);
8283 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8284 if (cfg->compile_aot) {
8285 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8287 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8289 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8290 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8299 if (sp [0]->type != STACK_OBJ)
8302 dreg = alloc_preg (cfg);
8303 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8304 ins->dreg = alloc_preg (cfg);
8305 ins->sreg1 = sp [0]->dreg;
8306 ins->type = STACK_I4;
8307 MONO_ADD_INS (cfg->cbb, ins);
8308 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8309 cfg->cbb->has_array_access = TRUE;
8317 if (sp [0]->type != STACK_OBJ)
8320 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8322 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8323 CHECK_TYPELOAD (klass);
8324 /* we need to make sure that this array is exactly the type it needs
8325 * to be for correctness. the wrappers are lax with their usage
8326 * so we need to ignore them here
8328 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly)
8329 mini_emit_check_array_type (cfg, sp [0], mono_array_class_get (klass, 1));
8332 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8336 case CEE_LDELEM_ANY:
8347 case CEE_LDELEM_REF: {
8353 if (*ip == CEE_LDELEM_ANY) {
8355 token = read32 (ip + 1);
8356 klass = mini_get_class (method, token, generic_context);
8357 CHECK_TYPELOAD (klass);
8358 mono_class_init (klass);
8361 klass = array_access_to_klass (*ip);
8363 if (sp [0]->type != STACK_OBJ)
8366 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8368 if (sp [1]->opcode == OP_ICONST) {
8369 int array_reg = sp [0]->dreg;
8370 int index_reg = sp [1]->dreg;
8371 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8373 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8374 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8376 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8377 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8380 if (*ip == CEE_LDELEM_ANY)
8393 case CEE_STELEM_REF:
8394 case CEE_STELEM_ANY: {
8400 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8402 if (*ip == CEE_STELEM_ANY) {
8404 token = read32 (ip + 1);
8405 klass = mini_get_class (method, token, generic_context);
8406 CHECK_TYPELOAD (klass);
8407 mono_class_init (klass);
8410 klass = array_access_to_klass (*ip);
8412 if (sp [0]->type != STACK_OBJ)
8415 /* storing a NULL doesn't need any of the complex checks in stelemref */
8416 if (generic_class_is_reference_type (cfg, klass) &&
8417 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8418 MonoMethod* helper = mono_marshal_get_stelemref ();
8419 MonoInst *iargs [3];
8421 if (sp [0]->type != STACK_OBJ)
8423 if (sp [2]->type != STACK_OBJ)
8430 mono_emit_method_call (cfg, helper, iargs, NULL);
8432 if (sp [1]->opcode == OP_ICONST) {
8433 int array_reg = sp [0]->dreg;
8434 int index_reg = sp [1]->dreg;
8435 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8437 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8438 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8440 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8441 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8445 if (*ip == CEE_STELEM_ANY)
8452 case CEE_CKFINITE: {
8456 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8457 ins->sreg1 = sp [0]->dreg;
8458 ins->dreg = alloc_freg (cfg);
8459 ins->type = STACK_R8;
8460 MONO_ADD_INS (bblock, ins);
8463 mono_decompose_opcode (cfg, ins);
8468 case CEE_REFANYVAL: {
8469 MonoInst *src_var, *src;
8471 int klass_reg = alloc_preg (cfg);
8472 int dreg = alloc_preg (cfg);
8475 MONO_INST_NEW (cfg, ins, *ip);
8478 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8479 CHECK_TYPELOAD (klass);
8480 mono_class_init (klass);
8482 if (cfg->generic_sharing_context)
8483 context_used = mono_class_check_context_used (klass);
8486 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8488 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8489 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8490 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8493 MonoInst *klass_ins;
8495 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8496 klass, MONO_RGCTX_INFO_KLASS);
8499 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8500 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8502 mini_emit_class_check (cfg, klass_reg, klass);
8504 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8505 ins->type = STACK_MP;
8510 case CEE_MKREFANY: {
8511 MonoInst *loc, *addr;
8514 MONO_INST_NEW (cfg, ins, *ip);
8517 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8518 CHECK_TYPELOAD (klass);
8519 mono_class_init (klass);
8521 if (cfg->generic_sharing_context)
8522 context_used = mono_class_check_context_used (klass);
8524 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8525 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8528 MonoInst *const_ins;
8529 int type_reg = alloc_preg (cfg);
8531 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8532 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8533 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8534 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8535 } else if (cfg->compile_aot) {
8536 int const_reg = alloc_preg (cfg);
8537 int type_reg = alloc_preg (cfg);
8539 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8540 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8541 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8542 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8544 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8545 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8547 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8549 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8550 ins->type = STACK_VTYPE;
8551 ins->klass = mono_defaults.typed_reference_class;
8558 MonoClass *handle_class;
8560 CHECK_STACK_OVF (1);
8563 n = read32 (ip + 1);
8565 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
8566 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8567 handle = mono_method_get_wrapper_data (method, n);
8568 handle_class = mono_method_get_wrapper_data (method, n + 1);
8569 if (handle_class == mono_defaults.typehandle_class)
8570 handle = &((MonoClass*)handle)->byval_arg;
8573 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8577 mono_class_init (handle_class);
8578 if (cfg->generic_sharing_context) {
8579 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
8580 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
8581 /* This case handles ldtoken
8582 of an open type, like for
8585 } else if (handle_class == mono_defaults.typehandle_class) {
8586 /* If we get a MONO_TYPE_CLASS
8587 then we need to provide the
8589 instantiation of it. */
8590 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8593 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8594 } else if (handle_class == mono_defaults.fieldhandle_class)
8595 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8596 else if (handle_class == mono_defaults.methodhandle_class)
8597 context_used = mono_method_check_context_used (handle);
8599 g_assert_not_reached ();
8602 if ((cfg->opt & MONO_OPT_SHARED) &&
8603 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
8604 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
8605 MonoInst *addr, *vtvar, *iargs [3];
8606 int method_context_used;
8608 if (cfg->generic_sharing_context)
8609 method_context_used = mono_method_check_context_used (method);
8611 method_context_used = 0;
8613 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8615 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8616 EMIT_NEW_ICONST (cfg, iargs [1], n);
8617 if (method_context_used) {
8618 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
8619 method, MONO_RGCTX_INFO_METHOD);
8620 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8622 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
8623 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
8625 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8627 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8629 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8631 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
8632 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
8633 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
8634 (cmethod->klass == mono_defaults.monotype_class->parent) &&
8635 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
8636 MonoClass *tclass = mono_class_from_mono_type (handle);
8638 mono_class_init (tclass);
8640 ins = emit_get_rgctx_klass (cfg, context_used,
8641 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
8642 } else if (cfg->compile_aot) {
8643 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
8645 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
8647 ins->type = STACK_OBJ;
8648 ins->klass = cmethod->klass;
8651 MonoInst *addr, *vtvar;
8653 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8656 if (handle_class == mono_defaults.typehandle_class) {
8657 ins = emit_get_rgctx_klass (cfg, context_used,
8658 mono_class_from_mono_type (handle),
8659 MONO_RGCTX_INFO_TYPE);
8660 } else if (handle_class == mono_defaults.methodhandle_class) {
8661 ins = emit_get_rgctx_method (cfg, context_used,
8662 handle, MONO_RGCTX_INFO_METHOD);
8663 } else if (handle_class == mono_defaults.fieldhandle_class) {
8664 ins = emit_get_rgctx_field (cfg, context_used,
8665 handle, MONO_RGCTX_INFO_CLASS_FIELD);
8667 g_assert_not_reached ();
8669 } else if (cfg->compile_aot) {
8670 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
8672 EMIT_NEW_PCONST (cfg, ins, handle);
8674 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8675 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8676 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8686 MONO_INST_NEW (cfg, ins, OP_THROW);
8688 ins->sreg1 = sp [0]->dreg;
8690 bblock->out_of_line = TRUE;
8691 MONO_ADD_INS (bblock, ins);
8692 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
8693 MONO_ADD_INS (bblock, ins);
8696 link_bblock (cfg, bblock, end_bblock);
8697 start_new_bblock = 1;
8699 case CEE_ENDFINALLY:
8700 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
8701 MONO_ADD_INS (bblock, ins);
8703 start_new_bblock = 1;
8706 * Control will leave the method so empty the stack, otherwise
8707 * the next basic block will start with a nonempty stack.
8709 while (sp != stack_start) {
8717 if (*ip == CEE_LEAVE) {
8719 target = ip + 5 + (gint32)read32(ip + 1);
8722 target = ip + 2 + (signed char)(ip [1]);
8725 /* empty the stack */
8726 while (sp != stack_start) {
8731 * If this leave statement is in a catch block, check for a
8732 * pending exception, and rethrow it if necessary.
8734 for (i = 0; i < header->num_clauses; ++i) {
8735 MonoExceptionClause *clause = &header->clauses [i];
8738 * Use <= in the final comparison to handle clauses with multiple
8739 * leave statements, like in bug #78024.
8740 * The ordering of the exception clauses guarantees that we find the
8743 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len)) {
8745 MonoBasicBlock *dont_throw;
8750 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
8753 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
8755 NEW_BBLOCK (cfg, dont_throw);
8758 * Currently, we always rethrow the abort exception, despite the
8759 * fact that this is not correct. See thread6.cs for an example.
8760 * But propagating the abort exception is more important than
8761 * getting the semantics right.
8763 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
8764 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
8765 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
8767 MONO_START_BB (cfg, dont_throw);
8772 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
8774 for (tmp = handlers; tmp; tmp = tmp->next) {
8776 link_bblock (cfg, bblock, tblock);
8777 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
8778 ins->inst_target_bb = tblock;
8779 MONO_ADD_INS (bblock, ins);
8781 g_list_free (handlers);
8784 MONO_INST_NEW (cfg, ins, OP_BR);
8785 MONO_ADD_INS (bblock, ins);
8786 GET_BBLOCK (cfg, tblock, target);
8787 link_bblock (cfg, bblock, tblock);
8788 ins->inst_target_bb = tblock;
8789 start_new_bblock = 1;
8791 if (*ip == CEE_LEAVE)
8800 * Mono specific opcodes
8802 case MONO_CUSTOM_PREFIX: {
8804 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
8808 case CEE_MONO_ICALL: {
8810 MonoJitICallInfo *info;
8812 token = read32 (ip + 2);
8813 func = mono_method_get_wrapper_data (method, token);
8814 info = mono_find_jit_icall_by_addr (func);
8817 CHECK_STACK (info->sig->param_count);
8818 sp -= info->sig->param_count;
8820 ins = mono_emit_jit_icall (cfg, info->func, sp);
8821 if (!MONO_TYPE_IS_VOID (info->sig->ret))
8825 inline_costs += 10 * num_calls++;
8829 case CEE_MONO_LDPTR: {
8832 CHECK_STACK_OVF (1);
8834 token = read32 (ip + 2);
8836 ptr = mono_method_get_wrapper_data (method, token);
8837 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
8838 MonoJitICallInfo *callinfo;
8839 const char *icall_name;
8841 icall_name = method->name + strlen ("__icall_wrapper_");
8842 g_assert (icall_name);
8843 callinfo = mono_find_jit_icall_by_name (icall_name);
8844 g_assert (callinfo);
8846 if (ptr == callinfo->func) {
8847 /* Will be transformed into an AOTCONST later */
8848 EMIT_NEW_PCONST (cfg, ins, ptr);
8854 /* FIXME: Generalize this */
8855 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
8856 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
8861 EMIT_NEW_PCONST (cfg, ins, ptr);
8864 inline_costs += 10 * num_calls++;
8865 /* Can't embed random pointers into AOT code */
8866 cfg->disable_aot = 1;
8869 case CEE_MONO_ICALL_ADDR: {
8870 MonoMethod *cmethod;
8873 CHECK_STACK_OVF (1);
8875 token = read32 (ip + 2);
8877 cmethod = mono_method_get_wrapper_data (method, token);
8879 if (cfg->compile_aot) {
8880 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
8882 ptr = mono_lookup_internal_call (cmethod);
8884 EMIT_NEW_PCONST (cfg, ins, ptr);
8890 case CEE_MONO_VTADDR: {
8891 MonoInst *src_var, *src;
8897 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8898 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
8903 case CEE_MONO_NEWOBJ: {
8904 MonoInst *iargs [2];
8906 CHECK_STACK_OVF (1);
8908 token = read32 (ip + 2);
8909 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8910 mono_class_init (klass);
8911 NEW_DOMAINCONST (cfg, iargs [0]);
8912 MONO_ADD_INS (cfg->cbb, iargs [0]);
8913 NEW_CLASSCONST (cfg, iargs [1], klass);
8914 MONO_ADD_INS (cfg->cbb, iargs [1]);
8915 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
8917 inline_costs += 10 * num_calls++;
8920 case CEE_MONO_OBJADDR:
8923 MONO_INST_NEW (cfg, ins, OP_MOVE);
8924 ins->dreg = alloc_preg (cfg);
8925 ins->sreg1 = sp [0]->dreg;
8926 ins->type = STACK_MP;
8927 MONO_ADD_INS (cfg->cbb, ins);
8931 case CEE_MONO_LDNATIVEOBJ:
8933 * Similar to LDOBJ, but instead load the unmanaged
8934 * representation of the vtype to the stack.
8939 token = read32 (ip + 2);
8940 klass = mono_method_get_wrapper_data (method, token);
8941 g_assert (klass->valuetype);
8942 mono_class_init (klass);
8945 MonoInst *src, *dest, *temp;
8948 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
8949 temp->backend.is_pinvoke = 1;
8950 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
8951 mini_emit_stobj (cfg, dest, src, klass, TRUE);
8953 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
8954 dest->type = STACK_VTYPE;
8955 dest->klass = klass;
8961 case CEE_MONO_RETOBJ: {
8963 * Same as RET, but return the native representation of a vtype
8966 g_assert (cfg->ret);
8967 g_assert (mono_method_signature (method)->pinvoke);
8972 token = read32 (ip + 2);
8973 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8975 if (!cfg->vret_addr) {
8976 g_assert (cfg->ret_var_is_local);
8978 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
8980 EMIT_NEW_RETLOADA (cfg, ins);
8982 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
8984 if (sp != stack_start)
8987 MONO_INST_NEW (cfg, ins, OP_BR);
8988 ins->inst_target_bb = end_bblock;
8989 MONO_ADD_INS (bblock, ins);
8990 link_bblock (cfg, bblock, end_bblock);
8991 start_new_bblock = 1;
8995 case CEE_MONO_CISINST:
8996 case CEE_MONO_CCASTCLASS: {
9001 token = read32 (ip + 2);
9002 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9003 if (ip [1] == CEE_MONO_CISINST)
9004 ins = handle_cisinst (cfg, klass, sp [0]);
9006 ins = handle_ccastclass (cfg, klass, sp [0]);
9012 case CEE_MONO_SAVE_LMF:
9013 case CEE_MONO_RESTORE_LMF:
9014 #ifdef MONO_ARCH_HAVE_LMF_OPS
9015 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9016 MONO_ADD_INS (bblock, ins);
9017 cfg->need_lmf_area = TRUE;
9021 case CEE_MONO_CLASSCONST:
9022 CHECK_STACK_OVF (1);
9024 token = read32 (ip + 2);
9025 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9028 inline_costs += 10 * num_calls++;
9030 case CEE_MONO_NOT_TAKEN:
9031 bblock->out_of_line = TRUE;
9035 CHECK_STACK_OVF (1);
9037 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9038 ins->dreg = alloc_preg (cfg);
9039 ins->inst_offset = (gint32)read32 (ip + 2);
9040 ins->type = STACK_PTR;
9041 MONO_ADD_INS (bblock, ins);
9046 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9056 /* somewhat similar to LDTOKEN */
9057 MonoInst *addr, *vtvar;
9058 CHECK_STACK_OVF (1);
9059 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9061 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9062 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9064 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9065 ins->type = STACK_VTYPE;
9066 ins->klass = mono_defaults.argumenthandle_class;
9079 * The following transforms:
9080 * CEE_CEQ into OP_CEQ
9081 * CEE_CGT into OP_CGT
9082 * CEE_CGT_UN into OP_CGT_UN
9083 * CEE_CLT into OP_CLT
9084 * CEE_CLT_UN into OP_CLT_UN
9086 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9088 MONO_INST_NEW (cfg, ins, cmp->opcode);
9090 cmp->sreg1 = sp [0]->dreg;
9091 cmp->sreg2 = sp [1]->dreg;
9092 type_from_op (cmp, sp [0], sp [1]);
9094 if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9095 cmp->opcode = OP_LCOMPARE;
9096 else if (sp [0]->type == STACK_R8)
9097 cmp->opcode = OP_FCOMPARE;
9099 cmp->opcode = OP_ICOMPARE;
9100 MONO_ADD_INS (bblock, cmp);
9101 ins->type = STACK_I4;
9102 ins->dreg = alloc_dreg (cfg, ins->type);
9103 type_from_op (ins, sp [0], sp [1]);
9105 if (cmp->opcode == OP_FCOMPARE) {
9107 * The backends expect the fceq opcodes to do the
9110 cmp->opcode = OP_NOP;
9111 ins->sreg1 = cmp->sreg1;
9112 ins->sreg2 = cmp->sreg2;
9114 MONO_ADD_INS (bblock, ins);
9121 MonoMethod *cil_method;
9122 gboolean needs_static_rgctx_invoke;
9124 CHECK_STACK_OVF (1);
9126 n = read32 (ip + 2);
9127 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9130 mono_class_init (cmethod->klass);
9132 mono_save_token_info (cfg, image, n, cmethod);
9134 if (cfg->generic_sharing_context)
9135 context_used = mono_method_check_context_used (cmethod);
9137 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9139 cil_method = cmethod;
9140 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9141 METHOD_ACCESS_FAILURE;
9143 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9144 if (check_linkdemand (cfg, method, cmethod))
9146 CHECK_CFG_EXCEPTION;
9147 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9148 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9152 * Optimize the common case of ldftn+delegate creation
9154 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
9155 /* FIXME: SGEN support */
9156 /* FIXME: handle shared static generic methods */
9157 /* FIXME: handle this in shared code */
9158 if (!needs_static_rgctx_invoke && !context_used && (sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9159 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9160 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9161 MonoInst *target_ins;
9164 invoke = mono_get_delegate_invoke (ctor_method->klass);
9165 if (!invoke || !mono_method_signature (invoke))
9169 if (cfg->verbose_level > 3)
9170 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9171 target_ins = sp [-1];
9173 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod);
9182 if (needs_static_rgctx_invoke)
9183 cmethod = mono_marshal_get_static_rgctx_invoke (cmethod);
9185 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9186 } else if (needs_static_rgctx_invoke) {
9187 EMIT_NEW_METHODCONST (cfg, argconst, mono_marshal_get_static_rgctx_invoke (cmethod));
9189 EMIT_NEW_METHODCONST (cfg, argconst, cmethod);
9191 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9195 inline_costs += 10 * num_calls++;
9198 case CEE_LDVIRTFTN: {
9203 n = read32 (ip + 2);
9204 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9207 mono_class_init (cmethod->klass);
9209 if (cfg->generic_sharing_context)
9210 context_used = mono_method_check_context_used (cmethod);
9212 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9213 if (check_linkdemand (cfg, method, cmethod))
9215 CHECK_CFG_EXCEPTION;
9216 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9217 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9224 args [1] = emit_get_rgctx_method (cfg, context_used,
9225 cmethod, MONO_RGCTX_INFO_METHOD);
9226 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9228 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
9229 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9233 inline_costs += 10 * num_calls++;
9237 CHECK_STACK_OVF (1);
9239 n = read16 (ip + 2);
9241 EMIT_NEW_ARGLOAD (cfg, ins, n);
9246 CHECK_STACK_OVF (1);
9248 n = read16 (ip + 2);
9250 NEW_ARGLOADA (cfg, ins, n);
9251 MONO_ADD_INS (cfg->cbb, ins);
9259 n = read16 (ip + 2);
9261 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9263 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9267 CHECK_STACK_OVF (1);
9269 n = read16 (ip + 2);
9271 EMIT_NEW_LOCLOAD (cfg, ins, n);
9276 unsigned char *tmp_ip;
9277 CHECK_STACK_OVF (1);
9279 n = read16 (ip + 2);
9282 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9288 EMIT_NEW_LOCLOADA (cfg, ins, n);
9297 n = read16 (ip + 2);
9299 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9301 emit_stloc_ir (cfg, sp, header, n);
9308 if (sp != stack_start)
9310 if (cfg->method != method)
9312 * Inlining this into a loop in a parent could lead to
9313 * stack overflows which is different behavior than the
9314 * non-inlined case, thus disable inlining in this case.
9316 goto inline_failure;
9318 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9319 ins->dreg = alloc_preg (cfg);
9320 ins->sreg1 = sp [0]->dreg;
9321 ins->type = STACK_PTR;
9322 MONO_ADD_INS (cfg->cbb, ins);
9324 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9325 if (header->init_locals)
9326 ins->flags |= MONO_INST_INIT;
9331 case CEE_ENDFILTER: {
9332 MonoExceptionClause *clause, *nearest;
9333 int cc, nearest_num;
9337 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9339 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9340 ins->sreg1 = (*sp)->dreg;
9341 MONO_ADD_INS (bblock, ins);
9342 start_new_bblock = 1;
9347 for (cc = 0; cc < header->num_clauses; ++cc) {
9348 clause = &header->clauses [cc];
9349 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9350 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9351 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9357 if ((ip - header->code) != nearest->handler_offset)
9362 case CEE_UNALIGNED_:
9363 ins_flag |= MONO_INST_UNALIGNED;
9364 /* FIXME: record alignment? we can assume 1 for now */
9369 ins_flag |= MONO_INST_VOLATILE;
9373 ins_flag |= MONO_INST_TAILCALL;
9374 cfg->flags |= MONO_CFG_HAS_TAIL;
9375 /* Can't inline tail calls at this time */
9376 inline_costs += 100000;
9383 token = read32 (ip + 2);
9384 klass = mini_get_class (method, token, generic_context);
9385 CHECK_TYPELOAD (klass);
9386 if (generic_class_is_reference_type (cfg, klass))
9387 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9389 mini_emit_initobj (cfg, *sp, NULL, klass);
9393 case CEE_CONSTRAINED_:
9395 token = read32 (ip + 2);
9396 constrained_call = mono_class_get_full (image, token, generic_context);
9397 CHECK_TYPELOAD (constrained_call);
9402 MonoInst *iargs [3];
9406 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9407 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9408 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9409 /* emit_memset only works when val == 0 */
9410 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9415 if (ip [1] == CEE_CPBLK) {
9416 MonoMethod *memcpy_method = get_memcpy_method ();
9417 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9419 MonoMethod *memset_method = get_memset_method ();
9420 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9430 ins_flag |= MONO_INST_NOTYPECHECK;
9432 ins_flag |= MONO_INST_NORANGECHECK;
9433 /* we ignore the no-nullcheck for now since we
9434 * really do it explicitly only when doing callvirt->call
9440 int handler_offset = -1;
9442 for (i = 0; i < header->num_clauses; ++i) {
9443 MonoExceptionClause *clause = &header->clauses [i];
9444 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9445 handler_offset = clause->handler_offset;
9450 bblock->flags |= BB_EXCEPTION_UNSAFE;
9452 g_assert (handler_offset != -1);
9454 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9455 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9456 ins->sreg1 = load->dreg;
9457 MONO_ADD_INS (bblock, ins);
9459 link_bblock (cfg, bblock, end_bblock);
9460 start_new_bblock = 1;
9468 CHECK_STACK_OVF (1);
9470 token = read32 (ip + 2);
9471 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC) {
9472 MonoType *type = mono_type_create_from_typespec (image, token);
9473 token = mono_type_size (type, &ialign);
9475 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9476 CHECK_TYPELOAD (klass);
9477 mono_class_init (klass);
9478 token = mono_class_value_size (klass, &align);
9480 EMIT_NEW_ICONST (cfg, ins, token);
9485 case CEE_REFANYTYPE: {
9486 MonoInst *src_var, *src;
9492 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9494 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9495 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9496 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9506 g_error ("opcode 0xfe 0x%02x not handled", ip [1]);
9511 g_error ("opcode 0x%02x not handled", *ip);
9514 if (start_new_bblock != 1)
9517 bblock->cil_length = ip - bblock->cil_code;
9518 bblock->next_bb = end_bblock;
9520 if (cfg->method == method && cfg->domainvar) {
9522 MonoInst *get_domain;
9524 cfg->cbb = init_localsbb;
9526 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9527 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9530 get_domain->dreg = alloc_preg (cfg);
9531 MONO_ADD_INS (cfg->cbb, get_domain);
9533 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9534 MONO_ADD_INS (cfg->cbb, store);
9537 if (cfg->method == method && cfg->got_var)
9538 mono_emit_load_got_addr (cfg);
9540 if (header->init_locals) {
9543 cfg->cbb = init_localsbb;
9545 for (i = 0; i < header->num_locals; ++i) {
9546 MonoType *ptype = header->locals [i];
9547 int t = ptype->type;
9548 dreg = cfg->locals [i]->dreg;
9550 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
9551 t = mono_class_enum_basetype (ptype->data.klass)->type;
9553 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9554 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
9555 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
9556 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
9557 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
9558 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
9559 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9560 ins->type = STACK_R8;
9561 ins->inst_p0 = (void*)&r8_0;
9562 ins->dreg = alloc_dreg (cfg, STACK_R8);
9563 MONO_ADD_INS (init_localsbb, ins);
9564 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
9565 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
9566 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
9567 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
9569 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9576 if (cfg->method == method) {
9578 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9579 bb->region = mono_find_block_region (cfg, bb->real_offset);
9581 mono_create_spvar_for_region (cfg, bb->region);
9582 if (cfg->verbose_level > 2)
9583 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
9587 g_slist_free (class_inits);
9588 dont_inline = g_list_remove (dont_inline, method);
9590 if (inline_costs < 0) {
9593 /* Method is too large */
9594 mname = mono_method_full_name (method, TRUE);
9595 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
9596 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
9601 if ((cfg->verbose_level > 2) && (cfg->method == method))
9602 mono_print_code (cfg, "AFTER METHOD-TO-IR");
9604 return inline_costs;
9607 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
9608 g_slist_free (class_inits);
9609 dont_inline = g_list_remove (dont_inline, method);
9613 g_slist_free (class_inits);
9614 dont_inline = g_list_remove (dont_inline, method);
9618 g_slist_free (class_inits);
9619 dont_inline = g_list_remove (dont_inline, method);
9620 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
9624 g_slist_free (class_inits);
9625 dont_inline = g_list_remove (dont_inline, method);
9626 set_exception_type_from_invalid_il (cfg, method, ip);
9631 store_membase_reg_to_store_membase_imm (int opcode)
9634 case OP_STORE_MEMBASE_REG:
9635 return OP_STORE_MEMBASE_IMM;
9636 case OP_STOREI1_MEMBASE_REG:
9637 return OP_STOREI1_MEMBASE_IMM;
9638 case OP_STOREI2_MEMBASE_REG:
9639 return OP_STOREI2_MEMBASE_IMM;
9640 case OP_STOREI4_MEMBASE_REG:
9641 return OP_STOREI4_MEMBASE_IMM;
9642 case OP_STOREI8_MEMBASE_REG:
9643 return OP_STOREI8_MEMBASE_IMM;
9645 g_assert_not_reached ();
9651 #endif /* DISABLE_JIT */
9654 mono_op_to_op_imm (int opcode)
9664 return OP_IDIV_UN_IMM;
9668 return OP_IREM_UN_IMM;
9682 return OP_ISHR_UN_IMM;
9699 return OP_LSHR_UN_IMM;
9702 return OP_COMPARE_IMM;
9704 return OP_ICOMPARE_IMM;
9706 return OP_LCOMPARE_IMM;
9708 case OP_STORE_MEMBASE_REG:
9709 return OP_STORE_MEMBASE_IMM;
9710 case OP_STOREI1_MEMBASE_REG:
9711 return OP_STOREI1_MEMBASE_IMM;
9712 case OP_STOREI2_MEMBASE_REG:
9713 return OP_STOREI2_MEMBASE_IMM;
9714 case OP_STOREI4_MEMBASE_REG:
9715 return OP_STOREI4_MEMBASE_IMM;
9717 #if defined(__i386__) || defined (__x86_64__)
9719 return OP_X86_PUSH_IMM;
9720 case OP_X86_COMPARE_MEMBASE_REG:
9721 return OP_X86_COMPARE_MEMBASE_IMM;
9723 #if defined(__x86_64__)
9724 case OP_AMD64_ICOMPARE_MEMBASE_REG:
9725 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9727 case OP_VOIDCALL_REG:
9736 return OP_LOCALLOC_IMM;
9743 ldind_to_load_membase (int opcode)
9747 return OP_LOADI1_MEMBASE;
9749 return OP_LOADU1_MEMBASE;
9751 return OP_LOADI2_MEMBASE;
9753 return OP_LOADU2_MEMBASE;
9755 return OP_LOADI4_MEMBASE;
9757 return OP_LOADU4_MEMBASE;
9759 return OP_LOAD_MEMBASE;
9761 return OP_LOAD_MEMBASE;
9763 return OP_LOADI8_MEMBASE;
9765 return OP_LOADR4_MEMBASE;
9767 return OP_LOADR8_MEMBASE;
9769 g_assert_not_reached ();
9776 stind_to_store_membase (int opcode)
/*
 * Map a CIL STIND_* opcode to the corresponding typed OP_STORE*_MEMBASE_REG
 * low-level store. Asserts on opcodes with no mapping.
 * NOTE(review): case labels are elided from this view.
 */
9780 return OP_STOREI1_MEMBASE_REG;
9782 return OP_STOREI2_MEMBASE_REG;
9784 return OP_STOREI4_MEMBASE_REG;
/* Native-int / reference stores use the pointer-sized store. */
9787 return OP_STORE_MEMBASE_REG;
9789 return OP_STOREI8_MEMBASE_REG;
9791 return OP_STORER4_MEMBASE_REG;
9793 return OP_STORER8_MEMBASE_REG;
9795 g_assert_not_reached ();
9802 mono_load_membase_to_load_mem (int opcode)
/*
 * Map an OP_LOAD*_MEMBASE opcode to the absolute-address OP_LOAD*_MEM form,
 * for targets that can load directly from a constant address (x86/amd64
 * only, per the #if below). Callers presumably get a "no mapping" result
 * for other opcodes/architectures -- the default path is not visible here.
 */
9804 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
9805 #if defined(__i386__) || defined(__x86_64__)
9807 case OP_LOAD_MEMBASE:
9809 case OP_LOADU1_MEMBASE:
9810 return OP_LOADU1_MEM;
9811 case OP_LOADU2_MEMBASE:
9812 return OP_LOADU2_MEM;
9813 case OP_LOADI4_MEMBASE:
9814 return OP_LOADI4_MEM;
9815 case OP_LOADU4_MEMBASE:
9816 return OP_LOADU4_MEM;
/* 64-bit loads only have a _MEM form on 64-bit targets. */
9817 #if SIZEOF_REGISTER == 8
9818 case OP_LOADI8_MEMBASE:
9819 return OP_LOADI8_MEM;
9828 op_to_op_dest_membase (int store_opcode, int opcode)
/*
 * Given an ALU OPCODE whose destination would be stored back to a stack
 * slot via STORE_OPCODE, return a fused read-modify-write *_MEMBASE opcode
 * that operates directly on the slot (x86/amd64 only). Callers treat a
 * result of -1 as "cannot fuse" (see the op_to_op_dest_membase () != -1
 * checks in mono_spill_global_vars); the default return is not visible in
 * this chunk.
 */
9830 #if defined(__i386__)
/* Only pointer-sized / i4 stores can be fused on x86. */
9831 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
9836 return OP_X86_ADD_MEMBASE_REG;
9838 return OP_X86_SUB_MEMBASE_REG;
9840 return OP_X86_AND_MEMBASE_REG;
9842 return OP_X86_OR_MEMBASE_REG;
9844 return OP_X86_XOR_MEMBASE_REG;
9847 return OP_X86_ADD_MEMBASE_IMM;
9850 return OP_X86_SUB_MEMBASE_IMM;
9853 return OP_X86_AND_MEMBASE_IMM;
9856 return OP_X86_OR_MEMBASE_IMM;
9859 return OP_X86_XOR_MEMBASE_IMM;
9865 #if defined(__x86_64__)
/* amd64 additionally allows i8 stores to be fused. */
9866 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
9871 return OP_X86_ADD_MEMBASE_REG;
9873 return OP_X86_SUB_MEMBASE_REG;
9875 return OP_X86_AND_MEMBASE_REG;
9877 return OP_X86_OR_MEMBASE_REG;
9879 return OP_X86_XOR_MEMBASE_REG;
9881 return OP_X86_ADD_MEMBASE_IMM;
9883 return OP_X86_SUB_MEMBASE_IMM;
9885 return OP_X86_AND_MEMBASE_IMM;
9887 return OP_X86_OR_MEMBASE_IMM;
9889 return OP_X86_XOR_MEMBASE_IMM;
/* 64-bit-wide variants. */
9891 return OP_AMD64_ADD_MEMBASE_REG;
9893 return OP_AMD64_SUB_MEMBASE_REG;
9895 return OP_AMD64_AND_MEMBASE_REG;
9897 return OP_AMD64_OR_MEMBASE_REG;
9899 return OP_AMD64_XOR_MEMBASE_REG;
9902 return OP_AMD64_ADD_MEMBASE_IMM;
9905 return OP_AMD64_SUB_MEMBASE_IMM;
9908 return OP_AMD64_AND_MEMBASE_IMM;
9911 return OP_AMD64_OR_MEMBASE_IMM;
9914 return OP_AMD64_XOR_MEMBASE_IMM;
9924 op_to_op_store_membase (int store_opcode, int opcode)
/*
 * Fuse an instruction whose byte-sized result would be stored through
 * STORE_OPCODE into a single setcc-to-memory opcode (x86/amd64). Callers
 * treat -1 as "cannot fuse"; the default return is outside this view, as
 * are the case labels (presumably OP_ICEQ / OP_ICNEQ style opcodes --
 * confirm against the full source).
 */
9926 #if defined(__i386__) || defined(__x86_64__)
9929 if (store_opcode == OP_STOREI1_MEMBASE_REG)
9930 return OP_X86_SETEQ_MEMBASE;
9932 if (store_opcode == OP_STOREI1_MEMBASE_REG)
9933 return OP_X86_SETNE_MEMBASE;
9941 op_to_op_src1_membase (int load_opcode, int opcode)
/*
 * Fuse a load of sreg1 from a stack slot (LOAD_OPCODE) into the consuming
 * instruction OPCODE, yielding a *_MEMBASE form that reads the slot
 * directly (x86/amd64). Callers treat -1 as "cannot fuse"; the default
 * return and the #if/arch structure are partially elided here.
 */
9944 /* FIXME: This has sign extension issues */
/* Byte compare against an immediate can read the slot directly. */
9946 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
9947 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Otherwise only pointer-sized / i4 loads can be fused on x86. */
9950 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
9955 return OP_X86_PUSH_MEMBASE;
9956 case OP_COMPARE_IMM:
9957 case OP_ICOMPARE_IMM:
9958 return OP_X86_COMPARE_MEMBASE_IMM;
9961 return OP_X86_COMPARE_MEMBASE_REG;
/* amd64 variants below. */
9966 /* FIXME: This has sign extension issues */
9968 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
9969 return OP_X86_COMPARE_MEMBASE8_IMM;
9974 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9975 return OP_X86_PUSH_MEMBASE;
/* Kept disabled: immediates wider than 32 bits cannot be encoded. */
9977 /* FIXME: This only works for 32 bit immediates
9978 case OP_COMPARE_IMM:
9979 case OP_LCOMPARE_IMM:
9980 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9981 return OP_AMD64_COMPARE_MEMBASE_IMM;
9983 case OP_ICOMPARE_IMM:
9984 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9985 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9989 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9990 return OP_AMD64_COMPARE_MEMBASE_REG;
9993 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9994 return OP_AMD64_ICOMPARE_MEMBASE_REG;
10003 op_to_op_src2_membase (int load_opcode, int opcode)
/*
 * Fuse a load of sreg2 from a stack slot (LOAD_OPCODE) into the consuming
 * instruction OPCODE, yielding a reg-op-membase form (x86/amd64). Callers
 * treat -1 as "cannot fuse"; the default return and arch #if structure are
 * partially elided from this view.
 */
10006 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10012 return OP_X86_COMPARE_REG_MEMBASE;
10014 return OP_X86_ADD_REG_MEMBASE;
10016 return OP_X86_SUB_REG_MEMBASE;
10018 return OP_X86_AND_REG_MEMBASE;
10020 return OP_X86_OR_REG_MEMBASE;
10022 return OP_X86_XOR_REG_MEMBASE;
/* amd64: pick the 32-bit or 64-bit form based on the load width. */
10029 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10030 return OP_AMD64_ICOMPARE_REG_MEMBASE;
10034 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10035 return OP_AMD64_COMPARE_REG_MEMBASE;
10038 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10039 return OP_X86_ADD_REG_MEMBASE;
10041 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10042 return OP_X86_SUB_REG_MEMBASE;
10044 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10045 return OP_X86_AND_REG_MEMBASE;
10047 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10048 return OP_X86_OR_REG_MEMBASE;
10050 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10051 return OP_X86_XOR_REG_MEMBASE;
10053 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10054 return OP_AMD64_ADD_REG_MEMBASE;
10056 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10057 return OP_AMD64_SUB_REG_MEMBASE;
10059 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10060 return OP_AMD64_AND_REG_MEMBASE;
10062 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10063 return OP_AMD64_OR_REG_MEMBASE;
10065 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10066 return OP_AMD64_XOR_REG_MEMBASE;
10074 mono_op_to_op_imm_noemul (int opcode)
/*
 * Like mono_op_to_op_imm (), but refuses opcodes that are emulated in
 * software on this target (long shifts on 32-bit registers, mul/div per
 * the #if guards below) -- converting those to immediate forms would
 * bypass the emulation path. Elided lines presumably return -1 for the
 * excluded opcodes; confirm against the full source.
 */
10077 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
10082 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10090 return mono_op_to_op_imm (opcode);
10094 #ifndef DISABLE_JIT
10097 * mono_handle_global_vregs:
10099 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
10103 mono_handle_global_vregs (MonoCompile *cfg)
10105 gint32 *vreg_to_bb;
10106 MonoBasicBlock *bb;
/*
 * NOTE(review): vreg_to_bb holds gint32 entries, but the allocation below
 * sizes them with sizeof (gint32*) and the "+ 1" sits outside the
 * multiplication (sizeof * n + 1, not sizeof * (n + 1)). Over-allocates on
 * 64-bit and looks like an off-by-one in intent -- confirm.
 */
10109 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
10111 #ifdef MONO_ARCH_SIMD_INTRINSICS
10112 if (cfg->uses_simd_intrinsics)
10113 mono_simd_simplify_indirection (cfg);
/*
 * Pass 1: record, per vreg, the single bblock using it (block_num + 1 so 0
 * can mean "unused"), or -1 once it is seen in a second bblock; vregs used
 * in more than one bblock are promoted to global variables.
 */
10116 /* Find local vregs used in more than one bb */
10117 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10118 MonoInst *ins = bb->code;
10119 int block_num = bb->block_num;
10121 if (cfg->verbose_level > 2)
10122 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
10125 for (; ins; ins = ins->next) {
10126 const char *spec = INS_INFO (ins->opcode);
10127 int regtype, regindex;
10130 if (G_UNLIKELY (cfg->verbose_level > 2))
10131 mono_print_ins (ins);
/* Only low-level (already decomposed) opcodes are expected here. */
10133 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Examine dreg, sreg1, sreg2 and sreg3 of the instruction in turn. */
10135 for (regindex = 0; regindex < 4; regindex ++) {
10138 if (regindex == 0) {
10139 regtype = spec [MONO_INST_DEST];
10140 if (regtype == ' ')
10143 } else if (regindex == 1) {
10144 regtype = spec [MONO_INST_SRC1];
10145 if (regtype == ' ')
10148 } else if (regindex == 2) {
10149 regtype = spec [MONO_INST_SRC2];
10150 if (regtype == ' ')
10153 } else if (regindex == 3) {
10154 regtype = spec [MONO_INST_SRC3];
10155 if (regtype == ' ')
10160 #if SIZEOF_REGISTER == 4
10161 if (regtype == 'l') {
10163 * Since some instructions reference the original long vreg,
10164 * and some reference the two component vregs, it is quite hard
10165 * to determine when it needs to be global. So be conservative.
10167 if (!get_vreg_to_inst (cfg, vreg)) {
10168 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10170 if (cfg->verbose_level > 2)
10171 printf ("LONG VREG R%d made global.\n", vreg);
10175 * Make the component vregs volatile since the optimizations can
10176 * get confused otherwise.
10178 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
10179 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
10183 g_assert (vreg != -1);
10185 prev_bb = vreg_to_bb [vreg];
10186 if (prev_bb == 0) {
10187 /* 0 is a valid block num */
10188 vreg_to_bb [vreg] = block_num + 1;
10189 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hardware registers are never turned into variables. */
10190 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
10193 if (!get_vreg_to_inst (cfg, vreg)) {
10194 if (G_UNLIKELY (cfg->verbose_level > 2))
10195 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Create the variable with a type matching the regtype spec char. */
10199 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
10202 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
10205 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
10208 g_assert_not_reached ();
10212 /* Flag as having been used in more than one bb */
10213 vreg_to_bb [vreg] = -1;
/* Pass 2: demote variables used in only one bblock back to local vregs. */
10219 /* If a variable is used in only one bblock, convert it into a local vreg */
10220 for (i = 0; i < cfg->num_varinfo; i++) {
10221 MonoInst *var = cfg->varinfo [i];
10222 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
10224 switch (var->type) {
10230 #if SIZEOF_REGISTER == 8
10233 #if !defined(__i386__) && !defined(MONO_ARCH_SOFT_FLOAT)
10234 /* Enabling this screws up the fp stack on x86 */
10237 /* Arguments are implicitly global */
10238 /* Putting R4 vars into registers doesn't work currently */
10239 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
10241 * Make that the variable's liveness interval doesn't contain a call, since
10242 * that would cause the lvreg to be spilled, making the whole optimization
10245 /* This is too slow for JIT compilation */
10247 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
10249 int def_index, call_index, ins_index;
10250 gboolean spilled = FALSE;
10255 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
10256 const char *spec = INS_INFO (ins->opcode);
10258 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
10259 def_index = ins_index;
/*
 * NOTE(review): both clauses of this '||' test SRC1/sreg1; the second
 * clause presumably should test SRC2/sreg2 (copy-paste bug), which would
 * make the call-in-live-range check miss uses through sreg2 -- confirm.
 */
10261 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
10262 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
10263 if (call_index > def_index) {
10269 if (MONO_IS_CALL (ins))
10270 call_index = ins_index;
10280 if (G_UNLIKELY (cfg->verbose_level > 2))
10281 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* Mark the variable dead; the compaction pass below removes it. */
10282 var->flags |= MONO_INST_IS_DEAD;
10283 cfg->vreg_to_inst [var->dreg] = NULL;
10290 * Compress the varinfo and vars tables so the liveness computation is faster and
10291 * takes up less space.
10294 for (i = 0; i < cfg->num_varinfo; ++i) {
10295 MonoInst *var = cfg->varinfo [i];
10296 if (pos < i && cfg->locals_start == i)
10297 cfg->locals_start = pos;
10298 if (!(var->flags & MONO_INST_IS_DEAD)) {
10300 cfg->varinfo [pos] = cfg->varinfo [i];
10301 cfg->varinfo [pos]->inst_c0 = pos;
10302 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
10303 cfg->vars [pos].idx = pos;
10304 #if SIZEOF_REGISTER == 4
10305 if (cfg->varinfo [pos]->type == STACK_I8) {
10306 /* Modify the two component vars too */
10309 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
10310 var1->inst_c0 = pos;
10311 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
10312 var1->inst_c0 = pos;
10319 cfg->num_varinfo = pos;
10320 if (cfg->locals_start > cfg->num_varinfo)
10321 cfg->locals_start = cfg->num_varinfo;
10325 * mono_spill_global_vars:
10327 * Generate spill code for variables which are not allocated to registers,
10328 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10329 * code is generated which could be optimized by the local optimization passes.
10332 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
10334 MonoBasicBlock *bb;
10336 int orig_next_vreg;
10337 guint32 *vreg_to_lvreg;
10339 guint32 i, lvregs_len;
10340 gboolean dest_has_lvreg = FALSE;
10341 guint32 stacktypes [128];
10342 MonoInst **live_range_start, **live_range_end;
10343 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
10345 *need_local_opts = FALSE;
10347 memset (spec2, 0, sizeof (spec2));
10349 /* FIXME: Move this function to mini.c */
/* Map regtype spec characters to stack types for alloc_dreg (). */
10350 stacktypes ['i'] = STACK_PTR;
10351 stacktypes ['l'] = STACK_I8;
10352 stacktypes ['f'] = STACK_R8;
10353 #ifdef MONO_ARCH_SIMD_INTRINSICS
10354 stacktypes ['x'] = STACK_VTYPE;
10357 #if SIZEOF_REGISTER == 4
10358 /* Create MonoInsts for longs */
10359 for (i = 0; i < cfg->num_varinfo; i++) {
10360 MonoInst *ins = cfg->varinfo [i];
10362 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
10363 switch (ins->type) {
10364 #ifdef MONO_ARCH_SOFT_FLOAT
/* Give the two 32-bit component vregs REGOFFSETs covering the halves
 * of the stack slot, honoring the target's word order. */
10370 g_assert (ins->opcode == OP_REGOFFSET);
10372 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10374 tree->opcode = OP_REGOFFSET;
10375 tree->inst_basereg = ins->inst_basereg;
10376 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10378 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10380 tree->opcode = OP_REGOFFSET;
10381 tree->inst_basereg = ins->inst_basereg;
10382 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10392 /* FIXME: widening and truncation */
10395 * As an optimization, when a variable allocated to the stack is first loaded into
10396 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10397 * the variable again.
10399 orig_next_vreg = cfg->next_vreg;
10400 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
/* Fixed-capacity list of vregs with a cached lvreg (1024 max, asserted
 * at each insertion below). */
10401 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10405 * These arrays contain the first and last instructions accessing a given
10407 * Since we emit bblocks in the same order we process them here, and we
10408 * don't split live ranges, these will precisely describe the live range of
10409 * the variable, i.e. the instruction range where a valid value can be found
10410 * in the variables location.
10412 /* FIXME: Only do this if debugging info is requested */
10413 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
10414 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
10415 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10416 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10418 /* Add spill loads/stores */
10419 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10422 if (cfg->verbose_level > 2)
10423 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
/* lvreg caching is per-bblock: reset the cache at block entry. */
10425 /* Clear vreg_to_lvreg array */
10426 for (i = 0; i < lvregs_len; i++)
10427 vreg_to_lvreg [lvregs [i]] = 0;
10431 MONO_BB_FOR_EACH_INS (bb, ins) {
10432 const char *spec = INS_INFO (ins->opcode);
10433 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
10434 gboolean store, no_lvreg;
10435 int sregs [MONO_MAX_SRC_REGS];
10437 if (G_UNLIKELY (cfg->verbose_level > 2))
10438 mono_print_ins (ins);
10440 if (ins->opcode == OP_NOP)
10444 * We handle LDADDR here as well, since it can only be decomposed
10445 * when variable addresses are known.
10447 if (ins->opcode == OP_LDADDR) {
10448 MonoInst *var = ins->inst_p0;
10450 if (var->opcode == OP_VTARG_ADDR) {
10451 /* Happens on SPARC/S390 where vtypes are passed by reference */
10452 MonoInst *vtaddr = var->inst_left;
10453 if (vtaddr->opcode == OP_REGVAR) {
10454 ins->opcode = OP_MOVE;
10455 ins->sreg1 = vtaddr->dreg;
10457 else if (var->inst_left->opcode == OP_REGOFFSET) {
10458 ins->opcode = OP_LOAD_MEMBASE;
10459 ins->inst_basereg = vtaddr->inst_basereg;
10460 ins->inst_offset = vtaddr->inst_offset;
10464 g_assert (var->opcode == OP_REGOFFSET);
/* Materialize the address as basereg + offset. */
10466 ins->opcode = OP_ADD_IMM;
10467 ins->sreg1 = var->inst_basereg;
10468 ins->inst_imm = var->inst_offset;
10471 *need_local_opts = TRUE;
10472 spec = INS_INFO (ins->opcode);
/* By this point only low-level opcodes should remain. */
10475 if (ins->opcode < MONO_CEE_LAST) {
10476 mono_print_ins (ins);
10477 g_assert_not_reached ();
10481 * Store opcodes have destbasereg in the dreg, but in reality, it is an
10485 if (MONO_IS_STORE_MEMBASE (ins)) {
/* Swap dreg/sreg2 so the value being stored is processed as the
 * destination; swapped back after processing (see below). */
10486 tmp_reg = ins->dreg;
10487 ins->dreg = ins->sreg2;
10488 ins->sreg2 = tmp_reg;
/* Build a matching synthetic spec in spec2. */
10491 spec2 [MONO_INST_DEST] = ' ';
10492 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10493 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10494 spec2 [MONO_INST_SRC3] = ' ';
10496 } else if (MONO_IS_STORE_MEMINDEX (ins))
10497 g_assert_not_reached ();
10502 if (G_UNLIKELY (cfg->verbose_level > 2)) {
10503 printf ("\t %.3s %d", spec, ins->dreg);
10504 num_sregs = mono_inst_get_src_registers (ins, sregs);
10505 for (srcindex = 0; srcindex < 3; ++srcindex)
10506 printf (" %d", sregs [srcindex]);
/***************/
/*    DREG     */
/***************/
10513 regtype = spec [MONO_INST_DEST];
10514 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
10517 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
10518 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
10519 MonoInst *store_ins;
10521 MonoInst *def_ins = ins;
10522 int dreg = ins->dreg; /* The original vreg */
10524 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
10526 if (var->opcode == OP_REGVAR) {
10527 ins->dreg = var->dreg;
10528 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
10530 * Instead of emitting a load+store, use a _membase opcode.
10532 g_assert (var->opcode == OP_REGOFFSET);
10533 if (ins->opcode == OP_MOVE) {
10537 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
10538 ins->inst_basereg = var->inst_basereg;
10539 ins->inst_offset = var->inst_offset;
10542 spec = INS_INFO (ins->opcode);
10546 g_assert (var->opcode == OP_REGOFFSET);
10548 prev_dreg = ins->dreg;
10550 /* Invalidate any previous lvreg for this vreg */
10551 vreg_to_lvreg [ins->dreg] = 0;
10555 #ifdef MONO_ARCH_SOFT_FLOAT
/* soft-float: r8 values live in integer register pairs. */
10556 if (store_opcode == OP_STORER8_MEMBASE_REG) {
10558 store_opcode = OP_STOREI8_MEMBASE_REG;
/* Redirect the def to a fresh lvreg and spill it to the variable's slot. */
10562 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
10564 if (regtype == 'l') {
10565 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
10566 mono_bblock_insert_after_ins (bb, ins, store_ins);
10567 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
10568 mono_bblock_insert_after_ins (bb, ins, store_ins);
10569 def_ins = store_ins;
10572 g_assert (store_opcode != OP_STOREV_MEMBASE);
10574 /* Try to fuse the store into the instruction itself */
10575 /* FIXME: Add more instructions */
10576 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
10577 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
10578 ins->inst_imm = ins->inst_c0;
10579 ins->inst_destbasereg = var->inst_basereg;
10580 ins->inst_offset = var->inst_offset;
10581 spec = INS_INFO (ins->opcode);
10582 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
/* A plain move becomes a direct store of its source. */
10583 ins->opcode = store_opcode;
10584 ins->inst_destbasereg = var->inst_basereg;
10585 ins->inst_offset = var->inst_offset;
/* The instruction is now a store: apply the same dreg/sreg2 swap
 * and spec2 rewrite as for pre-existing stores above. */
10589 tmp_reg = ins->dreg;
10590 ins->dreg = ins->sreg2;
10591 ins->sreg2 = tmp_reg;
10594 spec2 [MONO_INST_DEST] = ' ';
10595 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10596 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10597 spec2 [MONO_INST_SRC3] = ' ';
10599 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
10600 // FIXME: The backends expect the base reg to be in inst_basereg
10601 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
10603 ins->inst_basereg = var->inst_basereg;
10604 ins->inst_offset = var->inst_offset;
10605 spec = INS_INFO (ins->opcode);
10607 /* printf ("INS: "); mono_print_ins (ins); */
10608 /* Create a store instruction */
10609 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
10611 /* Insert it after the instruction */
10612 mono_bblock_insert_after_ins (bb, ins, store_ins);
10614 def_ins = store_ins;
10617 * We can't assign ins->dreg to var->dreg here, since the
10618 * sregs could use it. So set a flag, and do it after
10621 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
10622 dest_has_lvreg = TRUE;
/* Track the first def for the LIVERANGE_START emission below. */
10627 if (def_ins && !live_range_start [dreg]) {
10628 live_range_start [dreg] = def_ins;
10629 live_range_start_bb [dreg] = bb;
/************/
/*  SREGS   */
/************/
10636 num_sregs = mono_inst_get_src_registers (ins, sregs);
10637 for (srcindex = 0; srcindex < 3; ++srcindex) {
10638 regtype = spec [MONO_INST_SRC1 + srcindex];
10639 sreg = sregs [srcindex];
10641 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
10642 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
10643 MonoInst *var = get_vreg_to_inst (cfg, sreg);
10644 MonoInst *use_ins = ins;
10645 MonoInst *load_ins;
10646 guint32 load_opcode;
10648 if (var->opcode == OP_REGVAR) {
10649 sregs [srcindex] = var->dreg;
10650 //mono_inst_set_src_registers (ins, sregs);
10651 live_range_end [sreg] = use_ins;
10652 live_range_end_bb [sreg] = bb;
10656 g_assert (var->opcode == OP_REGOFFSET);
10658 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
10660 g_assert (load_opcode != OP_LOADV_MEMBASE);
10662 if (vreg_to_lvreg [sreg]) {
10663 g_assert (vreg_to_lvreg [sreg] != -1);
10665 /* The variable is already loaded to an lvreg */
10666 if (G_UNLIKELY (cfg->verbose_level > 2))
10667 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
10668 sregs [srcindex] = vreg_to_lvreg [sreg];
10669 //mono_inst_set_src_registers (ins, sregs);
10673 /* Try to fuse the load into the instruction */
10674 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
10675 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
10676 sregs [0] = var->inst_basereg;
10677 //mono_inst_set_src_registers (ins, sregs);
10678 ins->inst_offset = var->inst_offset;
10679 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
10680 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
10681 sregs [1] = var->inst_basereg;
10682 //mono_inst_set_src_registers (ins, sregs);
10683 ins->inst_offset = var->inst_offset;
10685 if (MONO_IS_REAL_MOVE (ins)) {
10686 ins->opcode = OP_NOP;
10689 //printf ("%d ", srcindex); mono_print_ins (ins);
/* Load into a fresh lvreg and optionally cache it for later uses. */
10691 sreg = alloc_dreg (cfg, stacktypes [regtype]);
10693 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
10694 if (var->dreg == prev_dreg) {
10696 * sreg refers to the value loaded by the load
10697 * emitted below, but we need to use ins->dreg
10698 * since it refers to the store emitted earlier.
10702 g_assert (sreg != -1);
10703 vreg_to_lvreg [var->dreg] = sreg;
10704 g_assert (lvregs_len < 1024);
10705 lvregs [lvregs_len ++] = var->dreg;
10709 sregs [srcindex] = sreg;
10710 //mono_inst_set_src_registers (ins, sregs);
10712 if (regtype == 'l') {
10714 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
10714 mono_bblock_insert_before_ins (bb, ins, load_ins);
10715 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
10716 mono_bblock_insert_before_ins (bb, ins, load_ins);
10717 use_ins = load_ins;
10720 #if SIZEOF_REGISTER == 4
10721 g_assert (load_opcode != OP_LOADI8_MEMBASE);
10723 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
10724 mono_bblock_insert_before_ins (bb, ins, load_ins);
10725 use_ins = load_ins;
/* Only vregs that existed before this pass have live-range slots. */
10729 if (var->dreg < orig_next_vreg) {
10730 live_range_end [var->dreg] = use_ins;
10731 live_range_end_bb [var->dreg] = bb;
10735 mono_inst_set_src_registers (ins, sregs);
/* Deferred from the DREG section: now safe to cache dreg as lvreg. */
10737 if (dest_has_lvreg) {
10738 g_assert (ins->dreg != -1);
10739 vreg_to_lvreg [prev_dreg] = ins->dreg;
10740 g_assert (lvregs_len < 1024);
10741 lvregs [lvregs_len ++] = prev_dreg;
10742 dest_has_lvreg = FALSE;
/* Undo the dreg/sreg2 swap performed for stores above. */
10746 tmp_reg = ins->dreg;
10747 ins->dreg = ins->sreg2;
10748 ins->sreg2 = tmp_reg;
10751 if (MONO_IS_CALL (ins)) {
/* Calls clobber the lvregs, so the cache must be flushed. */
10752 /* Clear vreg_to_lvreg array */
10753 for (i = 0; i < lvregs_len; i++)
10754 vreg_to_lvreg [lvregs [i]] = 0;
10756 } else if (ins->opcode == OP_NOP) {
10758 MONO_INST_NULLIFY_SREGS (ins);
10761 if (cfg->verbose_level > 2)
10762 mono_print_ins_index (1, ins);
10766 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
10768 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
10769 * by storing the current native offset into MonoMethodVar->live_range_start/end.
10771 for (i = 0; i < cfg->num_varinfo; ++i) {
10772 int vreg = MONO_VARINFO (cfg, i)->vreg;
10775 if (live_range_start [vreg]) {
10776 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
10778 ins->inst_c1 = vreg;
10779 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
10781 if (live_range_end [vreg]) {
10782 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
10784 ins->inst_c1 = vreg;
10785 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
10790 g_free (live_range_start);
10791 g_free (live_range_end);
10792 g_free (live_range_start_bb);
10793 g_free (live_range_end_bb);
10798 * - use 'iadd' instead of 'int_add'
10799 * - handling ovf opcodes: decompose in method_to_ir.
10800 * - unify iregs/fregs
10801 * -> partly done, the missing parts are:
10802 * - a more complete unification would involve unifying the hregs as well, so
10803 * code wouldn't need if (fp) all over the place. but that would mean the hregs
10804 * would no longer map to the machine hregs, so the code generators would need to
10805 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
10806 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
10807 * fp/non-fp branches speeds it up by about 15%.
10808 * - use sext/zext opcodes instead of shifts
10810 * - get rid of TEMPLOADs if possible and use vregs instead
10811 * - clean up usage of OP_P/OP_ opcodes
10812 * - cleanup usage of DUMMY_USE
10813 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
10815 * - set the stack type and allocate a dreg in the EMIT_NEW macros
10816 * - get rid of all the <foo>2 stuff when the new JIT is ready.
10817 * - make sure handle_stack_args () is called before the branch is emitted
10818 * - when the new IR is done, get rid of all unused stuff
10819 * - COMPARE/BEQ as separate instructions or unify them ?
10820 * - keeping them separate allows specialized compare instructions like
10821 * compare_imm, compare_membase
10822 * - most back ends unify fp compare+branch, fp compare+ceq
10823 * - integrate mono_save_args into inline_method
10824 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
10825 * - handle long shift opts on 32 bit platforms somehow: they require
10826 * 3 sregs (2 for arg1 and 1 for arg2)
10827 * - make byref a 'normal' type.
10828 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
10829 * variable if needed.
10830 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
10831 * like inline_method.
10832 * - remove inlining restrictions
10833 * - fix LNEG and enable cfold of INEG
10834 * - generalize x86 optimizations like ldelema as a peephole optimization
10835 * - add store_mem_imm for amd64
10836 * - optimize the loading of the interruption flag in the managed->native wrappers
10837 * - avoid special handling of OP_NOP in passes
10838 * - move code inserting instructions into one function/macro.
10839 * - try a coalescing phase after liveness analysis
10840 * - add float -> vreg conversion + local optimizations on !x86
10841 * - figure out how to handle decomposed branches during optimizations, ie.
10842 * compare+branch, op_jump_table+op_br etc.
10843 * - promote RuntimeXHandles to vregs
10844 * - vtype cleanups:
10845 * - add a NEW_VARLOADA_VREG macro
10846 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
10847 * accessing vtype fields.
10848 * - get rid of I8CONST on 64 bit platforms
10849 * - dealing with the increase in code size due to branches created during opcode
10851 * - use extended basic blocks
10852 * - all parts of the JIT
10853 * - handle_global_vregs () && local regalloc
10854 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
10855 * - sources of increase in code size:
10858 * - isinst and castclass
10859 * - lvregs not allocated to global registers even if used multiple times
10860 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
10862 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
10863 * - add all micro optimizations from the old JIT
10864 * - put tree optimizations into the deadce pass
10865 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
10866 * specific function.
10867 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
10868 * fcompare + branchCC.
10869 * - create a helper function for allocating a stack slot, taking into account
10870 * MONO_CFG_HAS_SPILLUP.
10872 * - merge the ia64 switch changes.
10873 * - optimize mono_regstate2_alloc_int/float.
10874 * - fix the pessimistic handling of variables accessed in exception handler blocks.
10875 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
10876 * parts of the tree could be separated by other instructions, killing the tree
10877 * arguments, or stores killing loads etc. Also, should we fold loads into other
10878 * instructions if the result of the load is used multiple times ?
10879 * - make the REM_IMM optimization in mini-x86.c arch-independent.
10880 * - LAST MERGE: 108395.
10881 * - when returning vtypes in registers, generate IR and append it to the end of the
10882 * last bb instead of doing it in the epilog.
10883 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
10891 - When to decompose opcodes:
10892 - earlier: this makes some optimizations hard to implement, since the low level IR
10893 no longer contains the necessary information. But it is easier to do.
10894 - later: harder to implement, enables more optimizations.
10895 - Branches inside bblocks:
10896 - created when decomposing complex opcodes.
10897 - branches to another bblock: harmless, but not tracked by the branch
10898 optimizations, so need to branch to a label at the start of the bblock.
10899 - branches to inside the same bblock: very problematic, trips up the local
10900 reg allocator. Can be fixed by splitting the current bblock, but that is a
10901 complex operation, since some local vregs can become global vregs etc.
10902 - Local/global vregs:
10903 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
10904 local register allocator.
10905 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
10906 structure, created by mono_create_var (). Assigned to hregs or the stack by
10907 the global register allocator.
10908 - When to do optimizations like alu->alu_imm:
10909 - earlier -> saves work later on since the IR will be smaller/simpler
10910 - later -> can work on more instructions
10911 - Handling of valuetypes:
10912 - When a vtype is pushed on the stack, a new temporary is created, an
10913 instruction computing its address (LDADDR) is emitted and pushed on
10914 the stack. Need to optimize cases when the vtype is used immediately as in
10915 argument passing, stloc etc.
10916 - Instead of the to_end stuff in the old JIT, simply call the function handling
10917 the values on the stack before emitting the last instruction of the bb.
10920 #endif /* DISABLE_JIT */