2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
30 #ifdef HAVE_VALGRIND_MEMCHECK_H
31 #include <valgrind/memcheck.h>
34 #include <mono/metadata/assembly.h>
35 #include <mono/metadata/loader.h>
36 #include <mono/metadata/tabledefs.h>
37 #include <mono/metadata/class.h>
38 #include <mono/metadata/object.h>
39 #include <mono/metadata/exception.h>
40 #include <mono/metadata/opcodes.h>
41 #include <mono/metadata/mono-endian.h>
42 #include <mono/metadata/tokentype.h>
43 #include <mono/metadata/tabledefs.h>
44 #include <mono/metadata/marshal.h>
45 #include <mono/metadata/debug-helpers.h>
46 #include <mono/metadata/mono-debug.h>
47 #include <mono/metadata/gc-internal.h>
48 #include <mono/metadata/security-manager.h>
49 #include <mono/metadata/threads-types.h>
50 #include <mono/metadata/security-core-clr.h>
51 #include <mono/metadata/monitor.h>
52 #include <mono/utils/mono-compiler.h>
59 #include "jit-icalls.h"
61 #define BRANCH_COST 100
62 #define INLINE_LENGTH_LIMIT 20
63 #define INLINE_FAILURE do {\
64 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
67 #define CHECK_CFG_EXCEPTION do {\
68 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
71 #define METHOD_ACCESS_FAILURE do { \
72 char *method_fname = mono_method_full_name (method, TRUE); \
73 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
74 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
75 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
76 g_free (method_fname); \
77 g_free (cil_method_fname); \
78 goto exception_exit; \
80 #define FIELD_ACCESS_FAILURE do { \
81 char *method_fname = mono_method_full_name (method, TRUE); \
82 char *field_fname = mono_field_full_name (field); \
83 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
84 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
85 g_free (method_fname); \
86 g_free (field_fname); \
87 goto exception_exit; \
89 #define GENERIC_SHARING_FAILURE(opcode) do { \
90 if (cfg->generic_sharing_context) { \
91 if (cfg->verbose_level > 2) \
92 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
93 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
94 goto exception_exit; \
98 /* Determine whenever 'ins' represents a load of the 'this' argument */
99 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
101 static int ldind_to_load_membase (int opcode);
102 static int stind_to_store_membase (int opcode);
104 int mono_op_to_op_imm (int opcode);
105 int mono_op_to_op_imm_noemul (int opcode);
107 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
108 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
109 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
111 /* helper methods signature */
112 extern MonoMethodSignature *helper_sig_class_init_trampoline;
113 extern MonoMethodSignature *helper_sig_domain_get;
114 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
115 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
116 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
119 * Instruction metadata
127 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
128 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
134 #if SIZEOF_REGISTER == 8
139 /* keep in sync with the enum in mini.h */
142 #include "mini-ops.h"
147 #define MINI_OP(a,b,dest,src1,src2) (((src1) != NONE) + ((src2) != NONE)),
148 #define MINI_OP3(a,b,dest,src1,src2,src3) (((src1) != NONE) + ((src2) != NONE) + ((src3) != NONE)),
149 const gint8 ins_sreg_counts[] = {
150 #include "mini-ops.h"
155 extern GHashTable *jit_icall_name_hash;
157 #define MONO_INIT_VARINFO(vi,id) do { \
158 (vi)->range.first_use.pos.bid = 0xffff; \
164 mono_inst_set_src_registers (MonoInst *ins, int *regs)
166 ins->sreg1 = regs [0];
167 ins->sreg2 = regs [1];
168 ins->sreg3 = regs [2];
172 mono_alloc_ireg (MonoCompile *cfg)
174 return alloc_ireg (cfg);
178 mono_alloc_freg (MonoCompile *cfg)
180 return alloc_freg (cfg);
184 mono_alloc_preg (MonoCompile *cfg)
186 return alloc_preg (cfg);
190 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
192 return alloc_dreg (cfg, stack_type);
196 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
202 switch (type->type) {
205 case MONO_TYPE_BOOLEAN:
217 case MONO_TYPE_FNPTR:
219 case MONO_TYPE_CLASS:
220 case MONO_TYPE_STRING:
221 case MONO_TYPE_OBJECT:
222 case MONO_TYPE_SZARRAY:
223 case MONO_TYPE_ARRAY:
227 #if SIZEOF_REGISTER == 8
236 case MONO_TYPE_VALUETYPE:
237 if (type->data.klass->enumtype) {
238 type = mono_class_enum_basetype (type->data.klass);
241 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
244 case MONO_TYPE_TYPEDBYREF:
246 case MONO_TYPE_GENERICINST:
247 type = &type->data.generic_class->container_class->byval_arg;
251 g_assert (cfg->generic_sharing_context);
254 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
260 mono_print_bb (MonoBasicBlock *bb, const char *msg)
265 printf ("\n%s %d: [IN: ", msg, bb->block_num);
266 for (i = 0; i < bb->in_count; ++i)
267 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
269 for (i = 0; i < bb->out_count; ++i)
270 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
272 for (tree = bb->code; tree; tree = tree->next)
273 mono_print_ins_index (-1, tree);
277 * Can't put this at the beginning, since other files reference stuff from this
282 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
284 #define GET_BBLOCK(cfg,tblock,ip) do { \
285 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
287 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
288 NEW_BBLOCK (cfg, (tblock)); \
289 (tblock)->cil_code = (ip); \
290 ADD_BBLOCK (cfg, (tblock)); \
294 #if defined(__i386__) || defined(__x86_64__)
295 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
296 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
297 (dest)->dreg = alloc_preg ((cfg)); \
298 (dest)->sreg1 = (sr1); \
299 (dest)->sreg2 = (sr2); \
300 (dest)->inst_imm = (imm); \
301 (dest)->backend.shift_amount = (shift); \
302 MONO_ADD_INS ((cfg)->cbb, (dest)); \
306 #if SIZEOF_REGISTER == 8
307 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
308 /* FIXME: Need to add many more cases */ \
309 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
311 int dr = alloc_preg (cfg); \
312 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
313 (ins)->sreg2 = widen->dreg; \
317 #define ADD_WIDEN_OP(ins, arg1, arg2)
320 #define ADD_BINOP(op) do { \
321 MONO_INST_NEW (cfg, ins, (op)); \
323 ins->sreg1 = sp [0]->dreg; \
324 ins->sreg2 = sp [1]->dreg; \
325 type_from_op (ins, sp [0], sp [1]); \
327 /* Have to insert a widening op */ \
328 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
329 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
330 MONO_ADD_INS ((cfg)->cbb, (ins)); \
331 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
334 #define ADD_UNOP(op) do { \
335 MONO_INST_NEW (cfg, ins, (op)); \
337 ins->sreg1 = sp [0]->dreg; \
338 type_from_op (ins, sp [0], NULL); \
340 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
341 MONO_ADD_INS ((cfg)->cbb, (ins)); \
342 *sp++ = mono_decompose_opcode (cfg, ins); \
345 #define ADD_BINCOND(next_block) do { \
348 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
349 cmp->sreg1 = sp [0]->dreg; \
350 cmp->sreg2 = sp [1]->dreg; \
351 type_from_op (cmp, sp [0], sp [1]); \
353 type_from_op (ins, sp [0], sp [1]); \
354 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
355 GET_BBLOCK (cfg, tblock, target); \
356 link_bblock (cfg, bblock, tblock); \
357 ins->inst_true_bb = tblock; \
358 if ((next_block)) { \
359 link_bblock (cfg, bblock, (next_block)); \
360 ins->inst_false_bb = (next_block); \
361 start_new_bblock = 1; \
363 GET_BBLOCK (cfg, tblock, ip); \
364 link_bblock (cfg, bblock, tblock); \
365 ins->inst_false_bb = tblock; \
366 start_new_bblock = 2; \
368 if (sp != stack_start) { \
369 handle_stack_args (cfg, stack_start, sp - stack_start); \
370 CHECK_UNVERIFIABLE (cfg); \
372 MONO_ADD_INS (bblock, cmp); \
373 MONO_ADD_INS (bblock, ins); \
377 * link_bblock: Links two basic blocks
379 * links two basic blocks in the control flow graph, the 'from'
380 * argument is the starting block and the 'to' argument is the block
381 * the control flow ends to after 'from'.
384 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
386 MonoBasicBlock **newa;
390 if (from->cil_code) {
392 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
394 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
397 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
399 printf ("edge from entry to exit\n");
404 for (i = 0; i < from->out_count; ++i) {
405 if (to == from->out_bb [i]) {
411 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
412 for (i = 0; i < from->out_count; ++i) {
413 newa [i] = from->out_bb [i];
421 for (i = 0; i < to->in_count; ++i) {
422 if (from == to->in_bb [i]) {
428 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
429 for (i = 0; i < to->in_count; ++i) {
430 newa [i] = to->in_bb [i];
439 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
441 link_bblock (cfg, from, to);
445 * mono_find_block_region:
447 * We mark each basic block with a region ID. We use that to avoid BB
448 * optimizations when blocks are in different regions.
451 * A region token that encodes where this region is, and information
452 * about the clause owner for this block.
454 * The region encodes the try/catch/filter clause that owns this block
455 * as well as the type. -1 is a special value that represents a block
456 * that is in none of try/catch/filter.
459 mono_find_block_region (MonoCompile *cfg, int offset)
461 MonoMethod *method = cfg->method;
462 MonoMethodHeader *header = mono_method_get_header (method);
463 MonoExceptionClause *clause;
466 for (i = 0; i < header->num_clauses; ++i) {
467 clause = &header->clauses [i];
468 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
469 (offset < (clause->handler_offset)))
470 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
472 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
473 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
474 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
475 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
476 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
478 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
481 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
482 return ((i + 1) << 8) | clause->flags;
489 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
491 MonoMethod *method = cfg->method;
492 MonoMethodHeader *header = mono_method_get_header (method);
493 MonoExceptionClause *clause;
494 MonoBasicBlock *handler;
498 for (i = 0; i < header->num_clauses; ++i) {
499 clause = &header->clauses [i];
500 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
501 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
502 if (clause->flags == type) {
503 handler = cfg->cil_offset_to_bb [clause->handler_offset];
505 res = g_list_append (res, handler);
513 mono_create_spvar_for_region (MonoCompile *cfg, int region)
517 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
521 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
522 /* prevent it from being register allocated */
523 var->flags |= MONO_INST_INDIRECT;
525 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
529 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
531 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
535 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
539 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
543 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
544 /* prevent it from being register allocated */
545 var->flags |= MONO_INST_INDIRECT;
547 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
553 * Returns the type used in the eval stack when @type is loaded.
554 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
557 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
561 inst->klass = klass = mono_class_from_mono_type (type);
563 inst->type = STACK_MP;
568 switch (type->type) {
570 inst->type = STACK_INV;
574 case MONO_TYPE_BOOLEAN:
580 inst->type = STACK_I4;
585 case MONO_TYPE_FNPTR:
586 inst->type = STACK_PTR;
588 case MONO_TYPE_CLASS:
589 case MONO_TYPE_STRING:
590 case MONO_TYPE_OBJECT:
591 case MONO_TYPE_SZARRAY:
592 case MONO_TYPE_ARRAY:
593 inst->type = STACK_OBJ;
597 inst->type = STACK_I8;
601 inst->type = STACK_R8;
603 case MONO_TYPE_VALUETYPE:
604 if (type->data.klass->enumtype) {
605 type = mono_class_enum_basetype (type->data.klass);
609 inst->type = STACK_VTYPE;
612 case MONO_TYPE_TYPEDBYREF:
613 inst->klass = mono_defaults.typed_reference_class;
614 inst->type = STACK_VTYPE;
616 case MONO_TYPE_GENERICINST:
617 type = &type->data.generic_class->container_class->byval_arg;
620 case MONO_TYPE_MVAR :
621 /* FIXME: all the arguments must be references for now,
622 * later look inside cfg and see if the arg num is
625 g_assert (cfg->generic_sharing_context);
626 inst->type = STACK_OBJ;
629 g_error ("unknown type 0x%02x in eval stack type", type->type);
634 * The following tables are used to quickly validate the IL code in type_from_op ().
637 bin_num_table [STACK_MAX] [STACK_MAX] = {
638 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
639 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
640 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
641 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
642 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
643 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
644 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
645 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
650 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
653 /* reduce the size of this table */
655 bin_int_table [STACK_MAX] [STACK_MAX] = {
656 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
657 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
658 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
659 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
660 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
661 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
662 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
663 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
667 bin_comp_table [STACK_MAX] [STACK_MAX] = {
668 /* Inv i L p F & O vt */
670 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
671 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
672 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
673 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
674 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
675 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
676 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
679 /* reduce the size of this table */
681 shift_table [STACK_MAX] [STACK_MAX] = {
682 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
683 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
684 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
685 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
686 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
687 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
688 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
689 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
693 * Tables to map from the non-specific opcode to the matching
694 * type-specific opcode.
696 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
698 binops_op_map [STACK_MAX] = {
699 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
702 /* handles from CEE_NEG to CEE_CONV_U8 */
704 unops_op_map [STACK_MAX] = {
705 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
708 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
710 ovfops_op_map [STACK_MAX] = {
711 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
714 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
716 ovf2ops_op_map [STACK_MAX] = {
717 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
720 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
722 ovf3ops_op_map [STACK_MAX] = {
723 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
726 /* handles from CEE_BEQ to CEE_BLT_UN */
728 beqops_op_map [STACK_MAX] = {
729 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
732 /* handles from CEE_CEQ to CEE_CLT_UN */
734 ceqops_op_map [STACK_MAX] = {
735 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
739 * Sets ins->type (the type on the eval stack) according to the
740 * type of the opcode and the arguments to it.
741 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
743 * FIXME: this function sets ins->type unconditionally in some cases, but
744 * it should set it to invalid for some types (a conv.x on an object)
747 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
749 switch (ins->opcode) {
756 /* FIXME: check unverifiable args for STACK_MP */
757 ins->type = bin_num_table [src1->type] [src2->type];
758 ins->opcode += binops_op_map [ins->type];
765 ins->type = bin_int_table [src1->type] [src2->type];
766 ins->opcode += binops_op_map [ins->type];
771 ins->type = shift_table [src1->type] [src2->type];
772 ins->opcode += binops_op_map [ins->type];
777 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
778 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
779 ins->opcode = OP_LCOMPARE;
780 else if (src1->type == STACK_R8)
781 ins->opcode = OP_FCOMPARE;
783 ins->opcode = OP_ICOMPARE;
785 case OP_ICOMPARE_IMM:
786 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
787 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
788 ins->opcode = OP_LCOMPARE_IMM;
800 ins->opcode += beqops_op_map [src1->type];
803 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
804 ins->opcode += ceqops_op_map [src1->type];
810 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
811 ins->opcode += ceqops_op_map [src1->type];
815 ins->type = neg_table [src1->type];
816 ins->opcode += unops_op_map [ins->type];
819 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
820 ins->type = src1->type;
822 ins->type = STACK_INV;
823 ins->opcode += unops_op_map [ins->type];
829 ins->type = STACK_I4;
830 ins->opcode += unops_op_map [src1->type];
833 ins->type = STACK_R8;
834 switch (src1->type) {
837 ins->opcode = OP_ICONV_TO_R_UN;
840 ins->opcode = OP_LCONV_TO_R_UN;
844 case CEE_CONV_OVF_I1:
845 case CEE_CONV_OVF_U1:
846 case CEE_CONV_OVF_I2:
847 case CEE_CONV_OVF_U2:
848 case CEE_CONV_OVF_I4:
849 case CEE_CONV_OVF_U4:
850 ins->type = STACK_I4;
851 ins->opcode += ovf3ops_op_map [src1->type];
853 case CEE_CONV_OVF_I_UN:
854 case CEE_CONV_OVF_U_UN:
855 ins->type = STACK_PTR;
856 ins->opcode += ovf2ops_op_map [src1->type];
858 case CEE_CONV_OVF_I1_UN:
859 case CEE_CONV_OVF_I2_UN:
860 case CEE_CONV_OVF_I4_UN:
861 case CEE_CONV_OVF_U1_UN:
862 case CEE_CONV_OVF_U2_UN:
863 case CEE_CONV_OVF_U4_UN:
864 ins->type = STACK_I4;
865 ins->opcode += ovf2ops_op_map [src1->type];
868 ins->type = STACK_PTR;
869 switch (src1->type) {
871 ins->opcode = OP_ICONV_TO_U;
875 #if SIZEOF_REGISTER == 8
876 ins->opcode = OP_LCONV_TO_U;
878 ins->opcode = OP_MOVE;
882 ins->opcode = OP_LCONV_TO_U;
885 ins->opcode = OP_FCONV_TO_U;
891 ins->type = STACK_I8;
892 ins->opcode += unops_op_map [src1->type];
894 case CEE_CONV_OVF_I8:
895 case CEE_CONV_OVF_U8:
896 ins->type = STACK_I8;
897 ins->opcode += ovf3ops_op_map [src1->type];
899 case CEE_CONV_OVF_U8_UN:
900 case CEE_CONV_OVF_I8_UN:
901 ins->type = STACK_I8;
902 ins->opcode += ovf2ops_op_map [src1->type];
906 ins->type = STACK_R8;
907 ins->opcode += unops_op_map [src1->type];
910 ins->type = STACK_R8;
914 ins->type = STACK_I4;
915 ins->opcode += ovfops_op_map [src1->type];
920 ins->type = STACK_PTR;
921 ins->opcode += ovfops_op_map [src1->type];
929 ins->type = bin_num_table [src1->type] [src2->type];
930 ins->opcode += ovfops_op_map [src1->type];
931 if (ins->type == STACK_R8)
932 ins->type = STACK_INV;
934 case OP_LOAD_MEMBASE:
935 ins->type = STACK_PTR;
937 case OP_LOADI1_MEMBASE:
938 case OP_LOADU1_MEMBASE:
939 case OP_LOADI2_MEMBASE:
940 case OP_LOADU2_MEMBASE:
941 case OP_LOADI4_MEMBASE:
942 case OP_LOADU4_MEMBASE:
943 ins->type = STACK_PTR;
945 case OP_LOADI8_MEMBASE:
946 ins->type = STACK_I8;
948 case OP_LOADR4_MEMBASE:
949 case OP_LOADR8_MEMBASE:
950 ins->type = STACK_R8;
953 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
957 if (ins->type == STACK_MP)
958 ins->klass = mono_defaults.object_class;
963 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
969 param_table [STACK_MAX] [STACK_MAX] = {
974 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
978 switch (args->type) {
988 for (i = 0; i < sig->param_count; ++i) {
989 switch (args [i].type) {
993 if (!sig->params [i]->byref)
997 if (sig->params [i]->byref)
999 switch (sig->params [i]->type) {
1000 case MONO_TYPE_CLASS:
1001 case MONO_TYPE_STRING:
1002 case MONO_TYPE_OBJECT:
1003 case MONO_TYPE_SZARRAY:
1004 case MONO_TYPE_ARRAY:
1011 if (sig->params [i]->byref)
1013 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1022 /*if (!param_table [args [i].type] [sig->params [i]->type])
1030 * When we need a pointer to the current domain many times in a method, we
1031 * call mono_domain_get() once and we store the result in a local variable.
1032 * This function returns the variable that represents the MonoDomain*.
1034 inline static MonoInst *
1035 mono_get_domainvar (MonoCompile *cfg)
1037 if (!cfg->domainvar)
1038 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1039 return cfg->domainvar;
1043 * The got_var contains the address of the Global Offset Table when AOT
1046 inline static MonoInst *
1047 mono_get_got_var (MonoCompile *cfg)
1049 #ifdef MONO_ARCH_NEED_GOT_VAR
1050 if (!cfg->compile_aot)
1052 if (!cfg->got_var) {
1053 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1055 return cfg->got_var;
1062 mono_get_vtable_var (MonoCompile *cfg)
1064 g_assert (cfg->generic_sharing_context);
1066 if (!cfg->rgctx_var) {
1067 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1068 /* force the var to be stack allocated */
1069 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1072 return cfg->rgctx_var;
1076 type_from_stack_type (MonoInst *ins) {
1077 switch (ins->type) {
1078 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1079 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1080 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1081 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1083 return &ins->klass->this_arg;
1084 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1085 case STACK_VTYPE: return &ins->klass->byval_arg;
1087 g_error ("stack type %d to monotype not handled\n", ins->type);
1092 static G_GNUC_UNUSED int
1093 type_to_stack_type (MonoType *t)
1095 switch (mono_type_get_underlying_type (t)->type) {
1098 case MONO_TYPE_BOOLEAN:
1101 case MONO_TYPE_CHAR:
1108 case MONO_TYPE_FNPTR:
1110 case MONO_TYPE_CLASS:
1111 case MONO_TYPE_STRING:
1112 case MONO_TYPE_OBJECT:
1113 case MONO_TYPE_SZARRAY:
1114 case MONO_TYPE_ARRAY:
1122 case MONO_TYPE_VALUETYPE:
1123 case MONO_TYPE_TYPEDBYREF:
1125 case MONO_TYPE_GENERICINST:
1126 if (mono_type_generic_inst_is_valuetype (t))
1132 g_assert_not_reached ();
1139 array_access_to_klass (int opcode)
1143 return mono_defaults.byte_class;
1145 return mono_defaults.uint16_class;
1148 return mono_defaults.int_class;
1151 return mono_defaults.sbyte_class;
1154 return mono_defaults.int16_class;
1157 return mono_defaults.int32_class;
1159 return mono_defaults.uint32_class;
1162 return mono_defaults.int64_class;
1165 return mono_defaults.single_class;
1168 return mono_defaults.double_class;
1169 case CEE_LDELEM_REF:
1170 case CEE_STELEM_REF:
1171 return mono_defaults.object_class;
1173 g_assert_not_reached ();
1179 * We try to share variables when possible
1182 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1187 /* inlining can result in deeper stacks */
1188 if (slot >= mono_method_get_header (cfg->method)->max_stack)
1189 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1191 pos = ins->type - 1 + slot * STACK_MAX;
1193 switch (ins->type) {
1200 if ((vnum = cfg->intvars [pos]))
1201 return cfg->varinfo [vnum];
1202 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1203 cfg->intvars [pos] = res->inst_c0;
1206 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1212 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1215 * Don't use this if a generic_context is set, since that means AOT can't
1216 * look up the method using just the image+token.
1217 * table == 0 means this is a reference made from a wrapper.
1219 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1220 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1221 jump_info_token->image = image;
1222 jump_info_token->token = token;
1223 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1228 * This function is called to handle items that are left on the evaluation stack
1229 * at basic block boundaries. What happens is that we save the values to local variables
1230 * and we reload them later when first entering the target basic block (with the
1231 * handle_loaded_temps () function).
1232 * A single joint point will use the same variables (stored in the array bb->out_stack or
1233 * bb->in_stack, if the basic block is before or after the joint point).
1235 * This function needs to be called _before_ emitting the last instruction of
1236 * the bb (i.e. before emitting a branch).
1237 * If the stack merge fails at a join point, cfg->unverifiable is set.
1240 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1243 MonoBasicBlock *bb = cfg->cbb;
1244 MonoBasicBlock *outb;
1245 MonoInst *inst, **locals;
1250 if (cfg->verbose_level > 3)
1251 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
1252 if (!bb->out_scount) {
1253 bb->out_scount = count;
1254 //printf ("bblock %d has out:", bb->block_num);
1256 for (i = 0; i < bb->out_count; ++i) {
1257 outb = bb->out_bb [i];
1258 /* exception handlers are linked, but they should not be considered for stack args */
1259 if (outb->flags & BB_EXCEPTION_HANDLER)
1261 //printf (" %d", outb->block_num);
1262 if (outb->in_stack) {
1264 bb->out_stack = outb->in_stack;
1270 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1271 for (i = 0; i < count; ++i) {
1273 * try to reuse temps already allocated for this purpouse, if they occupy the same
1274 * stack slot and if they are of the same type.
1275 * This won't cause conflicts since if 'local' is used to
1276 * store one of the values in the in_stack of a bblock, then
1277 * the same variable will be used for the same outgoing stack
1279 * This doesn't work when inlining methods, since the bblocks
1280 * in the inlined methods do not inherit their in_stack from
1281 * the bblock they are inlined to. See bug #58863 for an
1284 if (cfg->inlined_method)
1285 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1287 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
1292 for (i = 0; i < bb->out_count; ++i) {
1293 outb = bb->out_bb [i];
1294 /* exception handlers are linked, but they should not be considered for stack args */
1295 if (outb->flags & BB_EXCEPTION_HANDLER)
1297 if (outb->in_scount) {
1298 if (outb->in_scount != bb->out_scount) {
1299 cfg->unverifiable = TRUE;
1302 continue; /* check they are the same locals */
1304 outb->in_scount = count;
1305 outb->in_stack = bb->out_stack;
1308 locals = bb->out_stack;
1310 for (i = 0; i < count; ++i) {
1311 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1312 inst->cil_code = sp [i]->cil_code;
1313 sp [i] = locals [i];
1314 if (cfg->verbose_level > 3)
1315 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1319 * It is possible that the out bblocks already have in_stack assigned, and
1320 * the in_stacks differ. In this case, we will store to all the different
1327 /* Find a bblock which has a different in_stack */
1329 while (bindex < bb->out_count) {
1330 outb = bb->out_bb [bindex];
1331 /* exception handlers are linked, but they should not be considered for stack args */
1332 if (outb->flags & BB_EXCEPTION_HANDLER) {
1336 if (outb->in_stack != locals) {
1337 for (i = 0; i < count; ++i) {
1338 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1339 inst->cil_code = sp [i]->cil_code;
1340 sp [i] = locals [i];
1341 if (cfg->verbose_level > 3)
1342 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1344 locals = outb->in_stack;
1353 /* Emit code which loads interface_offsets [klass->interface_id]
1354 * The array is stored in memory before vtable.
1357 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1359 if (cfg->compile_aot) {
1360 int ioffset_reg = alloc_preg (cfg);
1361 int iid_reg = alloc_preg (cfg);
1363 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1364 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1365 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
1368 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
1373 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1374 * stored in "klass_reg" implements the interface "klass".
1377 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1379 int ibitmap_reg = alloc_preg (cfg);
1380 int ibitmap_byte_reg = alloc_preg (cfg);
1382 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap));
1384 if (cfg->compile_aot) {
1385 int iid_reg = alloc_preg (cfg);
1386 int shifted_iid_reg = alloc_preg (cfg);
1387 int ibitmap_byte_address_reg = alloc_preg (cfg);
1388 int masked_iid_reg = alloc_preg (cfg);
1389 int iid_one_bit_reg = alloc_preg (cfg);
1390 int iid_bit_reg = alloc_preg (cfg);
1391 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1392 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1393 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1394 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1395 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1396 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1397 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1398 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
1400 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1401 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1406 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1407 * stored in "vtable_reg" implements the interface "klass".
1410 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1412 int ibitmap_reg = alloc_preg (cfg);
1413 int ibitmap_byte_reg = alloc_preg (cfg);
1415 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap));
1417 if (cfg->compile_aot) {
1418 int iid_reg = alloc_preg (cfg);
1419 int shifted_iid_reg = alloc_preg (cfg);
1420 int ibitmap_byte_address_reg = alloc_preg (cfg);
1421 int masked_iid_reg = alloc_preg (cfg);
1422 int iid_one_bit_reg = alloc_preg (cfg);
1423 int iid_bit_reg = alloc_preg (cfg);
1424 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1425 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, shifted_iid_reg, iid_reg, 3);
1426 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1427 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1428 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, masked_iid_reg, iid_reg, 7);
1429 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1430 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1431 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
1433 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1434 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1439 * Emit code which checks whenever the interface id of @klass is smaller than
1440 * than the value given by max_iid_reg.
1443 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1444 MonoBasicBlock *false_target)
1446 if (cfg->compile_aot) {
1447 int iid_reg = alloc_preg (cfg);
1448 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1449 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1452 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1454 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1456 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1459 /* Same as above, but obtains max_iid from a vtable */
1461 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1462 MonoBasicBlock *false_target)
1464 int max_iid_reg = alloc_preg (cfg);
1466 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1467 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1470 /* Same as above, but obtains max_iid from a klass */
1472 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1473 MonoBasicBlock *false_target)
1475 int max_iid_reg = alloc_preg (cfg);
1477 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1478 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1482 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1484 int idepth_reg = alloc_preg (cfg);
1485 int stypes_reg = alloc_preg (cfg);
1486 int stype = alloc_preg (cfg);
1488 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1489 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1490 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1491 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1493 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1494 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1495 if (cfg->compile_aot) {
1496 int const_reg = alloc_preg (cfg);
1497 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1498 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1500 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1502 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
1506 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1508 int intf_reg = alloc_preg (cfg);
1510 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1511 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1512 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1514 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1516 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1520 * Variant of the above that takes a register to the class, not the vtable.
1523 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1525 int intf_bit_reg = alloc_preg (cfg);
1527 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1528 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1529 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1531 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1533 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1537 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1539 if (cfg->compile_aot) {
1540 int const_reg = alloc_preg (cfg);
1541 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1542 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1544 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1546 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1550 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1552 if (cfg->compile_aot) {
1553 int const_reg = alloc_preg (cfg);
1554 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1555 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1557 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1559 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1563 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1566 int rank_reg = alloc_preg (cfg);
1567 int eclass_reg = alloc_preg (cfg);
1569 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1570 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1571 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1572 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1573 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
1574 if (klass->cast_class == mono_defaults.object_class) {
1575 int parent_reg = alloc_preg (cfg);
1576 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1577 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1578 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1579 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1580 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1581 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1582 } else if (klass->cast_class == mono_defaults.enum_class) {
1583 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1584 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1585 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1587 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1588 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1591 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1592 /* Check that the object is a vector too */
1593 int bounds_reg = alloc_preg (cfg);
1594 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1595 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1596 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1599 int idepth_reg = alloc_preg (cfg);
1600 int stypes_reg = alloc_preg (cfg);
1601 int stype = alloc_preg (cfg);
1603 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1604 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1605 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1606 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1608 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1609 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1610 mini_emit_class_check (cfg, stype, klass);
1615 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1619 g_assert (val == 0);
1624 if ((size <= 4) && (size <= align)) {
1627 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1630 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1633 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1635 #if SIZEOF_REGISTER == 8
1637 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
1643 val_reg = alloc_preg (cfg);
1645 if (SIZEOF_REGISTER == 8)
1646 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1648 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1651 /* This could be optimized further if neccesary */
1653 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1660 #if !NO_UNALIGNED_ACCESS
1661 if (SIZEOF_REGISTER == 8) {
1663 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1668 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1676 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1681 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1686 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1692 #endif /* DISABLE_JIT */
1695 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1703 /* This could be optimized further if neccesary */
1705 cur_reg = alloc_preg (cfg);
1706 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1707 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1714 #if !NO_UNALIGNED_ACCESS
1715 if (SIZEOF_REGISTER == 8) {
1717 cur_reg = alloc_preg (cfg);
1718 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1719 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1728 cur_reg = alloc_preg (cfg);
1729 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1730 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1736 cur_reg = alloc_preg (cfg);
1737 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1738 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1744 cur_reg = alloc_preg (cfg);
1745 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1746 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1756 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
1759 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1762 type = mini_get_basic_type_from_generic (gsctx, type);
1763 switch (type->type) {
1764 case MONO_TYPE_VOID:
1765 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1768 case MONO_TYPE_BOOLEAN:
1771 case MONO_TYPE_CHAR:
1774 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1778 case MONO_TYPE_FNPTR:
1779 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1780 case MONO_TYPE_CLASS:
1781 case MONO_TYPE_STRING:
1782 case MONO_TYPE_OBJECT:
1783 case MONO_TYPE_SZARRAY:
1784 case MONO_TYPE_ARRAY:
1785 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1788 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1791 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1792 case MONO_TYPE_VALUETYPE:
1793 if (type->data.klass->enumtype) {
1794 type = mono_class_enum_basetype (type->data.klass);
1797 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1798 case MONO_TYPE_TYPEDBYREF:
1799 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1800 case MONO_TYPE_GENERICINST:
1801 type = &type->data.generic_class->container_class->byval_arg;
1804 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1810 * target_type_is_incompatible:
1811 * @cfg: MonoCompile context
1813 * Check that the item @arg on the evaluation stack can be stored
1814 * in the target type (can be a local, or field, etc).
1815 * The cfg arg can be used to check if we need verification or just
1818 * Returns: non-0 value if arg can't be stored on a target.
1821 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1823 MonoType *simple_type;
1826 if (target->byref) {
1827 /* FIXME: check that the pointed to types match */
1828 if (arg->type == STACK_MP)
1829 return arg->klass != mono_class_from_mono_type (target);
1830 if (arg->type == STACK_PTR)
1835 simple_type = mono_type_get_underlying_type (target);
1836 switch (simple_type->type) {
1837 case MONO_TYPE_VOID:
1841 case MONO_TYPE_BOOLEAN:
1844 case MONO_TYPE_CHAR:
1847 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1851 /* STACK_MP is needed when setting pinned locals */
1852 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1857 case MONO_TYPE_FNPTR:
1858 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1861 case MONO_TYPE_CLASS:
1862 case MONO_TYPE_STRING:
1863 case MONO_TYPE_OBJECT:
1864 case MONO_TYPE_SZARRAY:
1865 case MONO_TYPE_ARRAY:
1866 if (arg->type != STACK_OBJ)
1868 /* FIXME: check type compatibility */
1872 if (arg->type != STACK_I8)
1877 if (arg->type != STACK_R8)
1880 case MONO_TYPE_VALUETYPE:
1881 if (arg->type != STACK_VTYPE)
1883 klass = mono_class_from_mono_type (simple_type);
1884 if (klass != arg->klass)
1887 case MONO_TYPE_TYPEDBYREF:
1888 if (arg->type != STACK_VTYPE)
1890 klass = mono_class_from_mono_type (simple_type);
1891 if (klass != arg->klass)
1894 case MONO_TYPE_GENERICINST:
1895 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1896 if (arg->type != STACK_VTYPE)
1898 klass = mono_class_from_mono_type (simple_type);
1899 if (klass != arg->klass)
1903 if (arg->type != STACK_OBJ)
1905 /* FIXME: check type compatibility */
1909 case MONO_TYPE_MVAR:
1910 /* FIXME: all the arguments must be references for now,
1911 * later look inside cfg and see if the arg num is
1912 * really a reference
1914 g_assert (cfg->generic_sharing_context);
1915 if (arg->type != STACK_OBJ)
1919 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1925 * Prepare arguments for passing to a function call.
1926 * Return a non-zero value if the arguments can't be passed to the given
1928 * The type checks are not yet complete and some conversions may need
1929 * casts on 32 or 64 bit architectures.
1931 * FIXME: implement this using target_type_is_incompatible ()
1934 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1936 MonoType *simple_type;
1940 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1944 for (i = 0; i < sig->param_count; ++i) {
1945 if (sig->params [i]->byref) {
1946 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1950 simple_type = sig->params [i];
1951 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1953 switch (simple_type->type) {
1954 case MONO_TYPE_VOID:
1959 case MONO_TYPE_BOOLEAN:
1962 case MONO_TYPE_CHAR:
1965 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
1971 case MONO_TYPE_FNPTR:
1972 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
1975 case MONO_TYPE_CLASS:
1976 case MONO_TYPE_STRING:
1977 case MONO_TYPE_OBJECT:
1978 case MONO_TYPE_SZARRAY:
1979 case MONO_TYPE_ARRAY:
1980 if (args [i]->type != STACK_OBJ)
1985 if (args [i]->type != STACK_I8)
1990 if (args [i]->type != STACK_R8)
1993 case MONO_TYPE_VALUETYPE:
1994 if (simple_type->data.klass->enumtype) {
1995 simple_type = mono_class_enum_basetype (simple_type->data.klass);
1998 if (args [i]->type != STACK_VTYPE)
2001 case MONO_TYPE_TYPEDBYREF:
2002 if (args [i]->type != STACK_VTYPE)
2005 case MONO_TYPE_GENERICINST:
2006 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2010 g_error ("unknown type 0x%02x in check_call_signature",
2018 callvirt_to_call (int opcode)
2023 case OP_VOIDCALLVIRT:
2032 g_assert_not_reached ();
2039 callvirt_to_call_membase (int opcode)
2043 return OP_CALL_MEMBASE;
2044 case OP_VOIDCALLVIRT:
2045 return OP_VOIDCALL_MEMBASE;
2047 return OP_FCALL_MEMBASE;
2049 return OP_LCALL_MEMBASE;
2051 return OP_VCALL_MEMBASE;
2053 g_assert_not_reached ();
2059 #ifdef MONO_ARCH_HAVE_IMT
2061 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2063 #ifdef MONO_ARCH_IMT_REG
2064 int method_reg = alloc_preg (cfg);
2067 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2068 } else if (cfg->compile_aot) {
2069 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2072 MONO_INST_NEW (cfg, ins, OP_PCONST);
2073 ins->inst_p0 = call->method;
2074 ins->dreg = method_reg;
2075 MONO_ADD_INS (cfg->cbb, ins);
2078 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2080 mono_arch_emit_imt_argument (cfg, call, imt_arg);
2085 static MonoJumpInfo *
2086 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2088 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2092 ji->data.target = target;
2097 inline static MonoInst*
2098 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args);
2100 inline static MonoCallInst *
2101 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2102 MonoInst **args, int calli, int virtual)
2105 #ifdef MONO_ARCH_SOFT_FLOAT
2109 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2112 call->signature = sig;
2114 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
2116 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2117 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2120 temp->backend.is_pinvoke = sig->pinvoke;
2123 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2124 * address of return value to increase optimization opportunities.
2125 * Before vtype decomposition, the dreg of the call ins itself represents the
2126 * fact the call modifies the return value. After decomposition, the call will
2127 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2128 * will be transformed into an LDADDR.
2130 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2131 loada->dreg = alloc_preg (cfg);
2132 loada->inst_p0 = temp;
2133 /* We reference the call too since call->dreg could change during optimization */
2134 loada->inst_p1 = call;
2135 MONO_ADD_INS (cfg->cbb, loada);
2137 call->inst.dreg = temp->dreg;
2139 call->vret_var = loada;
2140 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2141 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2143 #ifdef MONO_ARCH_SOFT_FLOAT
2145 * If the call has a float argument, we would need to do an r8->r4 conversion using
2146 * an icall, but that cannot be done during the call sequence since it would clobber
2147 * the call registers + the stack. So we do it before emitting the call.
2149 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2151 MonoInst *in = call->args [i];
2153 if (i >= sig->hasthis)
2154 t = sig->params [i - sig->hasthis];
2156 t = &mono_defaults.int_class->byval_arg;
2157 t = mono_type_get_underlying_type (t);
2159 if (!t->byref && t->type == MONO_TYPE_R4) {
2160 MonoInst *iargs [1];
2164 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2166 /* The result will be in an int vreg */
2167 call->args [i] = conv;
2172 mono_arch_emit_call (cfg, call);
2174 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2175 cfg->flags |= MONO_CFG_HAS_CALLS;
2180 inline static MonoInst*
2181 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2183 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE);
2185 call->inst.sreg1 = addr->dreg;
2187 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2189 return (MonoInst*)call;
2192 inline static MonoInst*
2193 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2195 #ifdef MONO_ARCH_RGCTX_REG
2200 rgctx_reg = mono_alloc_preg (cfg);
2201 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2203 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2205 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2206 cfg->uses_rgctx_reg = TRUE;
2207 call->rgctx_reg = TRUE;
2209 return (MonoInst*)call;
2211 g_assert_not_reached ();
2217 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2218 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2220 gboolean virtual = this != NULL;
2221 gboolean enable_for_aot = TRUE;
2224 if (method->string_ctor) {
2225 /* Create the real signature */
2226 /* FIXME: Cache these */
2227 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2228 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2233 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual);
2235 if (this && sig->hasthis &&
2236 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2237 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this)) {
2238 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2240 call->method = method;
2242 call->inst.flags |= MONO_INST_HAS_METHOD;
2243 call->inst.inst_left = this;
2246 int vtable_reg, slot_reg, this_reg;
2248 this_reg = this->dreg;
2250 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2251 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2252 /* Make a call to delegate->invoke_impl */
2253 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2254 call->inst.inst_basereg = this_reg;
2255 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2256 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2258 return (MonoInst*)call;
2262 if ((!cfg->compile_aot || enable_for_aot) &&
2263 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2264 (MONO_METHOD_IS_FINAL (method) &&
2265 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK))) {
2267 * the method is not virtual, we just need to ensure this is not null
2268 * and then we can call the method directly.
2270 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2271 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2274 if (!method->string_ctor) {
2275 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2276 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2277 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2280 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2282 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2284 return (MonoInst*)call;
2287 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2289 * the method is virtual, but we can statically dispatch since either
2290 * it's class or the method itself are sealed.
2291 * But first we need to ensure it's not a null reference.
2293 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
2294 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, this_reg);
2295 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, this_reg);
2297 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2298 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2300 return (MonoInst*)call;
2303 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2305 vtable_reg = alloc_preg (cfg);
2306 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2307 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2309 #ifdef MONO_ARCH_HAVE_IMT
2311 guint32 imt_slot = mono_method_get_imt_slot (method);
2312 emit_imt_argument (cfg, call, imt_arg);
2313 slot_reg = vtable_reg;
2314 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2317 if (slot_reg == -1) {
2318 slot_reg = alloc_preg (cfg);
2319 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2320 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2323 slot_reg = vtable_reg;
2324 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2325 (mono_method_get_vtable_index (method) * SIZEOF_VOID_P);
2326 #ifdef MONO_ARCH_HAVE_IMT
2328 g_assert (mono_method_signature (method)->generic_param_count);
2329 emit_imt_argument (cfg, call, imt_arg);
2334 call->inst.sreg1 = slot_reg;
2335 call->virtual = TRUE;
2338 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2340 return (MonoInst*)call;
2344 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2345 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
2352 #ifdef MONO_ARCH_RGCTX_REG
2353 rgctx_reg = mono_alloc_preg (cfg);
2354 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2359 ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2361 call = (MonoCallInst*)ins;
2363 #ifdef MONO_ARCH_RGCTX_REG
2364 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2365 cfg->uses_rgctx_reg = TRUE;
2366 call->rgctx_reg = TRUE;
2375 static inline MonoInst*
2376 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2378 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
2382 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2389 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE);
2392 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2394 return (MonoInst*)call;
2397 inline static MonoInst*
2398 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2400 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2404 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2408 * mono_emit_abs_call:
2410 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2412 inline static MonoInst*
2413 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2414 MonoMethodSignature *sig, MonoInst **args)
2416 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2420 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2423 if (cfg->abs_patches == NULL)
2424 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2425 g_hash_table_insert (cfg->abs_patches, ji, ji);
2426 ins = mono_emit_native_call (cfg, ji, sig, args);
2427 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
2432 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2434 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2435 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2439 * Native code might return non register sized integers
2440 * without initializing the upper bits.
2442 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2443 case OP_LOADI1_MEMBASE:
2444 widen_op = OP_ICONV_TO_I1;
2446 case OP_LOADU1_MEMBASE:
2447 widen_op = OP_ICONV_TO_U1;
2449 case OP_LOADI2_MEMBASE:
2450 widen_op = OP_ICONV_TO_I2;
2452 case OP_LOADU2_MEMBASE:
2453 widen_op = OP_ICONV_TO_U2;
2459 if (widen_op != -1) {
2460 int dreg = alloc_preg (cfg);
2463 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2464 widen->type = ins->type;
2474 get_memcpy_method (void)
2476 static MonoMethod *memcpy_method = NULL;
2477 if (!memcpy_method) {
2478 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2480 g_error ("Old corlib found. Install a new one");
2482 return memcpy_method;
2486 * Emit code to copy a valuetype of type @klass whose address is stored in
2487 * @src->dreg to memory whose address is stored at @dest->dreg.
2490 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2492 MonoInst *iargs [3];
2495 MonoMethod *memcpy_method;
2499 * This check breaks with spilled vars... need to handle it during verification anyway.
2500 * g_assert (klass && klass == src->klass && klass == dest->klass);
2504 n = mono_class_native_size (klass, &align);
2506 n = mono_class_value_size (klass, &align);
2508 #if HAVE_WRITE_BARRIERS
2509 /* if native is true there should be no references in the struct */
2510 if (klass->has_references && !native) {
2511 /* Avoid barriers when storing to the stack */
2512 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2513 (dest->opcode == OP_LDADDR))) {
2516 EMIT_NEW_PCONST (cfg, iargs [2], klass);
2518 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
2523 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2524 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2525 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2529 EMIT_NEW_ICONST (cfg, iargs [2], n);
2531 memcpy_method = get_memcpy_method ();
2532 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
2537 get_memset_method (void)
2539 static MonoMethod *memset_method = NULL;
2540 if (!memset_method) {
2541 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2543 g_error ("Old corlib found. Install a new one");
2545 return memset_method;
2549 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2551 MonoInst *iargs [3];
2554 MonoMethod *memset_method;
2556 /* FIXME: Optimize this for the case when dest is an LDADDR */
2558 mono_class_init (klass);
2559 n = mono_class_value_size (klass, &align);
2561 if (n <= sizeof (gpointer) * 5) {
2562 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2565 memset_method = get_memset_method ();
2567 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2568 EMIT_NEW_ICONST (cfg, iargs [2], n);
2569 mono_emit_method_call (cfg, memset_method, iargs, NULL);
2574 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2576 MonoInst *this = NULL;
2578 g_assert (cfg->generic_sharing_context);
2580 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2581 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2582 !method->klass->valuetype)
2583 EMIT_NEW_ARGLOAD (cfg, this, 0);
2585 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2586 MonoInst *mrgctx_loc, *mrgctx_var;
2589 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2591 mrgctx_loc = mono_get_vtable_var (cfg);
2592 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2595 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2596 MonoInst *vtable_loc, *vtable_var;
2600 vtable_loc = mono_get_vtable_var (cfg);
2601 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2603 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2604 MonoInst *mrgctx_var = vtable_var;
2607 vtable_reg = alloc_preg (cfg);
2608 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2609 vtable_var->type = STACK_PTR;
2615 int vtable_reg, res_reg;
2617 vtable_reg = alloc_preg (cfg);
2618 res_reg = alloc_preg (cfg);
2619 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
2624 static MonoJumpInfoRgctxEntry *
2625 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2627 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2628 res->method = method;
2629 res->in_mrgctx = in_mrgctx;
2630 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2631 res->data->type = patch_type;
2632 res->data->data.target = patch_data;
2633 res->info_type = info_type;
2638 static inline MonoInst*
2639 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2641 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
2645 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2646 MonoClass *klass, int rgctx_type)
2648 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2649 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2651 return emit_rgctx_fetch (cfg, rgctx, entry);
2655 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2656 MonoMethod *cmethod, int rgctx_type)
2658 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2659 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2661 return emit_rgctx_fetch (cfg, rgctx, entry);
2665 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2666 MonoClassField *field, int rgctx_type)
2668 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2669 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2671 return emit_rgctx_fetch (cfg, rgctx, entry);
2675 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
2677 int vtable_reg = alloc_preg (cfg);
2678 int context_used = 0;
2680 if (cfg->generic_sharing_context)
2681 context_used = mono_class_check_context_used (array_class);
2683 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
2685 if (cfg->opt & MONO_OPT_SHARED) {
2686 int class_reg = alloc_preg (cfg);
2687 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2688 if (cfg->compile_aot) {
2689 int klass_reg = alloc_preg (cfg);
2690 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
2691 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
2693 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
2695 } else if (context_used) {
2696 MonoInst *vtable_ins;
2698 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
2699 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
2701 if (cfg->compile_aot) {
2702 int vt_reg = alloc_preg (cfg);
2703 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, mono_class_vtable (cfg->domain, array_class));
2704 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
2706 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, mono_class_vtable (cfg->domain, array_class));
2710 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
2714 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
2716 if (mini_get_debug_options ()->better_cast_details) {
2717 int to_klass_reg = alloc_preg (cfg);
2718 int vtable_reg = alloc_preg (cfg);
2719 int klass_reg = alloc_preg (cfg);
2720 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2723 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
2727 MONO_ADD_INS (cfg->cbb, tls_get);
2728 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2729 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2731 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
2732 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
2733 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
2738 reset_cast_details (MonoCompile *cfg)
2740 /* Reset the variables holding the cast details */
2741 if (mini_get_debug_options ()->better_cast_details) {
2742 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2744 MONO_ADD_INS (cfg->cbb, tls_get);
2745 /* It is enough to reset the from field */
2746 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
2751 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
2752 * generic code is generated.
2755 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
2757 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
2760 MonoInst *rgctx, *addr;
2762 /* FIXME: What if the class is shared? We might not
2763 have to get the address of the method from the
2765 addr = emit_get_rgctx_method (cfg, context_used, method,
2766 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2768 rgctx = emit_get_rgctx (cfg, method, context_used);
2770 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
2772 return mono_emit_method_call (cfg, method, &val, NULL);
2777 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
2781 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
2782 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
2783 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
2784 int rank_reg = alloc_dreg (cfg ,STACK_I4);
2786 obj_reg = sp [0]->dreg;
2787 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2788 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2790 /* FIXME: generics */
2791 g_assert (klass->rank == 0);
2794 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
2795 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2797 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2798 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
2801 MonoInst *element_class;
2803 /* This assertion is from the unboxcast insn */
2804 g_assert (klass->rank == 0);
2806 element_class = emit_get_rgctx_klass (cfg, context_used,
2807 klass->element_class, MONO_RGCTX_INFO_KLASS);
2809 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
2810 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2812 save_cast_details (cfg, klass->element_class, obj_reg);
2813 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
2814 reset_cast_details (cfg);
2817 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
2818 MONO_ADD_INS (cfg->cbb, add);
2819 add->type = STACK_MP;
2826 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box)
2828 MonoInst *iargs [2];
2831 if (cfg->opt & MONO_OPT_SHARED) {
2832 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2833 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
2835 alloc_ftn = mono_object_new;
2836 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
2837 /* This happens often in argument checking code, eg. throw new FooException... */
2838 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
2839 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
2840 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
2842 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2843 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2846 if (managed_alloc) {
2847 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2848 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
2850 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
2852 guint32 lw = vtable->klass->instance_size;
2853 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
2854 EMIT_NEW_ICONST (cfg, iargs [0], lw);
2855 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
2858 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2862 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
2866 handle_alloc_from_inst (MonoCompile *cfg, MonoClass *klass, MonoInst *data_inst,
2869 MonoInst *iargs [2];
2870 MonoMethod *managed_alloc = NULL;
2874 FIXME: we cannot get managed_alloc here because we can't get
2875 the class's vtable (because it's not a closed class)
2877 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2878 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2881 if (cfg->opt & MONO_OPT_SHARED) {
2882 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2883 iargs [1] = data_inst;
2884 alloc_ftn = mono_object_new;
2886 if (managed_alloc) {
2887 iargs [0] = data_inst;
2888 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
2891 iargs [0] = data_inst;
2892 alloc_ftn = mono_object_new_specific;
2895 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
2899 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass)
2901 MonoInst *alloc, *ins;
2903 if (mono_class_is_nullable (klass)) {
2904 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
2905 return mono_emit_method_call (cfg, method, &val, NULL);
2908 alloc = handle_alloc (cfg, klass, TRUE);
2910 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
2916 handle_box_from_inst (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoInst *data_inst)
2918 MonoInst *alloc, *ins;
2920 if (mono_class_is_nullable (klass)) {
2921 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
2922 /* FIXME: What if the class is shared? We might not
2923 have to get the method address from the RGCTX. */
2924 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
2925 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2926 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2928 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
2930 alloc = handle_alloc_from_inst (cfg, klass, data_inst, TRUE);
2932 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
2939 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
2941 MonoBasicBlock *is_null_bb;
2942 int obj_reg = src->dreg;
2943 int vtable_reg = alloc_preg (cfg);
2945 NEW_BBLOCK (cfg, is_null_bb);
2947 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
2948 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
2950 save_cast_details (cfg, klass, obj_reg);
2952 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2953 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2954 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
2956 int klass_reg = alloc_preg (cfg);
2958 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2960 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
2961 /* the remoting code is broken, access the class for now */
2963 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
2964 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
2966 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2967 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
2969 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2971 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2972 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, is_null_bb);
2976 MONO_START_BB (cfg, is_null_bb);
2978 reset_cast_details (cfg);
2984 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
2987 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
2988 int obj_reg = src->dreg;
2989 int vtable_reg = alloc_preg (cfg);
2990 int res_reg = alloc_preg (cfg);
2992 NEW_BBLOCK (cfg, is_null_bb);
2993 NEW_BBLOCK (cfg, false_bb);
2994 NEW_BBLOCK (cfg, end_bb);
2996 /* Do the assignment at the beginning, so the other assignment can be if converted */
2997 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
2998 ins->type = STACK_OBJ;
3001 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3002 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3004 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3005 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3006 /* the is_null_bb target simply copies the input register to the output */
3007 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3009 int klass_reg = alloc_preg (cfg);
3011 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3014 int rank_reg = alloc_preg (cfg);
3015 int eclass_reg = alloc_preg (cfg);
3017 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3018 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3019 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3020 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3021 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
3022 if (klass->cast_class == mono_defaults.object_class) {
3023 int parent_reg = alloc_preg (cfg);
3024 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3025 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3026 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3027 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3028 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3029 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3030 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3031 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3032 } else if (klass->cast_class == mono_defaults.enum_class) {
3033 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3034 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3035 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3036 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3038 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3039 /* Check that the object is a vector too */
3040 int bounds_reg = alloc_preg (cfg);
3041 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3042 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3043 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3046 /* the is_null_bb target simply copies the input register to the output */
3047 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3049 } else if (mono_class_is_nullable (klass)) {
3050 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3051 /* the is_null_bb target simply copies the input register to the output */
3052 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
3054 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3055 /* the remoting code is broken, access the class for now */
3057 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3058 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3060 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3061 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3063 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3064 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3066 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3067 /* the is_null_bb target simply copies the input register to the output */
3068 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, is_null_bb);
3073 MONO_START_BB (cfg, false_bb);
3075 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3076 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3078 MONO_START_BB (cfg, is_null_bb);
3080 MONO_START_BB (cfg, end_bb);
3086 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3088 /* This opcode takes as input an object reference and a class, and returns:
3089 0) if the object is an instance of the class,
3090 1) if the object is not instance of the class,
3091 2) if the object is a proxy whose type cannot be determined */
3094 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3095 int obj_reg = src->dreg;
3096 int dreg = alloc_ireg (cfg);
3098 int klass_reg = alloc_preg (cfg);
3100 NEW_BBLOCK (cfg, true_bb);
3101 NEW_BBLOCK (cfg, false_bb);
3102 NEW_BBLOCK (cfg, false2_bb);
3103 NEW_BBLOCK (cfg, end_bb);
3104 NEW_BBLOCK (cfg, no_proxy_bb);
3106 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3107 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3109 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3110 NEW_BBLOCK (cfg, interface_fail_bb);
3112 tmp_reg = alloc_preg (cfg);
3113 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3114 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
3115 MONO_START_BB (cfg, interface_fail_bb);
3116 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3118 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
3120 tmp_reg = alloc_preg (cfg);
3121 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3122 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3123 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
3125 tmp_reg = alloc_preg (cfg);
3126 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3127 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3129 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3130 tmp_reg = alloc_preg (cfg);
3131 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3132 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3134 tmp_reg = alloc_preg (cfg);
3135 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3136 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3137 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3139 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3140 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3142 MONO_START_BB (cfg, no_proxy_bb);
3144 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
3147 MONO_START_BB (cfg, false_bb);
3149 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3150 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3152 MONO_START_BB (cfg, false2_bb);
3154 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3155 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3157 MONO_START_BB (cfg, true_bb);
3159 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3161 MONO_START_BB (cfg, end_bb);
3164 MONO_INST_NEW (cfg, ins, OP_ICONST);
3166 ins->type = STACK_I4;
3172 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3174 /* This opcode takes as input an object reference and a class, and returns:
3175 0) if the object is an instance of the class,
3176 1) if the object is a proxy whose type cannot be determined
3177 an InvalidCastException exception is thrown otherwhise*/
3180 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3181 int obj_reg = src->dreg;
3182 int dreg = alloc_ireg (cfg);
3183 int tmp_reg = alloc_preg (cfg);
3184 int klass_reg = alloc_preg (cfg);
3186 NEW_BBLOCK (cfg, end_bb);
3187 NEW_BBLOCK (cfg, ok_result_bb);
3189 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3190 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
3192 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3193 NEW_BBLOCK (cfg, interface_fail_bb);
3195 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3196 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3197 MONO_START_BB (cfg, interface_fail_bb);
3198 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3200 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
3202 tmp_reg = alloc_preg (cfg);
3203 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3204 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3205 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
3207 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3208 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3211 NEW_BBLOCK (cfg, no_proxy_bb);
3213 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3214 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3215 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
3217 tmp_reg = alloc_preg (cfg);
3218 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3219 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3221 tmp_reg = alloc_preg (cfg);
3222 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3223 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3224 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3226 NEW_BBLOCK (cfg, fail_1_bb);
3228 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3230 MONO_START_BB (cfg, fail_1_bb);
3232 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3233 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3235 MONO_START_BB (cfg, no_proxy_bb);
3237 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3240 MONO_START_BB (cfg, ok_result_bb);
3242 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3244 MONO_START_BB (cfg, end_bb);
3247 MONO_INST_NEW (cfg, ins, OP_ICONST);
3249 ins->type = STACK_I4;
3254 static G_GNUC_UNUSED MonoInst*
3255 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method)
3257 gpointer *trampoline;
3258 MonoInst *obj, *method_ins, *tramp_ins;
3262 obj = handle_alloc (cfg, klass, FALSE);
3264 /* Inline the contents of mono_delegate_ctor */
3266 /* Set target field */
3267 /* Optimize away setting of NULL target */
3268 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
3269 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3271 /* Set method field */
3272 EMIT_NEW_METHODCONST (cfg, method_ins, method);
3273 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3276 * To avoid looking up the compiled code belonging to the target method
3277 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3278 * store it, and we fill it after the method has been compiled.
3280 if (!cfg->compile_aot && !method->dynamic) {
3281 MonoInst *code_slot_ins;
3283 domain = mono_domain_get ();
3284 mono_domain_lock (domain);
3285 if (!domain_jit_info (domain)->method_code_hash)
3286 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3287 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3289 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3290 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3292 mono_domain_unlock (domain);
3294 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3295 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3298 /* Set invoke_impl field */
3299 if (cfg->compile_aot) {
3300 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3302 trampoline = mono_create_delegate_trampoline (klass);
3303 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3305 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3307 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
3313 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3315 MonoJitICallInfo *info;
3317 /* Need to register the icall so it gets an icall wrapper */
3318 info = mono_get_array_new_va_icall (rank);
3320 cfg->flags |= MONO_CFG_HAS_VARARGS;
3322 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3323 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
3327 mono_emit_load_got_addr (MonoCompile *cfg)
3329 MonoInst *getaddr, *dummy_use;
3331 if (!cfg->got_var || cfg->got_var_allocated)
3334 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3335 getaddr->dreg = cfg->got_var->dreg;
3337 /* Add it to the start of the first bblock */
3338 if (cfg->bb_entry->code) {
3339 getaddr->next = cfg->bb_entry->code;
3340 cfg->bb_entry->code = getaddr;
3343 MONO_ADD_INS (cfg->bb_entry, getaddr);
3345 cfg->got_var_allocated = TRUE;
3348 * Add a dummy use to keep the got_var alive, since real uses might
3349 * only be generated by the back ends.
3350 * Add it to end_bblock, so the variable's lifetime covers the whole
3352 * It would be better to make the usage of the got var explicit in all
3353 * cases when the backend needs it (i.e. calls, throw etc.), so this
3354 * wouldn't be needed.
3356 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3357 MONO_ADD_INS (cfg->bb_exit, dummy_use);
3360 static int inline_limit;
3361 static gboolean inline_limit_inited;
3364 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3366 MonoMethodHeader *header;
3368 #ifdef MONO_ARCH_SOFT_FLOAT
3369 MonoMethodSignature *sig = mono_method_signature (method);
3373 if (cfg->generic_sharing_context)
3376 #ifdef MONO_ARCH_HAVE_LMF_OPS
3377 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3378 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3379 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3383 if (method->is_inflated)
3384 /* Avoid inflating the header */
3385 header = mono_method_get_header (((MonoMethodInflated*)method)->declaring);
3387 header = mono_method_get_header (method);
3389 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME) ||
3390 (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3391 (method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3392 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3393 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) ||
3394 (method->klass->marshalbyref) ||
3395 !header || header->num_clauses)
3398 /* also consider num_locals? */
3399 /* Do the size check early to avoid creating vtables */
3400 if (!inline_limit_inited) {
3401 if (getenv ("MONO_INLINELIMIT"))
3402 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3404 inline_limit = INLINE_LENGTH_LIMIT;
3405 inline_limit_inited = TRUE;
3407 if (header->code_size >= inline_limit)
3411 * if we can initialize the class of the method right away, we do,
3412 * otherwise we don't allow inlining if the class needs initialization,
3413 * since it would mean inserting a call to mono_runtime_class_init()
3414 * inside the inlined code
3416 if (!(cfg->opt & MONO_OPT_SHARED)) {
3417 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3418 if (cfg->run_cctors && method->klass->has_cctor) {
3419 if (!method->klass->runtime_info)
3420 /* No vtable created yet */
3422 vtable = mono_class_vtable (cfg->domain, method->klass);
3425 /* This makes so that inline cannot trigger */
3426 /* .cctors: too many apps depend on them */
3427 /* running with a specific order... */
3428 if (! vtable->initialized)
3430 mono_runtime_class_init (vtable);
3432 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3433 if (!method->klass->runtime_info)
3434 /* No vtable created yet */
3436 vtable = mono_class_vtable (cfg->domain, method->klass);
3439 if (!vtable->initialized)
3444 * If we're compiling for shared code
3445 * the cctor will need to be run at aot method load time, for example,
3446 * or at the end of the compilation of the inlining method.
3448 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3453 * CAS - do not inline methods with declarative security
3454 * Note: this has to be before any possible return TRUE;
3456 if (mono_method_has_declsec (method))
3459 #ifdef MONO_ARCH_SOFT_FLOAT
3461 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3463 for (i = 0; i < sig->param_count; ++i)
3464 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
3472 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
3474 if (vtable->initialized && !cfg->compile_aot)
3477 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3480 if (!mono_class_needs_cctor_run (vtable->klass, method))
3483 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3484 /* The initialization is already done before the method is called */
3491 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index)
3495 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3497 mono_class_init (klass);
3498 size = mono_class_array_element_size (klass);
3500 mult_reg = alloc_preg (cfg);
3501 array_reg = arr->dreg;
3502 index_reg = index->dreg;
3504 #if SIZEOF_REGISTER == 8
3505 /* The array reg is 64 bits but the index reg is only 32 */
3506 index2_reg = alloc_preg (cfg);
3507 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
3509 if (index->type == STACK_I8) {
3510 index2_reg = alloc_preg (cfg);
3511 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
3513 index2_reg = index_reg;
3517 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3519 #if defined(__i386__) || defined(__x86_64__)
3520 if (size == 1 || size == 2 || size == 4 || size == 8) {
3521 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3523 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3524 ins->type = STACK_PTR;
3530 add_reg = alloc_preg (cfg);
3532 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3533 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3534 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3535 ins->type = STACK_PTR;
3536 MONO_ADD_INS (cfg->cbb, ins);
3541 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3543 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
3545 int bounds_reg = alloc_preg (cfg);
3546 int add_reg = alloc_preg (cfg);
3547 int mult_reg = alloc_preg (cfg);
3548 int mult2_reg = alloc_preg (cfg);
3549 int low1_reg = alloc_preg (cfg);
3550 int low2_reg = alloc_preg (cfg);
3551 int high1_reg = alloc_preg (cfg);
3552 int high2_reg = alloc_preg (cfg);
3553 int realidx1_reg = alloc_preg (cfg);
3554 int realidx2_reg = alloc_preg (cfg);
3555 int sum_reg = alloc_preg (cfg);
3560 mono_class_init (klass);
3561 size = mono_class_array_element_size (klass);
3563 index1 = index_ins1->dreg;
3564 index2 = index_ins2->dreg;
3566 /* range checking */
3567 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
3568 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
3570 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
3571 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3572 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
3573 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
3574 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
3575 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
3576 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
3578 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
3579 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3580 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
3581 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
3582 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
3583 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
3584 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
3586 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
3587 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
3588 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
3589 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
3590 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3592 ins->type = STACK_MP;
3594 MONO_ADD_INS (cfg->cbb, ins);
3601 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
3605 MonoMethod *addr_method;
3608 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3611 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1]);
3613 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3614 /* emit_ldelema_2 depends on OP_LMUL */
3615 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3616 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
3620 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3621 addr_method = mono_marshal_get_array_address (rank, element_size);
3622 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
3628 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
3630 MonoInst *ins = NULL;
3632 static MonoClass *runtime_helpers_class = NULL;
3633 if (! runtime_helpers_class)
3634 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
3635 "System.Runtime.CompilerServices", "RuntimeHelpers");
3637 if (cmethod->klass == mono_defaults.string_class) {
3638 if (strcmp (cmethod->name, "get_Chars") == 0) {
3639 int dreg = alloc_ireg (cfg);
3640 int index_reg = alloc_preg (cfg);
3641 int mult_reg = alloc_preg (cfg);
3642 int add_reg = alloc_preg (cfg);
3644 #if SIZEOF_REGISTER == 8
3645 /* The array reg is 64 bits but the index reg is only 32 */
3646 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
3648 index_reg = args [1]->dreg;
3650 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
3652 #if defined(__i386__) || defined(__x86_64__)
3653 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
3654 add_reg = ins->dreg;
3655 /* Avoid a warning */
3657 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3660 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
3661 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3662 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3663 add_reg, G_STRUCT_OFFSET (MonoString, chars));
3665 type_from_op (ins, NULL, NULL);
3667 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3668 int dreg = alloc_ireg (cfg);
3669 /* Decompose later to allow more optimizations */
3670 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
3671 ins->type = STACK_I4;
3672 cfg->cbb->has_array_access = TRUE;
3673 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
3676 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
3677 int mult_reg = alloc_preg (cfg);
3678 int add_reg = alloc_preg (cfg);
3680 /* The corlib functions check for oob already. */
3681 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
3682 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3683 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
3686 } else if (cmethod->klass == mono_defaults.object_class) {
3688 if (strcmp (cmethod->name, "GetType") == 0) {
3689 int dreg = alloc_preg (cfg);
3690 int vt_reg = alloc_preg (cfg);
3691 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3692 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
3693 type_from_op (ins, NULL, NULL);
3696 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
3697 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
3698 int dreg = alloc_ireg (cfg);
3699 int t1 = alloc_ireg (cfg);
3701 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
3702 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
3703 ins->type = STACK_I4;
3707 } else if (strcmp (cmethod->name, ".ctor") == 0) {
3708 MONO_INST_NEW (cfg, ins, OP_NOP);
3709 MONO_ADD_INS (cfg->cbb, ins);
3713 } else if (cmethod->klass == mono_defaults.array_class) {
3714 if (cmethod->name [0] != 'g')
3717 if (strcmp (cmethod->name, "get_Rank") == 0) {
3718 int dreg = alloc_ireg (cfg);
3719 int vtable_reg = alloc_preg (cfg);
3720 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, vtable_reg,
3721 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3722 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
3723 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3724 type_from_op (ins, NULL, NULL);
3727 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3728 int dreg = alloc_ireg (cfg);
3730 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
3731 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
3732 type_from_op (ins, NULL, NULL);
3737 } else if (cmethod->klass == runtime_helpers_class) {
3739 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
3740 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
3744 } else if (cmethod->klass == mono_defaults.thread_class) {
3745 if (strcmp (cmethod->name, "get_CurrentThread") == 0 && (ins = mono_arch_get_thread_intrinsic (cfg))) {
3746 ins->dreg = alloc_preg (cfg);
3747 ins->type = STACK_OBJ;
3748 MONO_ADD_INS (cfg->cbb, ins);
3750 } else if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
3751 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
3752 MONO_ADD_INS (cfg->cbb, ins);
3754 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
3755 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
3756 MONO_ADD_INS (cfg->cbb, ins);
3759 } else if (cmethod->klass == mono_defaults.monitor_class) {
3760 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
3761 if (strcmp (cmethod->name, "Enter") == 0) {
3764 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
3765 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
3766 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
3767 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
3769 return (MonoInst*)call;
3770 } else if (strcmp (cmethod->name, "Exit") == 0) {
3773 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
3774 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
3775 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
3776 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
3778 return (MonoInst*)call;
3780 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
3781 MonoMethod *fast_method = NULL;
3783 /* Avoid infinite recursion */
3784 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
3785 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
3786 strcmp (cfg->method->name, "FastMonitorExit") == 0))
3789 if (strcmp (cmethod->name, "Enter") == 0 ||
3790 strcmp (cmethod->name, "Exit") == 0)
3791 fast_method = mono_monitor_get_fast_path (cmethod);
3795 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
3797 } else if (mini_class_is_system_array (cmethod->klass) &&
3798 strcmp (cmethod->name, "GetGenericValueImpl") == 0) {
3799 MonoInst *addr, *store, *load;
3800 MonoClass *eklass = mono_class_from_mono_type (fsig->params [1]);
3802 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1]);
3803 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
3804 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
3806 } else if (cmethod->klass->image == mono_defaults.corlib &&
3807 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
3808 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
3811 #if SIZEOF_REGISTER == 8
3812 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
3813 /* 64 bit reads are already atomic */
3814 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
3815 ins->dreg = mono_alloc_preg (cfg);
3816 ins->inst_basereg = args [0]->dreg;
3817 ins->inst_offset = 0;
3818 MONO_ADD_INS (cfg->cbb, ins);
3822 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
3823 if (strcmp (cmethod->name, "Increment") == 0) {
3824 MonoInst *ins_iconst;
3827 if (fsig->params [0]->type == MONO_TYPE_I4)
3828 opcode = OP_ATOMIC_ADD_NEW_I4;
3829 #if SIZEOF_REGISTER == 8
3830 else if (fsig->params [0]->type == MONO_TYPE_I8)
3831 opcode = OP_ATOMIC_ADD_NEW_I8;
3834 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3835 ins_iconst->inst_c0 = 1;
3836 ins_iconst->dreg = mono_alloc_ireg (cfg);
3837 MONO_ADD_INS (cfg->cbb, ins_iconst);
3839 MONO_INST_NEW (cfg, ins, opcode);
3840 ins->dreg = mono_alloc_ireg (cfg);
3841 ins->inst_basereg = args [0]->dreg;
3842 ins->inst_offset = 0;
3843 ins->sreg2 = ins_iconst->dreg;
3844 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3845 MONO_ADD_INS (cfg->cbb, ins);
3847 } else if (strcmp (cmethod->name, "Decrement") == 0) {
3848 MonoInst *ins_iconst;
3851 if (fsig->params [0]->type == MONO_TYPE_I4)
3852 opcode = OP_ATOMIC_ADD_NEW_I4;
3853 #if SIZEOF_REGISTER == 8
3854 else if (fsig->params [0]->type == MONO_TYPE_I8)
3855 opcode = OP_ATOMIC_ADD_NEW_I8;
3858 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
3859 ins_iconst->inst_c0 = -1;
3860 ins_iconst->dreg = mono_alloc_ireg (cfg);
3861 MONO_ADD_INS (cfg->cbb, ins_iconst);
3863 MONO_INST_NEW (cfg, ins, opcode);
3864 ins->dreg = mono_alloc_ireg (cfg);
3865 ins->inst_basereg = args [0]->dreg;
3866 ins->inst_offset = 0;
3867 ins->sreg2 = ins_iconst->dreg;
3868 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3869 MONO_ADD_INS (cfg->cbb, ins);
3871 } else if (strcmp (cmethod->name, "Add") == 0) {
3874 if (fsig->params [0]->type == MONO_TYPE_I4)
3875 opcode = OP_ATOMIC_ADD_NEW_I4;
3876 #if SIZEOF_REGISTER == 8
3877 else if (fsig->params [0]->type == MONO_TYPE_I8)
3878 opcode = OP_ATOMIC_ADD_NEW_I8;
3882 MONO_INST_NEW (cfg, ins, opcode);
3883 ins->dreg = mono_alloc_ireg (cfg);
3884 ins->inst_basereg = args [0]->dreg;
3885 ins->inst_offset = 0;
3886 ins->sreg2 = args [1]->dreg;
3887 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
3888 MONO_ADD_INS (cfg->cbb, ins);
3891 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
3893 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
3894 if (strcmp (cmethod->name, "Exchange") == 0) {
3897 if (fsig->params [0]->type == MONO_TYPE_I4)
3898 opcode = OP_ATOMIC_EXCHANGE_I4;
3899 #if SIZEOF_REGISTER == 8
3900 else if ((fsig->params [0]->type == MONO_TYPE_I8) ||
3901 (fsig->params [0]->type == MONO_TYPE_I) ||
3902 (fsig->params [0]->type == MONO_TYPE_OBJECT))
3903 opcode = OP_ATOMIC_EXCHANGE_I8;
3905 else if ((fsig->params [0]->type == MONO_TYPE_I) ||
3906 (fsig->params [0]->type == MONO_TYPE_OBJECT))
3907 opcode = OP_ATOMIC_EXCHANGE_I4;
3912 MONO_INST_NEW (cfg, ins, opcode);
3913 ins->dreg = mono_alloc_ireg (cfg);
3914 ins->inst_basereg = args [0]->dreg;
3915 ins->inst_offset = 0;
3916 ins->sreg2 = args [1]->dreg;
3917 MONO_ADD_INS (cfg->cbb, ins);
3919 switch (fsig->params [0]->type) {
3921 ins->type = STACK_I4;
3925 ins->type = STACK_I8;
3927 case MONO_TYPE_OBJECT:
3928 ins->type = STACK_OBJ;
3931 g_assert_not_reached ();
3934 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
3936 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
3937 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
3939 if (fsig->params [1]->type == MONO_TYPE_I4)
3941 else if (fsig->params [1]->type == MONO_TYPE_I || MONO_TYPE_IS_REFERENCE (fsig->params [1]))
3942 size = sizeof (gpointer);
3943 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I4)
3946 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
3947 ins->dreg = alloc_ireg (cfg);
3948 ins->sreg1 = args [0]->dreg;
3949 ins->sreg2 = args [1]->dreg;
3950 ins->sreg3 = args [2]->dreg;
3951 ins->type = STACK_I4;
3952 MONO_ADD_INS (cfg->cbb, ins);
3953 } else if (size == 8) {
3954 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
3955 ins->dreg = alloc_ireg (cfg);
3956 ins->sreg1 = args [0]->dreg;
3957 ins->sreg2 = args [1]->dreg;
3958 ins->sreg3 = args [2]->dreg;
3959 ins->type = STACK_I8;
3960 MONO_ADD_INS (cfg->cbb, ins);
3962 /* g_assert_not_reached (); */
3965 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
3969 } else if (cmethod->klass->image == mono_defaults.corlib) {
3970 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
3971 && strcmp (cmethod->klass->name, "Debugger") == 0) {
3972 MONO_INST_NEW (cfg, ins, OP_BREAK);
3973 MONO_ADD_INS (cfg->cbb, ins);
3976 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
3977 && strcmp (cmethod->klass->name, "Environment") == 0) {
3978 #ifdef PLATFORM_WIN32
3979 EMIT_NEW_ICONST (cfg, ins, 1);
3981 EMIT_NEW_ICONST (cfg, ins, 0);
3985 } else if (cmethod->klass == mono_defaults.math_class) {
3987 * There is general branches code for Min/Max, but it does not work for
3989 * http://everything2.com/?node_id=1051618
3993 #ifdef MONO_ARCH_SIMD_INTRINSICS
3994 if (cfg->opt & MONO_OPT_SIMD) {
3995 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
4001 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
4005 * This entry point could be used later for arbitrary method
4008 inline static MonoInst*
4009 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4010 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4012 if (method->klass == mono_defaults.string_class) {
4013 /* managed string allocation support */
4014 if (strcmp (method->name, "InternalAllocateStr") == 0) {
4015 MonoInst *iargs [2];
4016 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4017 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
4020 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4021 iargs [1] = args [0];
4022 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
4029 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4031 MonoInst *store, *temp;
4034 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4035 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4038 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4039 * would be different than the MonoInst's used to represent arguments, and
4040 * the ldelema implementation can't deal with that.
4041 * Solution: When ldelema is used on an inline argument, create a var for
4042 * it, emit ldelema on that var, and emit the saving code below in
4043 * inline_method () if needed.
4045 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4046 cfg->args [i] = temp;
4047 /* This uses cfg->args [i] which is set by the preceeding line */
4048 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4049 store->cil_code = sp [0]->cil_code;
4054 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4055 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4057 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4059 check_inline_called_method_name_limit (MonoMethod *called_method)
4062 static char *limit = NULL;
4064 if (limit == NULL) {
4065 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4067 if (limit_string != NULL)
4068 limit = limit_string;
4070 limit = (char *) "";
4073 if (limit [0] != '\0') {
4074 char *called_method_name = mono_method_full_name (called_method, TRUE);
4076 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4077 g_free (called_method_name);
4079 //return (strncmp_result <= 0);
4080 return (strncmp_result == 0);
4087 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4089 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4092 static char *limit = NULL;
4094 if (limit == NULL) {
4095 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4096 if (limit_string != NULL) {
4097 limit = limit_string;
4099 limit = (char *) "";
4103 if (limit [0] != '\0') {
4104 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
4106 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4107 g_free (caller_method_name);
4109 //return (strncmp_result <= 0);
4110 return (strncmp_result == 0);
4118 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4119 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
4121 MonoInst *ins, *rvar = NULL;
4122 MonoMethodHeader *cheader;
4123 MonoBasicBlock *ebblock, *sbblock;
4125 MonoMethod *prev_inlined_method;
4126 MonoInst **prev_locals, **prev_args;
4127 MonoType **prev_arg_types;
4128 guint prev_real_offset;
4129 GHashTable *prev_cbb_hash;
4130 MonoBasicBlock **prev_cil_offset_to_bb;
4131 MonoBasicBlock *prev_cbb;
4132 unsigned char* prev_cil_start;
4133 guint32 prev_cil_offset_to_bb_len;
4134 MonoMethod *prev_current_method;
4135 MonoGenericContext *prev_generic_context;
4136 gboolean ret_var_set, prev_ret_var_set;
4138 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
4140 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4141 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
4144 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4145 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
4149 if (cfg->verbose_level > 2)
4150 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4152 if (!cmethod->inline_info) {
4153 mono_jit_stats.inlineable_methods++;
4154 cmethod->inline_info = 1;
4156 /* allocate space to store the return value */
4157 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4158 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
4161 /* allocate local variables */
4162 cheader = mono_method_get_header (cmethod);
4163 prev_locals = cfg->locals;
4164 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4165 for (i = 0; i < cheader->num_locals; ++i)
4166 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4168 /* allocate start and end blocks */
4169 /* This is needed so if the inline is aborted, we can clean up */
4170 NEW_BBLOCK (cfg, sbblock);
4171 sbblock->real_offset = real_offset;
4173 NEW_BBLOCK (cfg, ebblock);
4174 ebblock->block_num = cfg->num_bblocks++;
4175 ebblock->real_offset = real_offset;
4177 prev_args = cfg->args;
4178 prev_arg_types = cfg->arg_types;
4179 prev_inlined_method = cfg->inlined_method;
4180 cfg->inlined_method = cmethod;
4181 cfg->ret_var_set = FALSE;
4182 prev_real_offset = cfg->real_offset;
4183 prev_cbb_hash = cfg->cbb_hash;
4184 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4185 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4186 prev_cil_start = cfg->cil_start;
4187 prev_cbb = cfg->cbb;
4188 prev_current_method = cfg->current_method;
4189 prev_generic_context = cfg->generic_context;
4190 prev_ret_var_set = cfg->ret_var_set;
4192 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4194 ret_var_set = cfg->ret_var_set;
4196 cfg->inlined_method = prev_inlined_method;
4197 cfg->real_offset = prev_real_offset;
4198 cfg->cbb_hash = prev_cbb_hash;
4199 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4200 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4201 cfg->cil_start = prev_cil_start;
4202 cfg->locals = prev_locals;
4203 cfg->args = prev_args;
4204 cfg->arg_types = prev_arg_types;
4205 cfg->current_method = prev_current_method;
4206 cfg->generic_context = prev_generic_context;
4207 cfg->ret_var_set = prev_ret_var_set;
4209 if ((costs >= 0 && costs < 60) || inline_allways) {
4210 if (cfg->verbose_level > 2)
4211 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4213 mono_jit_stats.inlined_methods++;
4215 /* always add some code to avoid block split failures */
4216 MONO_INST_NEW (cfg, ins, OP_NOP);
4217 MONO_ADD_INS (prev_cbb, ins);
4219 prev_cbb->next_bb = sbblock;
4220 link_bblock (cfg, prev_cbb, sbblock);
4223 * Get rid of the begin and end bblocks if possible to aid local
4226 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4228 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4229 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4231 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4232 MonoBasicBlock *prev = ebblock->in_bb [0];
4233 mono_merge_basic_blocks (cfg, prev, ebblock);
4235 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4236 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4237 cfg->cbb = prev_cbb;
4245 * If the inlined method contains only a throw, then the ret var is not
4246 * set, so set it to a dummy value.
4249 static double r8_0 = 0.0;
4251 switch (rvar->type) {
4253 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4256 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4261 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4264 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4265 ins->type = STACK_R8;
4266 ins->inst_p0 = (void*)&r8_0;
4267 ins->dreg = rvar->dreg;
4268 MONO_ADD_INS (cfg->cbb, ins);
4271 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4274 g_assert_not_reached ();
4278 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
4283 if (cfg->verbose_level > 2)
4284 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4285 cfg->exception_type = MONO_EXCEPTION_NONE;
4286 mono_loader_clear_error ();
4288 /* This gets rid of the newly added bblocks */
4289 cfg->cbb = prev_cbb;
4295 * Some of these comments may well be out-of-date.
4296 * Design decisions: we do a single pass over the IL code (and we do bblock
4297 * splitting/merging in the few cases when it's required: a back jump to an IL
4298 * address that was not already seen as bblock starting point).
4299 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4300 * Complex operations are decomposed in simpler ones right away. We need to let the
4301 * arch-specific code peek and poke inside this process somehow (except when the
4302 * optimizations can take advantage of the full semantic info of coarse opcodes).
4303 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4304 * MonoInst->opcode initially is the IL opcode or some simplification of that
4305 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4306 * opcode with value bigger than OP_LAST.
4307 * At this point the IR can be handed over to an interpreter, a dumb code generator
4308 * or to the optimizing code generator that will translate it to SSA form.
4310 * Profiling directed optimizations.
4311 * We may compile by default with few or no optimizations and instrument the code
4312 * or the user may indicate what methods to optimize the most either in a config file
4313 * or through repeated runs where the compiler applies offline the optimizations to
4314 * each method and then decides if it was worth it.
4317 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
4318 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
4319 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
4320 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
4321 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
4322 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
4323 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
4324 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;}
4326 /* offset from br.s -> br like opcodes */
4327 #define BIG_BRANCH_OFFSET 13
4330 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
4332 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
4334 return b == NULL || b == bb;
4338 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
4340 unsigned char *ip = start;
4341 unsigned char *target;
4344 MonoBasicBlock *bblock;
4345 const MonoOpcode *opcode;
4348 cli_addr = ip - start;
4349 i = mono_opcode_value ((const guint8 **)&ip, end);
4352 opcode = &mono_opcodes [i];
4353 switch (opcode->argument) {
4354 case MonoInlineNone:
4357 case MonoInlineString:
4358 case MonoInlineType:
4359 case MonoInlineField:
4360 case MonoInlineMethod:
4363 case MonoShortInlineR:
4370 case MonoShortInlineVar:
4371 case MonoShortInlineI:
4374 case MonoShortInlineBrTarget:
4375 target = start + cli_addr + 2 + (signed char)ip [1];
4376 GET_BBLOCK (cfg, bblock, target);
4379 GET_BBLOCK (cfg, bblock, ip);
4381 case MonoInlineBrTarget:
4382 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
4383 GET_BBLOCK (cfg, bblock, target);
4386 GET_BBLOCK (cfg, bblock, ip);
4388 case MonoInlineSwitch: {
4389 guint32 n = read32 (ip + 1);
4392 cli_addr += 5 + 4 * n;
4393 target = start + cli_addr;
4394 GET_BBLOCK (cfg, bblock, target);
4396 for (j = 0; j < n; ++j) {
4397 target = start + cli_addr + (gint32)read32 (ip);
4398 GET_BBLOCK (cfg, bblock, target);
4408 g_assert_not_reached ();
4411 if (i == CEE_THROW) {
4412 unsigned char *bb_start = ip - 1;
4414 /* Find the start of the bblock containing the throw */
4416 while ((bb_start >= start) && !bblock) {
4417 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
4421 bblock->out_of_line = 1;
4430 static inline MonoMethod *
4431 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4435 if (m->wrapper_type != MONO_WRAPPER_NONE)
4436 return mono_method_get_wrapper_data (m, token);
4438 method = mono_get_method_full (m->klass->image, token, klass, context);
4443 static inline MonoMethod *
4444 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
4446 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
4448 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
4454 static inline MonoClass*
4455 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
4459 if (method->wrapper_type != MONO_WRAPPER_NONE)
4460 klass = mono_method_get_wrapper_data (method, token);
4462 klass = mono_class_get_full (method->klass->image, token, context);
4464 mono_class_init (klass);
4469 * Returns TRUE if the JIT should abort inlining because "callee"
4470 * is influenced by security attributes.
4473 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
4477 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
4481 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
4482 if (result == MONO_JIT_SECURITY_OK)
4485 if (result == MONO_JIT_LINKDEMAND_ECMA) {
4486 /* Generate code to throw a SecurityException before the actual call/link */
4487 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4490 NEW_ICONST (cfg, args [0], 4);
4491 NEW_METHODCONST (cfg, args [1], caller);
4492 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
4493 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
4494 /* don't hide previous results */
4495 cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
4496 cfg->exception_data = result;
4504 method_access_exception (void)
4506 static MonoMethod *method = NULL;
4509 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4510 method = mono_class_get_method_from_name (secman->securitymanager,
4511 "MethodAccessException", 2);
4518 emit_throw_method_access_exception (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4519 MonoBasicBlock *bblock, unsigned char *ip)
4521 MonoMethod *thrower = method_access_exception ();
4524 EMIT_NEW_METHODCONST (cfg, args [0], caller);
4525 EMIT_NEW_METHODCONST (cfg, args [1], callee);
4526 mono_emit_method_call (cfg, thrower, args, NULL);
4530 field_access_exception (void)
4532 static MonoMethod *method = NULL;
4535 MonoSecurityManager *secman = mono_security_manager_get_methods ();
4536 method = mono_class_get_method_from_name (secman->securitymanager,
4537 "FieldAccessException", 2);
4544 emit_throw_field_access_exception (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
4545 MonoBasicBlock *bblock, unsigned char *ip)
4547 MonoMethod *thrower = field_access_exception ();
4550 EMIT_NEW_METHODCONST (cfg, args [0], caller);
4551 EMIT_NEW_METHODCONST (cfg, args [1], field);
4552 mono_emit_method_call (cfg, thrower, args, NULL);
4556 * Return the original method is a wrapper is specified. We can only access
4557 * the custom attributes from the original method.
4560 get_original_method (MonoMethod *method)
4562 if (method->wrapper_type == MONO_WRAPPER_NONE)
4565 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
4566 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
4569 /* in other cases we need to find the original method */
4570 return mono_marshal_method_from_wrapper (method);
4574 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
4575 MonoBasicBlock *bblock, unsigned char *ip)
4577 /* there's no restriction to access Transparent or SafeCritical fields, so we only check calls to Critical methods */
4578 if (mono_security_core_clr_class_level (mono_field_get_parent (field)) != MONO_SECURITY_CORE_CLR_CRITICAL)
4581 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4582 caller = get_original_method (caller);
4586 /* caller is Critical! only SafeCritical and Critical callers can access the field, so we throw if caller is Transparent */
4587 if (mono_security_core_clr_method_level (caller, TRUE) == MONO_SECURITY_CORE_CLR_TRANSPARENT)
4588 emit_throw_field_access_exception (cfg, caller, field, bblock, ip);
4592 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
4593 MonoBasicBlock *bblock, unsigned char *ip)
4595 /* there's no restriction to call Transparent or SafeCritical code, so we only check calls to Critical methods */
4596 if (mono_security_core_clr_method_level (callee, TRUE) != MONO_SECURITY_CORE_CLR_CRITICAL)
4599 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
4600 caller = get_original_method (caller);
4604 /* caller is Critical! only SafeCritical and Critical callers can call it, so we throw if the caller is Transparent */
4605 if (mono_security_core_clr_method_level (caller, TRUE) == MONO_SECURITY_CORE_CLR_TRANSPARENT)
4606 emit_throw_method_access_exception (cfg, caller, callee, bblock, ip);
4610 * Check that the IL instructions at ip are the array initialization
4611 * sequence and return the pointer to the data and the size.
4614 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
4617 * newarr[System.Int32]
4619 * ldtoken field valuetype ...
4620 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
4622 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
4623 guint32 token = read32 (ip + 7);
4624 guint32 field_token = read32 (ip + 2);
4625 guint32 field_index = field_token & 0xffffff;
4627 const char *data_ptr;
4629 MonoMethod *cmethod;
4630 MonoClass *dummy_class;
4631 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
4637 *out_field_token = field_token;
4639 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
4642 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
4644 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
4645 case MONO_TYPE_BOOLEAN:
4649 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
4650 #if G_BYTE_ORDER == G_LITTLE_ENDIAN
4651 case MONO_TYPE_CHAR:
4661 return NULL; /* stupid ARM FP swapped format */
4671 if (size > mono_type_size (field->type, &dummy_align))
4674 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
4675 if (!method->klass->image->dynamic) {
4676 field_index = read32 (ip + 2) & 0xffffff;
4677 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
4678 data_ptr = mono_image_rva_map (method->klass->image, rva);
4679 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
4680 /* for aot code we do the lookup on load */
4681 if (aot && data_ptr)
4682 return GUINT_TO_POINTER (rva);
4684 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
4686 data_ptr = mono_field_get_data (field);
4694 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
4696 char *method_fname = mono_method_full_name (method, TRUE);
4699 if (mono_method_get_header (method)->code_size == 0)
4700 method_code = g_strdup ("method body is empty.");
4702 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
4703 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
4704 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
4705 g_free (method_fname);
4706 g_free (method_code);
4710 set_exception_object (MonoCompile *cfg, MonoException *exception)
4712 cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
4713 MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
4714 cfg->exception_ptr = exception;
4718 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
4722 if (cfg->generic_sharing_context)
4723 type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
4725 type = &klass->byval_arg;
4726 return MONO_TYPE_IS_REFERENCE (type);
4730 * mono_decompose_array_access_opts:
4732 * Decompose array access opcodes.
4733 * This should be in decompose.c, but it emits calls so it has to stay here until
4734 * the old JIT is gone.
4737 mono_decompose_array_access_opts (MonoCompile *cfg)
4739 MonoBasicBlock *bb, *first_bb;
4742 * Unlike decompose_long_opts, this pass does not alter the CFG of the method so it
4743 * can be executed anytime. It should be run before decompose_long
4747 * Create a dummy bblock and emit code into it so we can use the normal
4748 * code generation macros.
4750 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4751 first_bb = cfg->cbb;
4753 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4755 MonoInst *prev = NULL;
4757 MonoInst *iargs [3];
4760 if (!bb->has_array_access)
4763 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE DECOMPOSE-ARRAY-ACCESS-OPTS ");
4765 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4771 for (ins = bb->code; ins; ins = ins->next) {
4772 switch (ins->opcode) {
4774 NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg, ins->sreg1,
4775 G_STRUCT_OFFSET (MonoArray, max_length));
4776 MONO_ADD_INS (cfg->cbb, dest);
4778 case OP_BOUNDS_CHECK:
4779 MONO_ARCH_EMIT_BOUNDS_CHECK (cfg, ins->sreg1, ins->inst_imm, ins->sreg2);
4782 if (cfg->opt & MONO_OPT_SHARED) {
4783 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4784 EMIT_NEW_CLASSCONST (cfg, iargs [1], ins->inst_newa_class);
4785 MONO_INST_NEW (cfg, iargs [2], OP_MOVE);
4786 iargs [2]->dreg = ins->sreg1;
4788 dest = mono_emit_jit_icall (cfg, mono_array_new, iargs);
4789 dest->dreg = ins->dreg;
4791 MonoVTable *vtable = mono_class_vtable (cfg->domain, mono_array_class_get (ins->inst_newa_class, 1));
4794 NEW_VTABLECONST (cfg, iargs [0], vtable);
4795 MONO_ADD_INS (cfg->cbb, iargs [0]);
4796 MONO_INST_NEW (cfg, iargs [1], OP_MOVE);
4797 iargs [1]->dreg = ins->sreg1;
4799 dest = mono_emit_jit_icall (cfg, mono_array_new_specific, iargs);
4800 dest->dreg = ins->dreg;
4804 NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg,
4805 ins->sreg1, G_STRUCT_OFFSET (MonoString, length));
4806 MONO_ADD_INS (cfg->cbb, dest);
4812 g_assert (cfg->cbb == first_bb);
4814 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
4815 /* Replace the original instruction with the new code sequence */
4817 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
4818 first_bb->code = first_bb->last_ins = NULL;
4819 first_bb->in_count = first_bb->out_count = 0;
4820 cfg->cbb = first_bb;
4827 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER DECOMPOSE-ARRAY-ACCESS-OPTS ");
4837 #ifdef MONO_ARCH_SOFT_FLOAT
4840 * mono_decompose_soft_float:
4842 * Soft float support on ARM. We store each double value in a pair of integer vregs,
4843 * similar to long support on 32 bit platforms. 32 bit float values require special
4844 * handling when used as locals, arguments, and in calls.
4845 * One big problem with soft-float is that there are few r4 test cases in our test suite.
4848 mono_decompose_soft_float (MonoCompile *cfg)
4850 MonoBasicBlock *bb, *first_bb;
4853 * This pass creates long opcodes, so it should be run before decompose_long_opts ().
4857 * Create a dummy bblock and emit code into it so we can use the normal
4858 * code generation macros.
4860 cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
4861 first_bb = cfg->cbb;
4863 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4865 MonoInst *prev = NULL;
4868 if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE HANDLE-SOFT-FLOAT ");
4870 cfg->cbb->code = cfg->cbb->last_ins = NULL;
4876 for (ins = bb->code; ins; ins = ins->next) {
4877 const char *spec = INS_INFO (ins->opcode);
4879 /* Most fp operations are handled automatically by opcode emulation */
4881 switch (ins->opcode) {
4884 d.vald = *(double*)ins->inst_p0;
4885 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
4890 /* We load the r8 value */
4891 d.vald = *(float*)ins->inst_p0;
4892 MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
4896 ins->opcode = OP_LMOVE;
4899 ins->opcode = OP_MOVE;
4900 ins->sreg1 = ins->sreg1 + 1;
4903 ins->opcode = OP_MOVE;
4904 ins->sreg1 = ins->sreg1 + 2;
4907 int reg = ins->sreg1;
4909 ins->opcode = OP_SETLRET;
4911 ins->sreg1 = reg + 1;
4912 ins->sreg2 = reg + 2;
4915 case OP_LOADR8_MEMBASE:
4916 ins->opcode = OP_LOADI8_MEMBASE;
4918 case OP_STORER8_MEMBASE_REG:
4919 ins->opcode = OP_STOREI8_MEMBASE_REG;
4921 case OP_STORER4_MEMBASE_REG: {
4922 MonoInst *iargs [2];
4925 /* Arg 1 is the double value */
4926 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
4927 iargs [0]->dreg = ins->sreg1;
4929 /* Arg 2 is the address to store to */
4930 addr_reg = mono_alloc_preg (cfg);
4931 EMIT_NEW_BIALU_IMM (cfg, iargs [1], OP_PADD_IMM, addr_reg, ins->inst_destbasereg, ins->inst_offset);
4932 mono_emit_jit_icall (cfg, mono_fstore_r4, iargs);
4936 case OP_LOADR4_MEMBASE: {
4937 MonoInst *iargs [1];
4941 addr_reg = mono_alloc_preg (cfg);
4942 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, addr_reg, ins->inst_basereg, ins->inst_offset);
4943 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
4944 conv->dreg = ins->dreg;
4949 case OP_FCALL_MEMBASE: {
4950 MonoCallInst *call = (MonoCallInst*)ins;
4951 if (call->signature->ret->type == MONO_TYPE_R4) {
4952 MonoCallInst *call2;
4953 MonoInst *iargs [1];
4956 /* Convert the call into a call returning an int */
4957 MONO_INST_NEW_CALL (cfg, call2, OP_CALL);
4958 memcpy (call2, call, sizeof (MonoCallInst));
4959 switch (ins->opcode) {
4961 call2->inst.opcode = OP_CALL;
4964 call2->inst.opcode = OP_CALL_REG;
4966 case OP_FCALL_MEMBASE:
4967 call2->inst.opcode = OP_CALL_MEMBASE;
4970 g_assert_not_reached ();
4972 call2->inst.dreg = mono_alloc_ireg (cfg);
4973 MONO_ADD_INS (cfg->cbb, (MonoInst*)call2);
4975 /* FIXME: Optimize this */
4977 /* Emit an r4->r8 conversion */
4978 EMIT_NEW_VARLOADA_VREG (cfg, iargs [0], call2->inst.dreg, &mono_defaults.int32_class->byval_arg);
4979 conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
4980 conv->dreg = ins->dreg;
4982 switch (ins->opcode) {
4984 ins->opcode = OP_LCALL;
4987 ins->opcode = OP_LCALL_REG;
4989 case OP_FCALL_MEMBASE:
4990 ins->opcode = OP_LCALL_MEMBASE;
4993 g_assert_not_reached ();
4999 MonoJitICallInfo *info;
5000 MonoInst *iargs [2];
5001 MonoInst *call, *cmp, *br;
5003 /* Convert fcompare+fbcc to icall+icompare+beq */
5005 info = mono_find_jit_opcode_emulation (ins->next->opcode);
5008 /* Create dummy MonoInst's for the arguments */
5009 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
5010 iargs [0]->dreg = ins->sreg1;
5011 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
5012 iargs [1]->dreg = ins->sreg2;
5014 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
5016 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
5017 cmp->sreg1 = call->dreg;
5019 MONO_ADD_INS (cfg->cbb, cmp);
5021 MONO_INST_NEW (cfg, br, OP_IBNE_UN);
5022 br->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * 2);
5023 br->inst_true_bb = ins->next->inst_true_bb;
5024 br->inst_false_bb = ins->next->inst_false_bb;
5025 MONO_ADD_INS (cfg->cbb, br);
5027 /* The call sequence might include fp ins */
5030 /* Skip fbcc or fccc */
5031 NULLIFY_INS (ins->next);
5039 MonoJitICallInfo *info;
5040 MonoInst *iargs [2];
5043 /* Convert fccc to icall+icompare+iceq */
5045 info = mono_find_jit_opcode_emulation (ins->opcode);
5048 /* Create dummy MonoInst's for the arguments */
5049 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
5050 iargs [0]->dreg = ins->sreg1;
5051 MONO_INST_NEW (cfg, iargs [1], OP_ARG);
5052 iargs [1]->dreg = ins->sreg2;
5054 call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
5056 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, call->dreg, 1);
5057 MONO_EMIT_NEW_UNALU (cfg, OP_ICEQ, ins->dreg, -1);
5059 /* The call sequence might include fp ins */
5064 MonoInst *iargs [2];
5065 MonoInst *call, *cmp;
5067 /* Convert to icall+icompare+cond_exc+move */
5069 /* Create dummy MonoInst's for the arguments */
5070 MONO_INST_NEW (cfg, iargs [0], OP_ARG);
5071 iargs [0]->dreg = ins->sreg1;
5073 call = mono_emit_jit_icall (cfg, mono_isfinite, iargs);
5075 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
5076 cmp->sreg1 = call->dreg;
5078 MONO_ADD_INS (cfg->cbb, cmp);
5080 MONO_EMIT_NEW_COND_EXC (cfg, INE_UN, "ArithmeticException");
5082 /* Do the assignment if the value is finite */
5083 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, ins->dreg, ins->sreg1);
5089 if (spec [MONO_INST_SRC1] == 'f' || spec [MONO_INST_SRC2] == 'f' || spec [MONO_INST_DEST] == 'f') {
5090 mono_print_ins (ins);
5091 g_assert_not_reached ();
5096 g_assert (cfg->cbb == first_bb);
5098 if (cfg->cbb->code || (cfg->cbb != first_bb)) {
5099 /* Replace the original instruction with the new code sequence */
5101 mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
5102 first_bb->code = first_bb->last_ins = NULL;
5103 first_bb->in_count = first_bb->out_count = 0;
5104 cfg->cbb = first_bb;
5111 if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER HANDLE-SOFT-FLOAT ");
5114 mono_decompose_long_opts (cfg);
5120 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5123 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5124 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5125 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5126 /* Optimize reg-reg moves away */
5128 * Can't optimize other opcodes, since sp[0] might point to
5129 * the last ins of a decomposed opcode.
5131 sp [0]->dreg = (cfg)->locals [n]->dreg;
5133 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5138 * ldloca inhibits many optimizations so try to get rid of it in common
5141 static inline unsigned char *
5142 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
5151 local = read16 (ip + 2);
5155 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5156 gboolean skip = FALSE;
5158 /* From the INITOBJ case */
5159 token = read32 (ip + 2);
5160 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5161 CHECK_TYPELOAD (klass);
5162 if (generic_class_is_reference_type (cfg, klass)) {
5163 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5164 } else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
5165 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5166 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5167 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
5180 is_exception_class (MonoClass *class)
5183 if (class == mono_defaults.exception_class)
5185 class = class->parent;
5191 * mono_method_to_ir:
5193 * Translate the .net IL into linear IR.
5196 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5197 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5198 guint inline_offset, gboolean is_virtual_call)
5200 MonoInst *ins, **sp, **stack_start;
5201 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5202 MonoMethod *cmethod, *method_definition;
5203 MonoInst **arg_array;
5204 MonoMethodHeader *header;
5206 guint32 token, ins_flag;
5208 MonoClass *constrained_call = NULL;
5209 unsigned char *ip, *end, *target, *err_pos;
5210 static double r8_0 = 0.0;
5211 MonoMethodSignature *sig;
5212 MonoGenericContext *generic_context = NULL;
5213 MonoGenericContainer *generic_container = NULL;
5214 MonoType **param_types;
5215 int i, n, start_new_bblock, dreg;
5216 int num_calls = 0, inline_costs = 0;
5217 int breakpoint_id = 0;
5219 MonoBoolean security, pinvoke;
5220 MonoSecurityManager* secman = NULL;
5221 MonoDeclSecurityActions actions;
5222 GSList *class_inits = NULL;
5223 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5226 /* serialization and xdomain stuff may need access to private fields and methods */
5227 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5228 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5229 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5230 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5231 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5232 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5234 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5236 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5237 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5238 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5239 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5241 image = method->klass->image;
5242 header = mono_method_get_header (method);
5243 generic_container = mono_method_get_generic_container (method);
5244 sig = mono_method_signature (method);
5245 num_args = sig->hasthis + sig->param_count;
5246 ip = (unsigned char*)header->code;
5247 cfg->cil_start = ip;
5248 end = ip + header->code_size;
5249 mono_jit_stats.cil_code_size += header->code_size;
5251 method_definition = method;
5252 while (method_definition->is_inflated) {
5253 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5254 method_definition = imethod->declaring;
5257 /* SkipVerification is not allowed if core-clr is enabled */
5258 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5260 dont_verify_stloc = TRUE;
5263 if (!dont_verify && mini_method_verify (cfg, method_definition))
5264 goto exception_exit;
5266 if (mono_debug_using_mono_debugger ())
5267 cfg->keep_cil_nops = TRUE;
5269 if (sig->is_inflated)
5270 generic_context = mono_method_get_context (method);
5271 else if (generic_container)
5272 generic_context = &generic_container->context;
5273 cfg->generic_context = generic_context;
5275 if (!cfg->generic_sharing_context)
5276 g_assert (!sig->has_type_parameters);
5278 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5279 g_assert (method->is_inflated);
5280 g_assert (mono_method_get_context (method)->method_inst);
5282 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5283 g_assert (sig->generic_param_count);
5285 if (cfg->method == method) {
5286 cfg->real_offset = 0;
5288 cfg->real_offset = inline_offset;
5291 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5292 cfg->cil_offset_to_bb_len = header->code_size;
5294 cfg->current_method = method;
5296 if (cfg->verbose_level > 2)
5297 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5299 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5301 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5302 for (n = 0; n < sig->param_count; ++n)
5303 param_types [n + sig->hasthis] = sig->params [n];
5304 cfg->arg_types = param_types;
5306 dont_inline = g_list_prepend (dont_inline, method);
5307 if (cfg->method == method) {
5309 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5310 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5313 NEW_BBLOCK (cfg, start_bblock);
5314 cfg->bb_entry = start_bblock;
5315 start_bblock->cil_code = NULL;
5316 start_bblock->cil_length = 0;
5319 NEW_BBLOCK (cfg, end_bblock);
5320 cfg->bb_exit = end_bblock;
5321 end_bblock->cil_code = NULL;
5322 end_bblock->cil_length = 0;
5323 g_assert (cfg->num_bblocks == 2);
5325 arg_array = cfg->args;
5327 if (header->num_clauses) {
5328 cfg->spvars = g_hash_table_new (NULL, NULL);
5329 cfg->exvars = g_hash_table_new (NULL, NULL);
5331 /* handle exception clauses */
5332 for (i = 0; i < header->num_clauses; ++i) {
5333 MonoBasicBlock *try_bb;
5334 MonoExceptionClause *clause = &header->clauses [i];
5335 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5336 try_bb->real_offset = clause->try_offset;
5337 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5338 tblock->real_offset = clause->handler_offset;
5339 tblock->flags |= BB_EXCEPTION_HANDLER;
5341 link_bblock (cfg, try_bb, tblock);
5343 if (*(ip + clause->handler_offset) == CEE_POP)
5344 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5346 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5347 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5348 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5349 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5350 MONO_ADD_INS (tblock, ins);
5352 /* todo: is a fault block unsafe to optimize? */
5353 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5354 tblock->flags |= BB_EXCEPTION_UNSAFE;
5358 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5360 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5362 /* catch and filter blocks get the exception object on the stack */
5363 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5364 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5365 MonoInst *dummy_use;
5367 /* mostly like handle_stack_args (), but just sets the input args */
5368 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5369 tblock->in_scount = 1;
5370 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5371 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5374 * Add a dummy use for the exvar so its liveness info will be
5378 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5380 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5381 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5382 tblock->flags |= BB_EXCEPTION_HANDLER;
5383 tblock->real_offset = clause->data.filter_offset;
5384 tblock->in_scount = 1;
5385 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5386 /* The filter block shares the exvar with the handler block */
5387 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5388 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5389 MONO_ADD_INS (tblock, ins);
5393 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5394 clause->data.catch_class &&
5395 cfg->generic_sharing_context &&
5396 mono_class_check_context_used (clause->data.catch_class)) {
5398 * In shared generic code with catch
5399 * clauses containing type variables
5400 * the exception handling code has to
5401 * be able to get to the rgctx.
5402 * Therefore we have to make sure that
5403 * the vtable/mrgctx argument (for
5404 * static or generic methods) or the
5405 * "this" argument (for non-static
5406 * methods) are live.
5408 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5409 mini_method_get_context (method)->method_inst ||
5410 method->klass->valuetype) {
5411 mono_get_vtable_var (cfg);
5413 MonoInst *dummy_use;
5415 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5420 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5421 cfg->cbb = start_bblock;
5422 cfg->args = arg_array;
5423 mono_save_args (cfg, sig, inline_args);
5426 /* FIRST CODE BLOCK */
5427 NEW_BBLOCK (cfg, bblock);
5428 bblock->cil_code = ip;
5432 ADD_BBLOCK (cfg, bblock);
5434 if (cfg->method == method) {
5435 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5436 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5437 MONO_INST_NEW (cfg, ins, OP_BREAK);
5438 MONO_ADD_INS (bblock, ins);
5442 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5443 secman = mono_security_manager_get_methods ();
5445 security = (secman && mono_method_has_declsec (method));
5446 /* at this point having security doesn't mean we have any code to generate */
5447 if (security && (cfg->method == method)) {
5448 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5449 * And we do not want to enter the next section (with allocation) if we
5450 * have nothing to generate */
5451 security = mono_declsec_get_demands (method, &actions);
5454 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5455 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5457 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5458 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5459 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5461 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
5462 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5466 mono_custom_attrs_free (custom);
5469 custom = mono_custom_attrs_from_class (wrapped->klass);
5470 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5474 mono_custom_attrs_free (custom);
5477 /* not a P/Invoke after all */
5482 if ((header->init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5483 /* we use a separate basic block for the initialization code */
5484 NEW_BBLOCK (cfg, init_localsbb);
5485 cfg->bb_init = init_localsbb;
5486 init_localsbb->real_offset = cfg->real_offset;
5487 start_bblock->next_bb = init_localsbb;
5488 init_localsbb->next_bb = bblock;
5489 link_bblock (cfg, start_bblock, init_localsbb);
5490 link_bblock (cfg, init_localsbb, bblock);
5492 cfg->cbb = init_localsbb;
5494 start_bblock->next_bb = bblock;
5495 link_bblock (cfg, start_bblock, bblock);
5498 /* at this point we know, if security is TRUE, that some code needs to be generated */
5499 if (security && (cfg->method == method)) {
5502 mono_jit_stats.cas_demand_generation++;
5504 if (actions.demand.blob) {
5505 /* Add code for SecurityAction.Demand */
5506 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5507 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5508 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5509 mono_emit_method_call (cfg, secman->demand, args, NULL);
5511 if (actions.noncasdemand.blob) {
5512 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5513 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5514 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5515 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5516 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5517 mono_emit_method_call (cfg, secman->demand, args, NULL);
5519 if (actions.demandchoice.blob) {
5520 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5521 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5522 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5523 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5524 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5528 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5530 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5533 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5534 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5535 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5536 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5537 if (!(method->klass && method->klass->image &&
5538 mono_security_core_clr_is_platform_image (method->klass->image))) {
5539 emit_throw_method_access_exception (cfg, method, wrapped, bblock, ip);
5545 if (header->code_size == 0)
5548 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5553 if (cfg->method == method)
5554 mono_debug_init_method (cfg, bblock, breakpoint_id);
5556 for (n = 0; n < header->num_locals; ++n) {
5557 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5562 /* We force the vtable variable here for all shared methods
5563 for the possibility that they might show up in a stack
5564 trace where their exact instantiation is needed. */
5565 if (cfg->generic_sharing_context && method == cfg->method) {
5566 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5567 mini_method_get_context (method)->method_inst ||
5568 method->klass->valuetype) {
5569 mono_get_vtable_var (cfg);
5571 /* FIXME: Is there a better way to do this?
5572 We need the variable live for the duration
5573 of the whole method. */
5574 cfg->args [0]->flags |= MONO_INST_INDIRECT;
5578 /* add a check for this != NULL to inlined methods */
5579 if (is_virtual_call) {
5582 NEW_ARGLOAD (cfg, arg_ins, 0);
5583 MONO_ADD_INS (cfg->cbb, arg_ins);
5584 cfg->flags |= MONO_CFG_HAS_CHECK_THIS;
5585 MONO_EMIT_NEW_UNALU (cfg, OP_CHECK_THIS, -1, arg_ins->dreg);
5586 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, arg_ins->dreg);
5589 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5590 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5593 start_new_bblock = 0;
5597 if (cfg->method == method)
5598 cfg->real_offset = ip - header->code;
5600 cfg->real_offset = inline_offset;
5605 if (start_new_bblock) {
5606 bblock->cil_length = ip - bblock->cil_code;
5607 if (start_new_bblock == 2) {
5608 g_assert (ip == tblock->cil_code);
5610 GET_BBLOCK (cfg, tblock, ip);
5612 bblock->next_bb = tblock;
5615 start_new_bblock = 0;
5616 for (i = 0; i < bblock->in_scount; ++i) {
5617 if (cfg->verbose_level > 3)
5618 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5619 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5623 g_slist_free (class_inits);
5626 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5627 link_bblock (cfg, bblock, tblock);
5628 if (sp != stack_start) {
5629 handle_stack_args (cfg, stack_start, sp - stack_start);
5631 CHECK_UNVERIFIABLE (cfg);
5633 bblock->next_bb = tblock;
5636 for (i = 0; i < bblock->in_scount; ++i) {
5637 if (cfg->verbose_level > 3)
5638 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5639 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5642 g_slist_free (class_inits);
5647 bblock->real_offset = cfg->real_offset;
5649 if ((cfg->method == method) && cfg->coverage_info) {
5650 guint32 cil_offset = ip - header->code;
5651 cfg->coverage_info->data [cil_offset].cil_code = ip;
5653 /* TODO: Use an increment here */
5654 #if defined(__i386__)
5655 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5656 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5658 MONO_ADD_INS (cfg->cbb, ins);
5660 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5661 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5665 if (cfg->verbose_level > 3)
5666 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5670 if (cfg->keep_cil_nops)
5671 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
5673 MONO_INST_NEW (cfg, ins, OP_NOP);
5675 MONO_ADD_INS (bblock, ins);
5678 MONO_INST_NEW (cfg, ins, OP_BREAK);
5680 MONO_ADD_INS (bblock, ins);
5686 CHECK_STACK_OVF (1);
5687 n = (*ip)-CEE_LDARG_0;
5689 EMIT_NEW_ARGLOAD (cfg, ins, n);
5697 CHECK_STACK_OVF (1);
5698 n = (*ip)-CEE_LDLOC_0;
5700 EMIT_NEW_LOCLOAD (cfg, ins, n);
5709 n = (*ip)-CEE_STLOC_0;
5712 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5714 emit_stloc_ir (cfg, sp, header, n);
5721 CHECK_STACK_OVF (1);
5724 EMIT_NEW_ARGLOAD (cfg, ins, n);
5730 CHECK_STACK_OVF (1);
5733 NEW_ARGLOADA (cfg, ins, n);
5734 MONO_ADD_INS (cfg->cbb, ins);
5744 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
5746 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
5751 CHECK_STACK_OVF (1);
5754 EMIT_NEW_LOCLOAD (cfg, ins, n);
5758 case CEE_LDLOCA_S: {
5759 unsigned char *tmp_ip;
5761 CHECK_STACK_OVF (1);
5762 CHECK_LOCAL (ip [1]);
5764 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
5770 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
5779 CHECK_LOCAL (ip [1]);
5780 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
5782 emit_stloc_ir (cfg, sp, header, ip [1]);
5787 CHECK_STACK_OVF (1);
5788 EMIT_NEW_PCONST (cfg, ins, NULL);
5789 ins->type = STACK_OBJ;
5794 CHECK_STACK_OVF (1);
5795 EMIT_NEW_ICONST (cfg, ins, -1);
5808 CHECK_STACK_OVF (1);
5809 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
5815 CHECK_STACK_OVF (1);
5817 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
5823 CHECK_STACK_OVF (1);
5824 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
5830 CHECK_STACK_OVF (1);
5831 MONO_INST_NEW (cfg, ins, OP_I8CONST);
5832 ins->type = STACK_I8;
5833 ins->dreg = alloc_dreg (cfg, STACK_I8);
5835 ins->inst_l = (gint64)read64 (ip);
5836 MONO_ADD_INS (bblock, ins);
5842 /* FIXME: we should really allocate this only late in the compilation process */
5843 f = mono_domain_alloc (cfg->domain, sizeof (float));
5845 CHECK_STACK_OVF (1);
5846 MONO_INST_NEW (cfg, ins, OP_R4CONST);
5847 ins->type = STACK_R8;
5848 ins->dreg = alloc_dreg (cfg, STACK_R8);
5852 MONO_ADD_INS (bblock, ins);
5860 /* FIXME: we should really allocate this only late in the compilation process */
5861 d = mono_domain_alloc (cfg->domain, sizeof (double));
5863 CHECK_STACK_OVF (1);
5864 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5865 ins->type = STACK_R8;
5866 ins->dreg = alloc_dreg (cfg, STACK_R8);
5870 MONO_ADD_INS (bblock, ins);
5877 MonoInst *temp, *store;
5879 CHECK_STACK_OVF (1);
5883 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
5884 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
5886 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5889 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
5902 if (sp [0]->type == STACK_R8)
5903 /* we need to pop the value from the x86 FP stack */
5904 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
5913 if (stack_start != sp)
5915 token = read32 (ip + 1);
5916 /* FIXME: check the signature matches */
5917 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
5922 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
5923 GENERIC_SHARING_FAILURE (CEE_JMP);
5925 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5926 CHECK_CFG_EXCEPTION;
5930 MonoMethodSignature *fsig = mono_method_signature (cmethod);
5933 /* Handle tail calls similarly to calls */
5934 n = fsig->param_count + fsig->hasthis;
5936 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
5937 call->method = cmethod;
5938 call->tail_call = TRUE;
5939 call->signature = mono_method_signature (cmethod);
5940 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
5941 call->inst.inst_p0 = cmethod;
5942 for (i = 0; i < n; ++i)
5943 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
5945 mono_arch_emit_call (cfg, call);
5946 MONO_ADD_INS (bblock, (MonoInst*)call);
5949 for (i = 0; i < num_args; ++i)
5950 /* Prevent arguments from being optimized away */
5951 arg_array [i]->flags |= MONO_INST_VOLATILE;
5953 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
5954 ins = (MonoInst*)call;
5955 ins->inst_p0 = cmethod;
5956 MONO_ADD_INS (bblock, ins);
5960 start_new_bblock = 1;
5965 case CEE_CALLVIRT: {
5966 MonoInst *addr = NULL;
5967 MonoMethodSignature *fsig = NULL;
5969 int virtual = *ip == CEE_CALLVIRT;
5970 int calli = *ip == CEE_CALLI;
5971 gboolean pass_imt_from_rgctx = FALSE;
5972 MonoInst *imt_arg = NULL;
5973 gboolean pass_vtable = FALSE;
5974 gboolean pass_mrgctx = FALSE;
5975 MonoInst *vtable_arg = NULL;
5976 gboolean check_this = FALSE;
5979 token = read32 (ip + 1);
5986 if (method->wrapper_type != MONO_WRAPPER_NONE)
5987 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
5989 fsig = mono_metadata_parse_signature (image, token);
5991 n = fsig->param_count + fsig->hasthis;
5993 MonoMethod *cil_method;
5995 if (method->wrapper_type != MONO_WRAPPER_NONE) {
5996 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
5997 cil_method = cmethod;
5998 } else if (constrained_call) {
5999 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6001 * This is needed since get_method_constrained can't find
6002 * the method in klass representing a type var.
6003 * The type var is guaranteed to be a reference type in this
6006 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6007 cil_method = cmethod;
6008 g_assert (!cmethod->klass->valuetype);
6010 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6013 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6014 cil_method = cmethod;
6019 if (!dont_verify && !cfg->skip_visibility) {
6020 MonoMethod *target_method = cil_method;
6021 if (method->is_inflated) {
6022 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6024 if (!mono_method_can_access_method (method_definition, target_method) &&
6025 !mono_method_can_access_method (method, cil_method))
6026 METHOD_ACCESS_FAILURE;
6029 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6030 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6032 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6033 /* MS.NET seems to silently convert this to a callvirt */
6036 if (!cmethod->klass->inited)
6037 if (!mono_class_init (cmethod->klass))
6040 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6041 mini_class_is_system_array (cmethod->klass)) {
6042 array_rank = cmethod->klass->rank;
6043 fsig = mono_method_signature (cmethod);
6045 if (mono_method_signature (cmethod)->pinvoke) {
6046 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6047 check_for_pending_exc, FALSE);
6048 fsig = mono_method_signature (wrapper);
6049 } else if (constrained_call) {
6050 fsig = mono_method_signature (cmethod);
6052 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6056 mono_save_token_info (cfg, image, token, cil_method);
6058 n = fsig->param_count + fsig->hasthis;
6060 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6061 if (check_linkdemand (cfg, method, cmethod))
6063 CHECK_CFG_EXCEPTION;
6066 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6067 g_assert_not_reached ();
6070 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6073 if (!cfg->generic_sharing_context && cmethod)
6074 g_assert (!mono_method_check_context_used (cmethod));
6078 //g_assert (!virtual || fsig->hasthis);
6082 if (constrained_call) {
6084 * We have the `constrained.' prefix opcode.
6086 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6090 * The type parameter is instantiated as a valuetype,
6091 * but that type doesn't override the method we're
6092 * calling, so we need to box `this'.
6094 dreg = alloc_dreg (cfg, STACK_VTYPE);
6095 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADV_MEMBASE, dreg, sp [0]->dreg, 0);
6096 ins->klass = constrained_call;
6097 sp [0] = handle_box (cfg, ins, constrained_call);
6098 } else if (!constrained_call->valuetype) {
6099 int dreg = alloc_preg (cfg);
6102 * The type parameter is instantiated as a reference
6103 * type. We have a managed pointer on the stack, so
6104 * we need to dereference it here.
6106 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6107 ins->type = STACK_OBJ;
6109 } else if (cmethod->klass->valuetype)
6111 constrained_call = NULL;
6114 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6118 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6119 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6120 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6121 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6122 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6125 * Pass vtable iff target method might
6126 * be shared, which means that sharing
6127 * is enabled for its class and its
6128 * context is sharable (and it's not a
6131 if (sharing_enabled && context_sharable &&
6132 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6136 if (cmethod && mini_method_get_context (cmethod) &&
6137 mini_method_get_context (cmethod)->method_inst) {
6138 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6139 MonoGenericContext *context = mini_method_get_context (cmethod);
6140 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6142 g_assert (!pass_vtable);
6144 if (sharing_enabled && context_sharable)
6148 if (cfg->generic_sharing_context && cmethod) {
6149 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6151 context_used = mono_method_check_context_used (cmethod);
6153 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6154 /* Generic method interface
6155 calls are resolved via a
6156 helper function and don't
6158 if (!cmethod_context || !cmethod_context->method_inst)
6159 pass_imt_from_rgctx = TRUE;
6163 * If a shared method calls another
6164 * shared method then the caller must
6165 * have a generic sharing context
6166 * because the magic trampoline
6167 * requires it. FIXME: We shouldn't
6168 * have to force the vtable/mrgctx
6169 * variable here. Instead there
6170 * should be a flag in the cfg to
6171 * request a generic sharing context.
6174 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6175 mono_get_vtable_var (cfg);
6180 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6182 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6184 CHECK_TYPELOAD (cmethod->klass);
6185 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6190 g_assert (!vtable_arg);
6193 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6195 EMIT_NEW_METHOD_RGCTX_CONST (cfg, vtable_arg, cmethod);
6198 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6199 MONO_METHOD_IS_FINAL (cmethod)) {
6206 if (pass_imt_from_rgctx) {
6207 g_assert (!pass_vtable);
6210 imt_arg = emit_get_rgctx_method (cfg, context_used,
6211 cmethod, MONO_RGCTX_INFO_METHOD);
6217 MONO_INST_NEW (cfg, check, OP_CHECK_THIS);
6218 check->sreg1 = sp [0]->dreg;
6219 MONO_ADD_INS (cfg->cbb, check);
6222 /* Calling virtual generic methods */
6223 if (cmethod && virtual &&
6224 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6225 !(MONO_METHOD_IS_FINAL (cmethod) &&
6226 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6227 mono_method_signature (cmethod)->generic_param_count) {
6228 MonoInst *this_temp, *this_arg_temp, *store;
6229 MonoInst *iargs [4];
6231 g_assert (mono_method_signature (cmethod)->is_inflated);
6233 /* Prevent inlining of methods that contain indirect calls */
6236 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK
6237 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt) {
6238 g_assert (!imt_arg);
6240 imt_arg = emit_get_rgctx_method (cfg, context_used,
6241 cmethod, MONO_RGCTX_INFO_METHOD);
6244 g_assert (cmethod->is_inflated);
6245 EMIT_NEW_METHODCONST (cfg, imt_arg, cmethod);
6247 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6251 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6252 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6253 MONO_ADD_INS (bblock, store);
6255 /* FIXME: This should be a managed pointer */
6256 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6258 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6260 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6261 cmethod, MONO_RGCTX_INFO_METHOD);
6262 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6263 addr = mono_emit_jit_icall (cfg,
6264 mono_helper_compile_generic_method, iargs);
6266 EMIT_NEW_METHODCONST (cfg, iargs [1], cmethod);
6267 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6268 addr = mono_emit_jit_icall (cfg, mono_helper_compile_generic_method, iargs);
6271 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6273 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6276 if (!MONO_TYPE_IS_VOID (fsig->ret))
6277 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6285 /* FIXME: runtime generic context pointer for jumps? */
6286 /* FIXME: handle this for generic sharing eventually */
6287 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) &&
6288 (mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod))) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret)) {
6291 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6294 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6295 call->tail_call = TRUE;
6296 call->method = cmethod;
6297 call->signature = mono_method_signature (cmethod);
6300 /* Handle tail calls similarly to calls */
6301 call->inst.opcode = OP_TAILCALL;
6303 mono_arch_emit_call (cfg, call);
6306 * We implement tail calls by storing the actual arguments into the
6307 * argument variables, then emitting a CEE_JMP.
6309 for (i = 0; i < n; ++i) {
6310 /* Prevent argument from being register allocated */
6311 arg_array [i]->flags |= MONO_INST_VOLATILE;
6312 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6316 ins = (MonoInst*)call;
6317 ins->inst_p0 = cmethod;
6318 ins->inst_p1 = arg_array [0];
6319 MONO_ADD_INS (bblock, ins);
6320 link_bblock (cfg, bblock, end_bblock);
6321 start_new_bblock = 1;
6322 /* skip CEE_RET as well */
6328 /* Conversion to a JIT intrinsic */
6329 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6330 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6331 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6342 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6343 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6344 mono_method_check_inlining (cfg, cmethod) &&
6345 !g_list_find (dont_inline, cmethod)) {
6347 gboolean allways = FALSE;
6349 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6350 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6351 /* Prevent inlining of methods that call wrappers */
6353 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6357 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6359 cfg->real_offset += 5;
6362 if (!MONO_TYPE_IS_VOID (fsig->ret))
6363 /* *sp is already set by inline_method */
6366 inline_costs += costs;
6372 inline_costs += 10 * num_calls++;
6374 /* Tail recursion elimination */
6375 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6376 gboolean has_vtargs = FALSE;
6379 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6382 /* keep it simple */
6383 for (i = fsig->param_count - 1; i >= 0; i--) {
6384 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6389 for (i = 0; i < n; ++i)
6390 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6391 MONO_INST_NEW (cfg, ins, OP_BR);
6392 MONO_ADD_INS (bblock, ins);
6393 tblock = start_bblock->out_bb [0];
6394 link_bblock (cfg, bblock, tblock);
6395 ins->inst_target_bb = tblock;
6396 start_new_bblock = 1;
6398 /* skip the CEE_RET, too */
6399 if (ip_in_bb (cfg, bblock, ip + 5))
6409 /* Generic sharing */
6410 /* FIXME: only do this for generic methods if
6411 they are not shared! */
6412 if (context_used && !imt_arg && !array_rank &&
6413 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6414 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6415 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6416 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6419 g_assert (cfg->generic_sharing_context && cmethod);
6423 * We are compiling a call to a
6424 * generic method from shared code,
6425 * which means that we have to look up
6426 * the method in the rgctx and do an
6429 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6432 /* Indirect calls */
6434 g_assert (!imt_arg);
6436 if (*ip == CEE_CALL)
6437 g_assert (context_used);
6438 else if (*ip == CEE_CALLI)
6439 g_assert (!vtable_arg);
6441 /* FIXME: what the hell is this??? */
6442 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6443 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6445 /* Prevent inlining of methods with indirect calls */
6449 #ifdef MONO_ARCH_RGCTX_REG
6451 int rgctx_reg = mono_alloc_preg (cfg);
6453 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6454 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6455 call = (MonoCallInst*)ins;
6456 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6457 cfg->uses_rgctx_reg = TRUE;
6458 call->rgctx_reg = TRUE;
6463 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6465 * Instead of emitting an indirect call, emit a direct call
6466 * with the contents of the aotconst as the patch info.
6468 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6471 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6474 if (!MONO_TYPE_IS_VOID (fsig->ret))
6475 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6486 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6487 if (sp [fsig->param_count]->type == STACK_OBJ) {
6488 MonoInst *iargs [2];
6491 iargs [1] = sp [fsig->param_count];
6493 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6496 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6497 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6498 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6499 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6501 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6504 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6505 if (!cmethod->klass->element_class->valuetype && !readonly)
6506 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6509 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6512 g_assert_not_reached ();
6520 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6522 if (!MONO_TYPE_IS_VOID (fsig->ret))
6523 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6533 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6535 } else if (imt_arg) {
6536 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6538 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6541 if (!MONO_TYPE_IS_VOID (fsig->ret))
6542 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6549 if (cfg->method != method) {
6550 /* return from inlined method */
6552 * If in_count == 0, that means the ret is unreachable due to
6553 * being preceeded by a throw. In that case, inline_method () will
6554 * handle setting the return value
6555 * (test case: test_0_inline_throw ()).
6557 if (return_var && cfg->cbb->in_count) {
6561 //g_assert (returnvar != -1);
6562 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6563 cfg->ret_var_set = TRUE;
6567 MonoType *ret_type = mono_method_signature (method)->ret;
6569 g_assert (!return_var);
6572 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6575 if (!cfg->vret_addr) {
6578 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6580 EMIT_NEW_RETLOADA (cfg, ret_addr);
6582 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6583 ins->klass = mono_class_from_mono_type (ret_type);
6586 #ifdef MONO_ARCH_SOFT_FLOAT
6587 if (!ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6588 MonoInst *iargs [1];
6592 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6593 mono_arch_emit_setret (cfg, method, conv);
6595 mono_arch_emit_setret (cfg, method, *sp);
6598 mono_arch_emit_setret (cfg, method, *sp);
6603 if (sp != stack_start)
6605 MONO_INST_NEW (cfg, ins, OP_BR);
6607 ins->inst_target_bb = end_bblock;
6608 MONO_ADD_INS (bblock, ins);
6609 link_bblock (cfg, bblock, end_bblock);
6610 start_new_bblock = 1;
6614 MONO_INST_NEW (cfg, ins, OP_BR);
6616 target = ip + 1 + (signed char)(*ip);
6618 GET_BBLOCK (cfg, tblock, target);
6619 link_bblock (cfg, bblock, tblock);
6620 ins->inst_target_bb = tblock;
6621 if (sp != stack_start) {
6622 handle_stack_args (cfg, stack_start, sp - stack_start);
6624 CHECK_UNVERIFIABLE (cfg);
6626 MONO_ADD_INS (bblock, ins);
6627 start_new_bblock = 1;
6628 inline_costs += BRANCH_COST;
6642 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6644 target = ip + 1 + *(signed char*)ip;
6650 inline_costs += BRANCH_COST;
6654 MONO_INST_NEW (cfg, ins, OP_BR);
6657 target = ip + 4 + (gint32)read32(ip);
6659 GET_BBLOCK (cfg, tblock, target);
6660 link_bblock (cfg, bblock, tblock);
6661 ins->inst_target_bb = tblock;
6662 if (sp != stack_start) {
6663 handle_stack_args (cfg, stack_start, sp - stack_start);
6665 CHECK_UNVERIFIABLE (cfg);
6668 MONO_ADD_INS (bblock, ins);
6670 start_new_bblock = 1;
6671 inline_costs += BRANCH_COST;
6678 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
6679 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
6680 guint32 opsize = is_short ? 1 : 4;
6682 CHECK_OPSIZE (opsize);
6684 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
6687 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
6692 GET_BBLOCK (cfg, tblock, target);
6693 link_bblock (cfg, bblock, tblock);
6694 GET_BBLOCK (cfg, tblock, ip);
6695 link_bblock (cfg, bblock, tblock);
6697 if (sp != stack_start) {
6698 handle_stack_args (cfg, stack_start, sp - stack_start);
6699 CHECK_UNVERIFIABLE (cfg);
6702 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
6703 cmp->sreg1 = sp [0]->dreg;
6704 type_from_op (cmp, sp [0], NULL);
6707 #if SIZEOF_REGISTER == 4
6708 if (cmp->opcode == OP_LCOMPARE_IMM) {
6709 /* Convert it to OP_LCOMPARE */
6710 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6711 ins->type = STACK_I8;
6712 ins->dreg = alloc_dreg (cfg, STACK_I8);
6714 MONO_ADD_INS (bblock, ins);
6715 cmp->opcode = OP_LCOMPARE;
6716 cmp->sreg2 = ins->dreg;
6719 MONO_ADD_INS (bblock, cmp);
6721 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
6722 type_from_op (ins, sp [0], NULL);
6723 MONO_ADD_INS (bblock, ins);
6724 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
6725 GET_BBLOCK (cfg, tblock, target);
6726 ins->inst_true_bb = tblock;
6727 GET_BBLOCK (cfg, tblock, ip);
6728 ins->inst_false_bb = tblock;
6729 start_new_bblock = 2;
6732 inline_costs += BRANCH_COST;
6747 MONO_INST_NEW (cfg, ins, *ip);
6749 target = ip + 4 + (gint32)read32(ip);
6755 inline_costs += BRANCH_COST;
6759 MonoBasicBlock **targets;
6760 MonoBasicBlock *default_bblock;
6761 MonoJumpInfoBBTable *table;
6762 int offset_reg = alloc_preg (cfg);
6763 int target_reg = alloc_preg (cfg);
6764 int table_reg = alloc_preg (cfg);
6765 int sum_reg = alloc_preg (cfg);
6766 gboolean use_op_switch;
6770 n = read32 (ip + 1);
6773 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
6777 CHECK_OPSIZE (n * sizeof (guint32));
6778 target = ip + n * sizeof (guint32);
6780 GET_BBLOCK (cfg, default_bblock, target);
6782 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
6783 for (i = 0; i < n; ++i) {
6784 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
6785 targets [i] = tblock;
6789 if (sp != stack_start) {
6791 * Link the current bb with the targets as well, so handle_stack_args
6792 * will set their in_stack correctly.
6794 link_bblock (cfg, bblock, default_bblock);
6795 for (i = 0; i < n; ++i)
6796 link_bblock (cfg, bblock, targets [i]);
6798 handle_stack_args (cfg, stack_start, sp - stack_start);
6800 CHECK_UNVERIFIABLE (cfg);
6803 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
6804 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
6807 for (i = 0; i < n; ++i)
6808 link_bblock (cfg, bblock, targets [i]);
6810 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
6811 table->table = targets;
6812 table->table_size = n;
6814 use_op_switch = FALSE;
6816 /* ARM implements SWITCH statements differently */
6817 /* FIXME: Make it use the generic implementation */
6818 if (!cfg->compile_aot)
6819 use_op_switch = TRUE;
6822 if (COMPILE_LLVM (cfg))
6823 use_op_switch = TRUE;
6825 if (use_op_switch) {
6826 MONO_INST_NEW (cfg, ins, OP_SWITCH);
6827 ins->sreg1 = src1->dreg;
6828 ins->inst_p0 = table;
6829 ins->inst_many_bb = targets;
6830 ins->klass = GUINT_TO_POINTER (n);
6831 MONO_ADD_INS (cfg->cbb, ins);
6833 if (sizeof (gpointer) == 8)
6834 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
6836 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
6838 #if SIZEOF_REGISTER == 8
6839 /* The upper word might not be zero, and we add it to a 64 bit address later */
6840 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
6843 if (cfg->compile_aot) {
6844 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
6846 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
6847 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
6848 ins->inst_p0 = table;
6849 ins->dreg = table_reg;
6850 MONO_ADD_INS (cfg->cbb, ins);
6853 /* FIXME: Use load_memindex */
6854 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
6855 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
6856 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
6858 start_new_bblock = 1;
6859 inline_costs += (BRANCH_COST * 2);
6879 dreg = alloc_freg (cfg);
6882 dreg = alloc_lreg (cfg);
6885 dreg = alloc_preg (cfg);
6888 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
6889 ins->type = ldind_type [*ip - CEE_LDIND_I1];
6890 ins->flags |= ins_flag;
6892 MONO_ADD_INS (bblock, ins);
6907 #if HAVE_WRITE_BARRIERS
6908 if (*ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0))) {
6909 /* insert call to write barrier */
6910 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
6911 mono_emit_method_call (cfg, write_barrier, sp, NULL);
6918 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
6919 ins->flags |= ins_flag;
6921 MONO_ADD_INS (bblock, ins);
6929 MONO_INST_NEW (cfg, ins, (*ip));
6931 ins->sreg1 = sp [0]->dreg;
6932 ins->sreg2 = sp [1]->dreg;
6933 type_from_op (ins, sp [0], sp [1]);
6935 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6937 /* Use the immediate opcodes if possible */
6938 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
6939 int imm_opcode = mono_op_to_op_imm (ins->opcode);
6940 if (imm_opcode != -1) {
6941 ins->opcode = imm_opcode;
6942 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
6945 sp [1]->opcode = OP_NOP;
6949 MONO_ADD_INS ((cfg)->cbb, (ins));
6951 *sp++ = mono_decompose_opcode (cfg, ins);
6968 MONO_INST_NEW (cfg, ins, (*ip));
6970 ins->sreg1 = sp [0]->dreg;
6971 ins->sreg2 = sp [1]->dreg;
6972 type_from_op (ins, sp [0], sp [1]);
6974 ADD_WIDEN_OP (ins, sp [0], sp [1]);
6975 ins->dreg = alloc_dreg ((cfg), (ins)->type);
6977 /* FIXME: Pass opcode to is_inst_imm */
6979 /* Use the immediate opcodes if possible */
6980 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
6983 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
6984 if (imm_opcode != -1) {
6985 ins->opcode = imm_opcode;
6986 if (sp [1]->opcode == OP_I8CONST) {
6987 #if SIZEOF_REGISTER == 8
6988 ins->inst_imm = sp [1]->inst_l;
6990 ins->inst_ls_word = sp [1]->inst_ls_word;
6991 ins->inst_ms_word = sp [1]->inst_ms_word;
6995 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
6998 /* Might be followed by an instruction added by ADD_WIDEN_OP */
6999 if (sp [1]->next == NULL)
7000 sp [1]->opcode = OP_NOP;
7003 MONO_ADD_INS ((cfg)->cbb, (ins));
7005 *sp++ = mono_decompose_opcode (cfg, ins);
7018 case CEE_CONV_OVF_I8:
7019 case CEE_CONV_OVF_U8:
7023 /* Special case this earlier so we have long constants in the IR */
7024 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7025 int data = sp [-1]->inst_c0;
7026 sp [-1]->opcode = OP_I8CONST;
7027 sp [-1]->type = STACK_I8;
7028 #if SIZEOF_REGISTER == 8
7029 if ((*ip) == CEE_CONV_U8)
7030 sp [-1]->inst_c0 = (guint32)data;
7032 sp [-1]->inst_c0 = data;
7034 sp [-1]->inst_ls_word = data;
7035 if ((*ip) == CEE_CONV_U8)
7036 sp [-1]->inst_ms_word = 0;
7038 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7040 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7047 case CEE_CONV_OVF_I4:
7048 case CEE_CONV_OVF_I1:
7049 case CEE_CONV_OVF_I2:
7050 case CEE_CONV_OVF_I:
7051 case CEE_CONV_OVF_U:
7054 if (sp [-1]->type == STACK_R8) {
7055 ADD_UNOP (CEE_CONV_OVF_I8);
7062 case CEE_CONV_OVF_U1:
7063 case CEE_CONV_OVF_U2:
7064 case CEE_CONV_OVF_U4:
7067 if (sp [-1]->type == STACK_R8) {
7068 ADD_UNOP (CEE_CONV_OVF_U8);
7075 case CEE_CONV_OVF_I1_UN:
7076 case CEE_CONV_OVF_I2_UN:
7077 case CEE_CONV_OVF_I4_UN:
7078 case CEE_CONV_OVF_I8_UN:
7079 case CEE_CONV_OVF_U1_UN:
7080 case CEE_CONV_OVF_U2_UN:
7081 case CEE_CONV_OVF_U4_UN:
7082 case CEE_CONV_OVF_U8_UN:
7083 case CEE_CONV_OVF_I_UN:
7084 case CEE_CONV_OVF_U_UN:
7094 case CEE_ADD_OVF_UN:
7096 case CEE_MUL_OVF_UN:
7098 case CEE_SUB_OVF_UN:
7106 token = read32 (ip + 1);
7107 klass = mini_get_class (method, token, generic_context);
7108 CHECK_TYPELOAD (klass);
7110 if (generic_class_is_reference_type (cfg, klass)) {
7111 MonoInst *store, *load;
7112 int dreg = alloc_preg (cfg);
7114 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7115 load->flags |= ins_flag;
7116 MONO_ADD_INS (cfg->cbb, load);
7118 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7119 store->flags |= ins_flag;
7120 MONO_ADD_INS (cfg->cbb, store);
7122 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7134 token = read32 (ip + 1);
7135 klass = mini_get_class (method, token, generic_context);
7136 CHECK_TYPELOAD (klass);
7138 /* Optimize the common ldobj+stloc combination */
7148 loc_index = ip [5] - CEE_STLOC_0;
7155 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7156 CHECK_LOCAL (loc_index);
7158 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7159 ins->dreg = cfg->locals [loc_index]->dreg;
7165 /* Optimize the ldobj+stobj combination */
7166 /* The reference case ends up being a load+store anyway */
7167 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 9) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7172 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7179 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7188 CHECK_STACK_OVF (1);
7190 n = read32 (ip + 1);
7192 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7193 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7194 ins->type = STACK_OBJ;
7197 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7198 MonoInst *iargs [1];
7200 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7201 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7203 if (cfg->opt & MONO_OPT_SHARED) {
7204 MonoInst *iargs [3];
7206 if (cfg->compile_aot) {
7207 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7209 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7210 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7211 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7212 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7213 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7215 if (bblock->out_of_line) {
7216 MonoInst *iargs [2];
7218 if (image == mono_defaults.corlib) {
7220 * Avoid relocations in AOT and save some space by using a
7221 * version of helper_ldstr specialized to mscorlib.
7223 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7224 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7226 /* Avoid creating the string object */
7227 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7228 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7229 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7233 if (cfg->compile_aot) {
7234 NEW_LDSTRCONST (cfg, ins, image, n);
7236 MONO_ADD_INS (bblock, ins);
7239 NEW_PCONST (cfg, ins, NULL);
7240 ins->type = STACK_OBJ;
7241 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7243 MONO_ADD_INS (bblock, ins);
7252 MonoInst *iargs [2];
7253 MonoMethodSignature *fsig;
7256 MonoInst *vtable_arg = NULL;
7259 token = read32 (ip + 1);
7260 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7263 fsig = mono_method_get_signature (cmethod, image, token);
7265 mono_save_token_info (cfg, image, token, cmethod);
7267 if (!mono_class_init (cmethod->klass))
7270 if (cfg->generic_sharing_context)
7271 context_used = mono_method_check_context_used (cmethod);
7273 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7274 if (check_linkdemand (cfg, method, cmethod))
7276 CHECK_CFG_EXCEPTION;
7277 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7278 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7281 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7282 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7283 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7285 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7286 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7288 EMIT_NEW_METHOD_RGCTX_CONST (cfg, vtable_arg, cmethod);
7292 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7293 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7295 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7297 CHECK_TYPELOAD (cmethod->klass);
7298 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7303 n = fsig->param_count;
7307 * Generate smaller code for the common newobj <exception> instruction in
7308 * argument checking code.
7310 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7311 is_exception_class (cmethod->klass) && n <= 2 &&
7312 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7313 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7314 MonoInst *iargs [3];
7316 g_assert (!vtable_arg);
7320 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7323 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7327 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7332 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7335 g_assert_not_reached ();
7343 /* move the args to allow room for 'this' in the first position */
7349 /* check_call_signature () requires sp[0] to be set */
7350 this_ins.type = STACK_OBJ;
7352 if (check_call_signature (cfg, fsig, sp))
7357 if (mini_class_is_system_array (cmethod->klass)) {
7358 g_assert (!vtable_arg);
7361 *sp = emit_get_rgctx_method (cfg, context_used,
7362 cmethod, MONO_RGCTX_INFO_METHOD);
7364 EMIT_NEW_METHODCONST (cfg, *sp, cmethod);
7367 /* Avoid varargs in the common case */
7368 if (fsig->param_count == 1)
7369 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7370 else if (fsig->param_count == 2)
7371 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7373 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7374 } else if (cmethod->string_ctor) {
7375 g_assert (!context_used);
7376 g_assert (!vtable_arg);
7377 /* we simply pass a null pointer */
7378 EMIT_NEW_PCONST (cfg, *sp, NULL);
7379 /* now call the string ctor */
7380 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7382 MonoInst* callvirt_this_arg = NULL;
7384 if (cmethod->klass->valuetype) {
7385 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7386 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7387 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7392 * The code generated by mini_emit_virtual_call () expects
7393 * iargs [0] to be a boxed instance, but luckily the vcall
7394 * will be transformed into a normal call there.
7396 } else if (context_used) {
7400 if (cfg->opt & MONO_OPT_SHARED)
7401 rgctx_info = MONO_RGCTX_INFO_KLASS;
7403 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7404 data = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, rgctx_info);
7406 alloc = handle_alloc_from_inst (cfg, cmethod->klass, data, FALSE);
7409 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7411 CHECK_TYPELOAD (cmethod->klass);
7414 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7415 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7416 * As a workaround, we call class cctors before allocating objects.
7418 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7419 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7420 if (cfg->verbose_level > 2)
7421 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7422 class_inits = g_slist_prepend (class_inits, vtable);
7425 alloc = handle_alloc (cfg, cmethod->klass, FALSE);
7430 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7432 /* Now call the actual ctor */
7433 /* Avoid virtual calls to ctors if possible */
7434 if (cmethod->klass->marshalbyref)
7435 callvirt_this_arg = sp [0];
7437 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7438 mono_method_check_inlining (cfg, cmethod) &&
7439 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7440 !g_list_find (dont_inline, cmethod)) {
7443 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7444 cfg->real_offset += 5;
7447 inline_costs += costs - 5;
7450 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7452 } else if (context_used &&
7453 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7454 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7455 MonoInst *cmethod_addr;
7457 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7458 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7460 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7463 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7464 callvirt_this_arg, NULL, vtable_arg);
7465 if (mono_method_is_generic_sharable_impl (cmethod, TRUE) && ((MonoCallInst*)ins)->method->wrapper_type == MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)
7466 GENERIC_SHARING_FAILURE (*ip);
7470 if (alloc == NULL) {
7472 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7473 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7487 token = read32 (ip + 1);
7488 klass = mini_get_class (method, token, generic_context);
7489 CHECK_TYPELOAD (klass);
7490 if (sp [0]->type != STACK_OBJ)
7493 if (cfg->generic_sharing_context)
7494 context_used = mono_class_check_context_used (klass);
7503 args [1] = emit_get_rgctx_klass (cfg, context_used,
7504 klass, MONO_RGCTX_INFO_KLASS);
7506 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7510 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7511 MonoMethod *mono_castclass;
7512 MonoInst *iargs [1];
7515 mono_castclass = mono_marshal_get_castclass (klass);
7518 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7519 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7520 g_assert (costs > 0);
7523 cfg->real_offset += 5;
7528 inline_costs += costs;
7531 ins = handle_castclass (cfg, klass, *sp);
7541 token = read32 (ip + 1);
7542 klass = mini_get_class (method, token, generic_context);
7543 CHECK_TYPELOAD (klass);
7544 if (sp [0]->type != STACK_OBJ)
7547 if (cfg->generic_sharing_context)
7548 context_used = mono_class_check_context_used (klass);
7557 args [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7559 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7563 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7564 MonoMethod *mono_isinst;
7565 MonoInst *iargs [1];
7568 mono_isinst = mono_marshal_get_isinst (klass);
7571 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7572 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7573 g_assert (costs > 0);
7576 cfg->real_offset += 5;
7581 inline_costs += costs;
7584 ins = handle_isinst (cfg, klass, *sp);
7591 case CEE_UNBOX_ANY: {
7595 token = read32 (ip + 1);
7596 klass = mini_get_class (method, token, generic_context);
7597 CHECK_TYPELOAD (klass);
7599 mono_save_token_info (cfg, image, token, klass);
7601 if (cfg->generic_sharing_context)
7602 context_used = mono_class_check_context_used (klass);
7604 if (generic_class_is_reference_type (cfg, klass)) {
7607 MonoInst *iargs [2];
7612 iargs [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7613 ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
7617 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7618 MonoMethod *mono_castclass;
7619 MonoInst *iargs [1];
7622 mono_castclass = mono_marshal_get_castclass (klass);
7625 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7626 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7628 g_assert (costs > 0);
7631 cfg->real_offset += 5;
7635 inline_costs += costs;
7637 ins = handle_castclass (cfg, klass, *sp);
7645 if (mono_class_is_nullable (klass)) {
7646 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
7653 ins = handle_unbox (cfg, klass, sp, context_used);
7659 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7672 token = read32 (ip + 1);
7673 klass = mini_get_class (method, token, generic_context);
7674 CHECK_TYPELOAD (klass);
7676 mono_save_token_info (cfg, image, token, klass);
7678 if (cfg->generic_sharing_context)
7679 context_used = mono_class_check_context_used (klass);
7681 if (generic_class_is_reference_type (cfg, klass)) {
7687 if (klass == mono_defaults.void_class)
7689 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
7691 /* frequent check in generic code: box (struct), brtrue */
7692 if (!mono_class_is_nullable (klass) &&
7693 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
7694 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
7696 MONO_INST_NEW (cfg, ins, OP_BR);
7697 if (*ip == CEE_BRTRUE_S) {
7700 target = ip + 1 + (signed char)(*ip);
7705 target = ip + 4 + (gint)(read32 (ip));
7708 GET_BBLOCK (cfg, tblock, target);
7709 link_bblock (cfg, bblock, tblock);
7710 ins->inst_target_bb = tblock;
7711 GET_BBLOCK (cfg, tblock, ip);
7713 * This leads to some inconsistency, since the two bblocks are
7714 * not really connected, but it is needed for handling stack
7715 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
7716 * FIXME: This should only be needed if sp != stack_start, but that
7717 * doesn't work for some reason (test failure in mcs/tests on x86).
7719 link_bblock (cfg, bblock, tblock);
7720 if (sp != stack_start) {
7721 handle_stack_args (cfg, stack_start, sp - stack_start);
7723 CHECK_UNVERIFIABLE (cfg);
7725 MONO_ADD_INS (bblock, ins);
7726 start_new_bblock = 1;
7734 if (cfg->opt & MONO_OPT_SHARED)
7735 rgctx_info = MONO_RGCTX_INFO_KLASS;
7737 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7738 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
7739 *sp++ = handle_box_from_inst (cfg, val, klass, context_used, data);
7741 *sp++ = handle_box (cfg, val, klass);
7752 token = read32 (ip + 1);
7753 klass = mini_get_class (method, token, generic_context);
7754 CHECK_TYPELOAD (klass);
7756 mono_save_token_info (cfg, image, token, klass);
7758 if (cfg->generic_sharing_context)
7759 context_used = mono_class_check_context_used (klass);
7761 if (mono_class_is_nullable (klass)) {
7764 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
7765 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
7769 ins = handle_unbox (cfg, klass, sp, context_used);
7779 MonoClassField *field;
7783 if (*ip == CEE_STFLD) {
7790 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
7792 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
7795 token = read32 (ip + 1);
7796 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7797 field = mono_method_get_wrapper_data (method, token);
7798 klass = field->parent;
7801 field = mono_field_from_token (image, token, &klass, generic_context);
7805 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7806 FIELD_ACCESS_FAILURE;
7807 mono_class_init (klass);
7809 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
7810 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
7811 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
7812 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
7815 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
7816 if (*ip == CEE_STFLD) {
7817 if (target_type_is_incompatible (cfg, field->type, sp [1]))
7819 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7820 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
7821 MonoInst *iargs [5];
7824 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7825 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7826 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
7830 if (cfg->opt & MONO_OPT_INLINE) {
7831 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
7832 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7833 g_assert (costs > 0);
7835 cfg->real_offset += 5;
7838 inline_costs += costs;
7840 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
7845 #if HAVE_WRITE_BARRIERS
7846 if (mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
7847 /* insert call to write barrier */
7848 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7849 MonoInst *iargs [2];
7852 dreg = alloc_preg (cfg);
7853 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7855 mono_emit_method_call (cfg, write_barrier, iargs, NULL);
7859 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
7861 store->flags |= ins_flag;
7868 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
7869 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
7870 MonoInst *iargs [4];
7873 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
7874 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
7875 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
7876 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
7877 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
7878 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7880 g_assert (costs > 0);
7882 cfg->real_offset += 5;
7886 inline_costs += costs;
7888 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
7892 if (sp [0]->type == STACK_VTYPE) {
7895 /* Have to compute the address of the variable */
7897 var = get_vreg_to_inst (cfg, sp [0]->dreg);
7899 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
7901 g_assert (var->klass == klass);
7903 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
7907 if (*ip == CEE_LDFLDA) {
7908 dreg = alloc_preg (cfg);
7910 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
7911 ins->klass = mono_class_from_mono_type (field->type);
7912 ins->type = STACK_MP;
7917 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
7918 load->flags |= ins_flag;
7929 MonoClassField *field;
7930 gpointer addr = NULL;
7931 gboolean is_special_static;
7934 token = read32 (ip + 1);
7936 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7937 field = mono_method_get_wrapper_data (method, token);
7938 klass = field->parent;
7941 field = mono_field_from_token (image, token, &klass, generic_context);
7944 mono_class_init (klass);
7945 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
7946 FIELD_ACCESS_FAILURE;
7948 /* if the class is Critical then transparent code cannot access it's fields */
7949 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
7950 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
7953 * We can only support shared generic static
7954 * field access on architectures where the
7955 * trampoline code has been extended to handle
7956 * the generic class init.
7958 #ifndef MONO_ARCH_VTABLE_REG
7959 GENERIC_SHARING_FAILURE (*ip);
7962 if (cfg->generic_sharing_context)
7963 context_used = mono_class_check_context_used (klass);
7965 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
7967 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
7968 * to be called here.
7970 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
7971 mono_class_vtable (cfg->domain, klass);
7972 CHECK_TYPELOAD (klass);
7974 mono_domain_lock (cfg->domain);
7975 if (cfg->domain->special_static_fields)
7976 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
7977 mono_domain_unlock (cfg->domain);
7979 is_special_static = mono_class_field_is_special_static (field);
7981 /* Generate IR to compute the field address */
7983 if ((cfg->opt & MONO_OPT_SHARED) ||
7984 (cfg->compile_aot && is_special_static) ||
7985 (context_used && is_special_static)) {
7986 MonoInst *iargs [2];
7988 g_assert (field->parent);
7989 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7991 iargs [1] = emit_get_rgctx_field (cfg, context_used,
7992 field, MONO_RGCTX_INFO_CLASS_FIELD);
7994 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
7996 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
7997 } else if (context_used) {
7998 MonoInst *static_data;
8001 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8002 method->klass->name_space, method->klass->name, method->name,
8003 depth, field->offset);
8006 if (mono_class_needs_cctor_run (klass, method)) {
8010 vtable = emit_get_rgctx_klass (cfg, context_used,
8011 klass, MONO_RGCTX_INFO_VTABLE);
8013 // FIXME: This doesn't work since it tries to pass the argument
8014 // in the normal way, instead of using MONO_ARCH_VTABLE_REG
8016 * The vtable pointer is always passed in a register regardless of
8017 * the calling convention, so assign it manually, and make a call
8018 * using a signature without parameters.
8020 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable);
8021 #ifdef MONO_ARCH_VTABLE_REG
8022 mono_call_inst_add_outarg_reg (cfg, call, vtable->dreg, MONO_ARCH_VTABLE_REG, FALSE);
8023 cfg->uses_vtable_reg = TRUE;
8030 * The pointer we're computing here is
8032 * super_info.static_data + field->offset
8034 static_data = emit_get_rgctx_klass (cfg, context_used,
8035 klass, MONO_RGCTX_INFO_STATIC_DATA);
8037 if (field->offset == 0) {
8040 int addr_reg = mono_alloc_preg (cfg);
8041 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8043 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8044 MonoInst *iargs [2];
8046 g_assert (field->parent);
8047 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8048 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8049 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8051 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8053 CHECK_TYPELOAD (klass);
8055 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8056 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8057 if (cfg->verbose_level > 2)
8058 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8059 class_inits = g_slist_prepend (class_inits, vtable);
8061 if (cfg->run_cctors) {
8063 /* This makes so that inline cannot trigger */
8064 /* .cctors: too many apps depend on them */
8065 /* running with a specific order... */
8066 if (! vtable->initialized)
8068 ex = mono_runtime_class_init_full (vtable, FALSE);
8070 set_exception_object (cfg, ex);
8071 goto exception_exit;
8075 addr = (char*)vtable->data + field->offset;
8077 if (cfg->compile_aot)
8078 EMIT_NEW_SFLDACONST (cfg, ins, field);
8080 EMIT_NEW_PCONST (cfg, ins, addr);
8083 * insert call to mono_threads_get_static_data (GPOINTER_TO_UINT (addr))
8084 * This could be later optimized to do just a couple of
8085 * memory dereferences with constant offsets.
8087 MonoInst *iargs [1];
8088 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8089 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8093 /* Generate IR to do the actual load/store operation */
8095 if (*ip == CEE_LDSFLDA) {
8096 ins->klass = mono_class_from_mono_type (field->type);
8097 ins->type = STACK_PTR;
8099 } else if (*ip == CEE_STSFLD) {
8104 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
8105 store->flags |= ins_flag;
8107 gboolean is_const = FALSE;
8108 MonoVTable *vtable = NULL;
8110 if (!context_used) {
8111 vtable = mono_class_vtable (cfg->domain, klass);
8112 CHECK_TYPELOAD (klass);
8114 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8115 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8116 gpointer addr = (char*)vtable->data + field->offset;
8117 int ro_type = field->type->type;
8118 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
8119 ro_type = mono_class_enum_basetype (field->type->data.klass)->type;
8121 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8124 case MONO_TYPE_BOOLEAN:
8126 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8130 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8133 case MONO_TYPE_CHAR:
8135 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8139 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8144 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8148 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8151 #ifndef HAVE_MOVING_COLLECTOR
8154 case MONO_TYPE_STRING:
8155 case MONO_TYPE_OBJECT:
8156 case MONO_TYPE_CLASS:
8157 case MONO_TYPE_SZARRAY:
8159 case MONO_TYPE_FNPTR:
8160 case MONO_TYPE_ARRAY:
8161 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8162 type_to_eval_stack_type ((cfg), field->type, *sp);
8168 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8173 case MONO_TYPE_VALUETYPE:
8183 CHECK_STACK_OVF (1);
8185 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8186 load->flags |= ins_flag;
8199 token = read32 (ip + 1);
8200 klass = mini_get_class (method, token, generic_context);
8201 CHECK_TYPELOAD (klass);
8202 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8203 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8214 const char *data_ptr;
8216 guint32 field_token;
8222 token = read32 (ip + 1);
8224 klass = mini_get_class (method, token, generic_context);
8225 CHECK_TYPELOAD (klass);
8227 if (cfg->generic_sharing_context)
8228 context_used = mono_class_check_context_used (klass);
8233 /* FIXME: Decompose later to help abcrem */
8236 args [0] = emit_get_rgctx_klass (cfg, context_used,
8237 mono_array_class_get (klass, 1), MONO_RGCTX_INFO_VTABLE);
8242 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8244 if (cfg->opt & MONO_OPT_SHARED) {
8245 /* Decompose now to avoid problems with references to the domainvar */
8246 MonoInst *iargs [3];
8248 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8249 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8252 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8254 /* Decompose later since it is needed by abcrem */
8255 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8256 ins->dreg = alloc_preg (cfg);
8257 ins->sreg1 = sp [0]->dreg;
8258 ins->inst_newa_class = klass;
8259 ins->type = STACK_OBJ;
8261 MONO_ADD_INS (cfg->cbb, ins);
8262 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8263 cfg->cbb->has_array_access = TRUE;
8265 /* Needed so mono_emit_load_get_addr () gets called */
8266 mono_get_got_var (cfg);
8276 * we inline/optimize the initialization sequence if possible.
8277 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8278 * for small sizes open code the memcpy
8279 * ensure the rva field is big enough
8281 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8282 MonoMethod *memcpy_method = get_memcpy_method ();
8283 MonoInst *iargs [3];
8284 int add_reg = alloc_preg (cfg);
8286 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8287 if (cfg->compile_aot) {
8288 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8290 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8292 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8293 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8302 if (sp [0]->type != STACK_OBJ)
8305 dreg = alloc_preg (cfg);
8306 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8307 ins->dreg = alloc_preg (cfg);
8308 ins->sreg1 = sp [0]->dreg;
8309 ins->type = STACK_I4;
8310 MONO_ADD_INS (cfg->cbb, ins);
8311 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8312 cfg->cbb->has_array_access = TRUE;
8320 if (sp [0]->type != STACK_OBJ)
8323 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8325 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8326 CHECK_TYPELOAD (klass);
8327 /* we need to make sure that this array is exactly the type it needs
8328 * to be for correctness. the wrappers are lax with their usage
8329 * so we need to ignore them here
8331 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly)
8332 mini_emit_check_array_type (cfg, sp [0], mono_array_class_get (klass, 1));
8335 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8339 case CEE_LDELEM_ANY:
8350 case CEE_LDELEM_REF: {
8356 if (*ip == CEE_LDELEM_ANY) {
8358 token = read32 (ip + 1);
8359 klass = mini_get_class (method, token, generic_context);
8360 CHECK_TYPELOAD (klass);
8361 mono_class_init (klass);
8364 klass = array_access_to_klass (*ip);
8366 if (sp [0]->type != STACK_OBJ)
8369 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8371 if (sp [1]->opcode == OP_ICONST) {
8372 int array_reg = sp [0]->dreg;
8373 int index_reg = sp [1]->dreg;
8374 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8376 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8377 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8379 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8380 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8383 if (*ip == CEE_LDELEM_ANY)
8396 case CEE_STELEM_REF:
8397 case CEE_STELEM_ANY: {
8403 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8405 if (*ip == CEE_STELEM_ANY) {
8407 token = read32 (ip + 1);
8408 klass = mini_get_class (method, token, generic_context);
8409 CHECK_TYPELOAD (klass);
8410 mono_class_init (klass);
8413 klass = array_access_to_klass (*ip);
8415 if (sp [0]->type != STACK_OBJ)
8418 /* storing a NULL doesn't need any of the complex checks in stelemref */
8419 if (generic_class_is_reference_type (cfg, klass) &&
8420 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8421 MonoMethod* helper = mono_marshal_get_stelemref ();
8422 MonoInst *iargs [3];
8424 if (sp [0]->type != STACK_OBJ)
8426 if (sp [2]->type != STACK_OBJ)
8433 mono_emit_method_call (cfg, helper, iargs, NULL);
8435 if (sp [1]->opcode == OP_ICONST) {
8436 int array_reg = sp [0]->dreg;
8437 int index_reg = sp [1]->dreg;
8438 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8440 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8441 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8443 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8444 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8448 if (*ip == CEE_STELEM_ANY)
8455 case CEE_CKFINITE: {
8459 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8460 ins->sreg1 = sp [0]->dreg;
8461 ins->dreg = alloc_freg (cfg);
8462 ins->type = STACK_R8;
8463 MONO_ADD_INS (bblock, ins);
8465 *sp++ = mono_decompose_opcode (cfg, ins);
8470 case CEE_REFANYVAL: {
8471 MonoInst *src_var, *src;
8473 int klass_reg = alloc_preg (cfg);
8474 int dreg = alloc_preg (cfg);
8477 MONO_INST_NEW (cfg, ins, *ip);
8480 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8481 CHECK_TYPELOAD (klass);
8482 mono_class_init (klass);
8484 if (cfg->generic_sharing_context)
8485 context_used = mono_class_check_context_used (klass);
8488 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8490 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8491 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8492 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8495 MonoInst *klass_ins;
8497 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8498 klass, MONO_RGCTX_INFO_KLASS);
8501 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8502 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8504 mini_emit_class_check (cfg, klass_reg, klass);
8506 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8507 ins->type = STACK_MP;
8512 case CEE_MKREFANY: {
8513 MonoInst *loc, *addr;
8516 MONO_INST_NEW (cfg, ins, *ip);
8519 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8520 CHECK_TYPELOAD (klass);
8521 mono_class_init (klass);
8523 if (cfg->generic_sharing_context)
8524 context_used = mono_class_check_context_used (klass);
8526 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8527 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8530 MonoInst *const_ins;
8531 int type_reg = alloc_preg (cfg);
8533 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8534 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8535 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8536 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8537 } else if (cfg->compile_aot) {
8538 int const_reg = alloc_preg (cfg);
8539 int type_reg = alloc_preg (cfg);
8541 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8542 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8543 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8544 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8546 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8547 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8549 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8551 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8552 ins->type = STACK_VTYPE;
8553 ins->klass = mono_defaults.typed_reference_class;
8560 MonoClass *handle_class;
8562 CHECK_STACK_OVF (1);
8565 n = read32 (ip + 1);
8567 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
8568 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8569 handle = mono_method_get_wrapper_data (method, n);
8570 handle_class = mono_method_get_wrapper_data (method, n + 1);
8571 if (handle_class == mono_defaults.typehandle_class)
8572 handle = &((MonoClass*)handle)->byval_arg;
8575 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8579 mono_class_init (handle_class);
8580 if (cfg->generic_sharing_context) {
8581 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
8582 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
8583 /* This case handles ldtoken
8584 of an open type, like for
8587 } else if (handle_class == mono_defaults.typehandle_class) {
8588 /* If we get a MONO_TYPE_CLASS
8589 then we need to provide the
8591 instantiation of it. */
8592 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8595 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8596 } else if (handle_class == mono_defaults.fieldhandle_class)
8597 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8598 else if (handle_class == mono_defaults.methodhandle_class)
8599 context_used = mono_method_check_context_used (handle);
8601 g_assert_not_reached ();
8604 if ((cfg->opt & MONO_OPT_SHARED) &&
8605 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
8606 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
8607 MonoInst *addr, *vtvar, *iargs [3];
8608 int method_context_used;
8610 if (cfg->generic_sharing_context)
8611 method_context_used = mono_method_check_context_used (method);
8613 method_context_used = 0;
8615 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8617 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
8618 EMIT_NEW_ICONST (cfg, iargs [1], n);
8619 if (method_context_used) {
8620 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
8621 method, MONO_RGCTX_INFO_METHOD);
8622 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
8624 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
8625 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
8627 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8629 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8631 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8633 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
8634 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
8635 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
8636 (cmethod->klass == mono_defaults.monotype_class->parent) &&
8637 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
8638 MonoClass *tclass = mono_class_from_mono_type (handle);
8640 mono_class_init (tclass);
8642 ins = emit_get_rgctx_klass (cfg, context_used,
8643 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
8644 } else if (cfg->compile_aot) {
8645 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
8647 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
8649 ins->type = STACK_OBJ;
8650 ins->klass = cmethod->klass;
8653 MonoInst *addr, *vtvar;
8655 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
8658 if (handle_class == mono_defaults.typehandle_class) {
8659 ins = emit_get_rgctx_klass (cfg, context_used,
8660 mono_class_from_mono_type (handle),
8661 MONO_RGCTX_INFO_TYPE);
8662 } else if (handle_class == mono_defaults.methodhandle_class) {
8663 ins = emit_get_rgctx_method (cfg, context_used,
8664 handle, MONO_RGCTX_INFO_METHOD);
8665 } else if (handle_class == mono_defaults.fieldhandle_class) {
8666 ins = emit_get_rgctx_field (cfg, context_used,
8667 handle, MONO_RGCTX_INFO_CLASS_FIELD);
8669 g_assert_not_reached ();
8671 } else if (cfg->compile_aot) {
8672 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
8674 EMIT_NEW_PCONST (cfg, ins, handle);
8676 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
8677 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
8678 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
8688 MONO_INST_NEW (cfg, ins, OP_THROW);
8690 ins->sreg1 = sp [0]->dreg;
8692 bblock->out_of_line = TRUE;
8693 MONO_ADD_INS (bblock, ins);
8694 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
8695 MONO_ADD_INS (bblock, ins);
8698 link_bblock (cfg, bblock, end_bblock);
8699 start_new_bblock = 1;
8701 case CEE_ENDFINALLY:
8702 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
8703 MONO_ADD_INS (bblock, ins);
8705 start_new_bblock = 1;
8708 * Control will leave the method so empty the stack, otherwise
8709 * the next basic block will start with a nonempty stack.
8711 while (sp != stack_start) {
8719 if (*ip == CEE_LEAVE) {
8721 target = ip + 5 + (gint32)read32(ip + 1);
8724 target = ip + 2 + (signed char)(ip [1]);
8727 /* empty the stack */
8728 while (sp != stack_start) {
8733 * If this leave statement is in a catch block, check for a
8734 * pending exception, and rethrow it if necessary.
8736 for (i = 0; i < header->num_clauses; ++i) {
8737 MonoExceptionClause *clause = &header->clauses [i];
8740 * Use <= in the final comparison to handle clauses with multiple
8741 * leave statements, like in bug #78024.
8742 * The ordering of the exception clauses guarantees that we find the
8745 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len)) {
8747 MonoBasicBlock *dont_throw;
8752 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
8755 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
8757 NEW_BBLOCK (cfg, dont_throw);
8760 * Currently, we allways rethrow the abort exception, despite the
8761 * fact that this is not correct. See thread6.cs for an example.
8762 * But propagating the abort exception is more important than
8763 * getting the sematics right.
8765 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
8766 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
8767 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
8769 MONO_START_BB (cfg, dont_throw);
8774 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
8776 for (tmp = handlers; tmp; tmp = tmp->next) {
8778 link_bblock (cfg, bblock, tblock);
8779 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
8780 ins->inst_target_bb = tblock;
8781 MONO_ADD_INS (bblock, ins);
8783 g_list_free (handlers);
8786 MONO_INST_NEW (cfg, ins, OP_BR);
8787 MONO_ADD_INS (bblock, ins);
8788 GET_BBLOCK (cfg, tblock, target);
8789 link_bblock (cfg, bblock, tblock);
8790 ins->inst_target_bb = tblock;
8791 start_new_bblock = 1;
8793 if (*ip == CEE_LEAVE)
8802 * Mono specific opcodes
8804 case MONO_CUSTOM_PREFIX: {
8806 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
8810 case CEE_MONO_ICALL: {
8812 MonoJitICallInfo *info;
8814 token = read32 (ip + 2);
8815 func = mono_method_get_wrapper_data (method, token);
8816 info = mono_find_jit_icall_by_addr (func);
8819 CHECK_STACK (info->sig->param_count);
8820 sp -= info->sig->param_count;
8822 ins = mono_emit_jit_icall (cfg, info->func, sp);
8823 if (!MONO_TYPE_IS_VOID (info->sig->ret))
8827 inline_costs += 10 * num_calls++;
8831 case CEE_MONO_LDPTR: {
8834 CHECK_STACK_OVF (1);
8836 token = read32 (ip + 2);
8838 ptr = mono_method_get_wrapper_data (method, token);
8839 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
8840 MonoJitICallInfo *callinfo;
8841 const char *icall_name;
8843 icall_name = method->name + strlen ("__icall_wrapper_");
8844 g_assert (icall_name);
8845 callinfo = mono_find_jit_icall_by_name (icall_name);
8846 g_assert (callinfo);
8848 if (ptr == callinfo->func) {
8849 /* Will be transformed into an AOTCONST later */
8850 EMIT_NEW_PCONST (cfg, ins, ptr);
8856 /* FIXME: Generalize this */
8857 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
8858 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
8863 EMIT_NEW_PCONST (cfg, ins, ptr);
8866 inline_costs += 10 * num_calls++;
8867 /* Can't embed random pointers into AOT code */
8868 cfg->disable_aot = 1;
8871 case CEE_MONO_ICALL_ADDR: {
8872 MonoMethod *cmethod;
8875 CHECK_STACK_OVF (1);
8877 token = read32 (ip + 2);
8879 cmethod = mono_method_get_wrapper_data (method, token);
8881 if (cfg->compile_aot) {
8882 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
8884 ptr = mono_lookup_internal_call (cmethod);
8886 EMIT_NEW_PCONST (cfg, ins, ptr);
8892 case CEE_MONO_VTADDR: {
8893 MonoInst *src_var, *src;
8899 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8900 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
8905 case CEE_MONO_NEWOBJ: {
8906 MonoInst *iargs [2];
8908 CHECK_STACK_OVF (1);
8910 token = read32 (ip + 2);
8911 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8912 mono_class_init (klass);
8913 NEW_DOMAINCONST (cfg, iargs [0]);
8914 MONO_ADD_INS (cfg->cbb, iargs [0]);
8915 NEW_CLASSCONST (cfg, iargs [1], klass);
8916 MONO_ADD_INS (cfg->cbb, iargs [1]);
8917 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
8919 inline_costs += 10 * num_calls++;
8922 case CEE_MONO_OBJADDR:
8925 MONO_INST_NEW (cfg, ins, OP_MOVE);
8926 ins->dreg = alloc_preg (cfg);
8927 ins->sreg1 = sp [0]->dreg;
8928 ins->type = STACK_MP;
8929 MONO_ADD_INS (cfg->cbb, ins);
8933 case CEE_MONO_LDNATIVEOBJ:
8935 * Similar to LDOBJ, but instead load the unmanaged
8936 * representation of the vtype to the stack.
8941 token = read32 (ip + 2);
8942 klass = mono_method_get_wrapper_data (method, token);
8943 g_assert (klass->valuetype);
8944 mono_class_init (klass);
8947 MonoInst *src, *dest, *temp;
8950 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
8951 temp->backend.is_pinvoke = 1;
8952 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
8953 mini_emit_stobj (cfg, dest, src, klass, TRUE);
8955 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
8956 dest->type = STACK_VTYPE;
8957 dest->klass = klass;
8963 case CEE_MONO_RETOBJ: {
8965 * Same as RET, but return the native representation of a vtype
8968 g_assert (cfg->ret);
8969 g_assert (mono_method_signature (method)->pinvoke);
8974 token = read32 (ip + 2);
8975 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
8977 if (!cfg->vret_addr) {
8978 g_assert (cfg->ret_var_is_local);
8980 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
8982 EMIT_NEW_RETLOADA (cfg, ins);
8984 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
8986 if (sp != stack_start)
8989 MONO_INST_NEW (cfg, ins, OP_BR);
8990 ins->inst_target_bb = end_bblock;
8991 MONO_ADD_INS (bblock, ins);
8992 link_bblock (cfg, bblock, end_bblock);
8993 start_new_bblock = 1;
8997 case CEE_MONO_CISINST:
8998 case CEE_MONO_CCASTCLASS: {
9003 token = read32 (ip + 2);
9004 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9005 if (ip [1] == CEE_MONO_CISINST)
9006 ins = handle_cisinst (cfg, klass, sp [0]);
9008 ins = handle_ccastclass (cfg, klass, sp [0]);
9014 case CEE_MONO_SAVE_LMF:
9015 case CEE_MONO_RESTORE_LMF:
9016 #ifdef MONO_ARCH_HAVE_LMF_OPS
9017 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9018 MONO_ADD_INS (bblock, ins);
9019 cfg->need_lmf_area = TRUE;
9023 case CEE_MONO_CLASSCONST:
9024 CHECK_STACK_OVF (1);
9026 token = read32 (ip + 2);
9027 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9030 inline_costs += 10 * num_calls++;
9032 case CEE_MONO_NOT_TAKEN:
9033 bblock->out_of_line = TRUE;
9037 CHECK_STACK_OVF (1);
9039 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9040 ins->dreg = alloc_preg (cfg);
9041 ins->inst_offset = (gint32)read32 (ip + 2);
9042 ins->type = STACK_PTR;
9043 MONO_ADD_INS (bblock, ins);
9048 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9058 /* somewhat similar to LDTOKEN */
9059 MonoInst *addr, *vtvar;
9060 CHECK_STACK_OVF (1);
9061 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9063 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9064 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9066 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9067 ins->type = STACK_VTYPE;
9068 ins->klass = mono_defaults.argumenthandle_class;
9081 * The following transforms:
9082 * CEE_CEQ into OP_CEQ
9083 * CEE_CGT into OP_CGT
9084 * CEE_CGT_UN into OP_CGT_UN
9085 * CEE_CLT into OP_CLT
9086 * CEE_CLT_UN into OP_CLT_UN
9088 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9090 MONO_INST_NEW (cfg, ins, cmp->opcode);
9092 cmp->sreg1 = sp [0]->dreg;
9093 cmp->sreg2 = sp [1]->dreg;
9094 type_from_op (cmp, sp [0], sp [1]);
9096 if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9097 cmp->opcode = OP_LCOMPARE;
9098 else if (sp [0]->type == STACK_R8)
9099 cmp->opcode = OP_FCOMPARE;
9101 cmp->opcode = OP_ICOMPARE;
9102 MONO_ADD_INS (bblock, cmp);
9103 ins->type = STACK_I4;
9104 ins->dreg = alloc_dreg (cfg, ins->type);
9105 type_from_op (ins, sp [0], sp [1]);
9107 if (cmp->opcode == OP_FCOMPARE) {
9109 * The backends expect the fceq opcodes to do the
9112 cmp->opcode = OP_NOP;
9113 ins->sreg1 = cmp->sreg1;
9114 ins->sreg2 = cmp->sreg2;
9116 MONO_ADD_INS (bblock, ins);
9123 MonoMethod *cil_method;
9124 gboolean needs_static_rgctx_invoke;
9126 CHECK_STACK_OVF (1);
9128 n = read32 (ip + 2);
9129 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9132 mono_class_init (cmethod->klass);
9134 mono_save_token_info (cfg, image, n, cmethod);
9136 if (cfg->generic_sharing_context)
9137 context_used = mono_method_check_context_used (cmethod);
9139 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9141 cil_method = cmethod;
9142 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9143 METHOD_ACCESS_FAILURE;
9145 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9146 if (check_linkdemand (cfg, method, cmethod))
9148 CHECK_CFG_EXCEPTION;
9149 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9150 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9154 * Optimize the common case of ldftn+delegate creation
9156 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
9157 /* FIXME: SGEN support */
9158 /* FIXME: handle shared static generic methods */
9159 /* FIXME: handle this in shared code */
9160 if (!needs_static_rgctx_invoke && !context_used && (sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9161 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9162 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9163 MonoInst *target_ins;
9166 invoke = mono_get_delegate_invoke (ctor_method->klass);
9167 if (!invoke || !mono_method_signature (invoke))
9171 if (cfg->verbose_level > 3)
9172 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9173 target_ins = sp [-1];
9175 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod);
9184 if (needs_static_rgctx_invoke)
9185 cmethod = mono_marshal_get_static_rgctx_invoke (cmethod);
9187 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9188 } else if (needs_static_rgctx_invoke) {
9189 EMIT_NEW_METHODCONST (cfg, argconst, mono_marshal_get_static_rgctx_invoke (cmethod));
9191 EMIT_NEW_METHODCONST (cfg, argconst, cmethod);
9193 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9197 inline_costs += 10 * num_calls++;
9200 case CEE_LDVIRTFTN: {
9205 n = read32 (ip + 2);
9206 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9209 mono_class_init (cmethod->klass);
9211 if (cfg->generic_sharing_context)
9212 context_used = mono_method_check_context_used (cmethod);
9214 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9215 if (check_linkdemand (cfg, method, cmethod))
9217 CHECK_CFG_EXCEPTION;
9218 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9219 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9226 args [1] = emit_get_rgctx_method (cfg, context_used,
9227 cmethod, MONO_RGCTX_INFO_METHOD);
9228 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9230 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
9231 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9235 inline_costs += 10 * num_calls++;
9239 CHECK_STACK_OVF (1);
9241 n = read16 (ip + 2);
9243 EMIT_NEW_ARGLOAD (cfg, ins, n);
9248 CHECK_STACK_OVF (1);
9250 n = read16 (ip + 2);
9252 NEW_ARGLOADA (cfg, ins, n);
9253 MONO_ADD_INS (cfg->cbb, ins);
9261 n = read16 (ip + 2);
9263 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9265 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9269 CHECK_STACK_OVF (1);
9271 n = read16 (ip + 2);
9273 EMIT_NEW_LOCLOAD (cfg, ins, n);
9278 unsigned char *tmp_ip;
9279 CHECK_STACK_OVF (1);
9281 n = read16 (ip + 2);
9284 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9290 EMIT_NEW_LOCLOADA (cfg, ins, n);
9299 n = read16 (ip + 2);
9301 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9303 emit_stloc_ir (cfg, sp, header, n);
9310 if (sp != stack_start)
9312 if (cfg->method != method)
9314 * Inlining this into a loop in a parent could lead to
9315 * stack overflows which is different behavior than the
9316 * non-inlined case, thus disable inlining in this case.
9318 goto inline_failure;
9320 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9321 ins->dreg = alloc_preg (cfg);
9322 ins->sreg1 = sp [0]->dreg;
9323 ins->type = STACK_PTR;
9324 MONO_ADD_INS (cfg->cbb, ins);
9326 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9327 if (header->init_locals)
9328 ins->flags |= MONO_INST_INIT;
9333 case CEE_ENDFILTER: {
9334 MonoExceptionClause *clause, *nearest;
9335 int cc, nearest_num;
9339 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9341 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9342 ins->sreg1 = (*sp)->dreg;
9343 MONO_ADD_INS (bblock, ins);
9344 start_new_bblock = 1;
9349 for (cc = 0; cc < header->num_clauses; ++cc) {
9350 clause = &header->clauses [cc];
9351 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9352 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9353 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9359 if ((ip - header->code) != nearest->handler_offset)
9364 case CEE_UNALIGNED_:
9365 ins_flag |= MONO_INST_UNALIGNED;
9366 /* FIXME: record alignment? we can assume 1 for now */
9371 ins_flag |= MONO_INST_VOLATILE;
9375 ins_flag |= MONO_INST_TAILCALL;
9376 cfg->flags |= MONO_CFG_HAS_TAIL;
9377 /* Can't inline tail calls at this time */
9378 inline_costs += 100000;
9385 token = read32 (ip + 2);
9386 klass = mini_get_class (method, token, generic_context);
9387 CHECK_TYPELOAD (klass);
9388 if (generic_class_is_reference_type (cfg, klass))
9389 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9391 mini_emit_initobj (cfg, *sp, NULL, klass);
9395 case CEE_CONSTRAINED_:
9397 token = read32 (ip + 2);
9398 constrained_call = mono_class_get_full (image, token, generic_context);
9399 CHECK_TYPELOAD (constrained_call);
9404 MonoInst *iargs [3];
9408 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9409 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9410 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9411 /* emit_memset only works when val == 0 */
9412 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9417 if (ip [1] == CEE_CPBLK) {
9418 MonoMethod *memcpy_method = get_memcpy_method ();
9419 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9421 MonoMethod *memset_method = get_memset_method ();
9422 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9432 ins_flag |= MONO_INST_NOTYPECHECK;
9434 ins_flag |= MONO_INST_NORANGECHECK;
9435 /* we ignore the no-nullcheck for now since we
9436 * really do it explicitly only when doing callvirt->call
9442 int handler_offset = -1;
9444 for (i = 0; i < header->num_clauses; ++i) {
9445 MonoExceptionClause *clause = &header->clauses [i];
9446 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9447 handler_offset = clause->handler_offset;
9452 bblock->flags |= BB_EXCEPTION_UNSAFE;
9454 g_assert (handler_offset != -1);
9456 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9457 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9458 ins->sreg1 = load->dreg;
9459 MONO_ADD_INS (bblock, ins);
9461 link_bblock (cfg, bblock, end_bblock);
9462 start_new_bblock = 1;
9470 CHECK_STACK_OVF (1);
9472 token = read32 (ip + 2);
9473 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC) {
9474 MonoType *type = mono_type_create_from_typespec (image, token);
9475 token = mono_type_size (type, &ialign);
9477 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9478 CHECK_TYPELOAD (klass);
9479 mono_class_init (klass);
9480 token = mono_class_value_size (klass, &align);
9482 EMIT_NEW_ICONST (cfg, ins, token);
9487 case CEE_REFANYTYPE: {
9488 MonoInst *src_var, *src;
9494 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9496 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9497 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9498 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9508 g_error ("opcode 0xfe 0x%02x not handled", ip [1]);
9513 g_error ("opcode 0x%02x not handled", *ip);
9516 if (start_new_bblock != 1)
9519 bblock->cil_length = ip - bblock->cil_code;
9520 bblock->next_bb = end_bblock;
9522 if (cfg->method == method && cfg->domainvar) {
9524 MonoInst *get_domain;
9526 cfg->cbb = init_localsbb;
9528 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9529 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9532 get_domain->dreg = alloc_preg (cfg);
9533 MONO_ADD_INS (cfg->cbb, get_domain);
9535 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9536 MONO_ADD_INS (cfg->cbb, store);
9539 if (cfg->method == method && cfg->got_var)
9540 mono_emit_load_got_addr (cfg);
9542 if (header->init_locals) {
9545 cfg->cbb = init_localsbb;
9547 for (i = 0; i < header->num_locals; ++i) {
9548 MonoType *ptype = header->locals [i];
9549 int t = ptype->type;
9550 dreg = cfg->locals [i]->dreg;
9552 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
9553 t = mono_class_enum_basetype (ptype->data.klass)->type;
9555 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9556 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
9557 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
9558 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
9559 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
9560 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
9561 MONO_INST_NEW (cfg, ins, OP_R8CONST);
9562 ins->type = STACK_R8;
9563 ins->inst_p0 = (void*)&r8_0;
9564 ins->dreg = alloc_dreg (cfg, STACK_R8);
9565 MONO_ADD_INS (init_localsbb, ins);
9566 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
9567 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
9568 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
9569 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
9571 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
9578 if (cfg->method == method) {
9580 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
9581 bb->region = mono_find_block_region (cfg, bb->real_offset);
9583 mono_create_spvar_for_region (cfg, bb->region);
9584 if (cfg->verbose_level > 2)
9585 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
9589 g_slist_free (class_inits);
9590 dont_inline = g_list_remove (dont_inline, method);
9592 if (inline_costs < 0) {
9595 /* Method is too large */
9596 mname = mono_method_full_name (method, TRUE);
9597 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
9598 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
9603 if ((cfg->verbose_level > 2) && (cfg->method == method))
9604 mono_print_code (cfg, "AFTER METHOD-TO-IR");
9606 return inline_costs;
9609 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
9610 g_slist_free (class_inits);
9611 dont_inline = g_list_remove (dont_inline, method);
9615 g_slist_free (class_inits);
9616 dont_inline = g_list_remove (dont_inline, method);
9620 g_slist_free (class_inits);
9621 dont_inline = g_list_remove (dont_inline, method);
9622 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
9626 g_slist_free (class_inits);
9627 dont_inline = g_list_remove (dont_inline, method);
9628 set_exception_type_from_invalid_il (cfg, method, ip);
9633 store_membase_reg_to_store_membase_imm (int opcode)
9636 case OP_STORE_MEMBASE_REG:
9637 return OP_STORE_MEMBASE_IMM;
9638 case OP_STOREI1_MEMBASE_REG:
9639 return OP_STOREI1_MEMBASE_IMM;
9640 case OP_STOREI2_MEMBASE_REG:
9641 return OP_STOREI2_MEMBASE_IMM;
9642 case OP_STOREI4_MEMBASE_REG:
9643 return OP_STOREI4_MEMBASE_IMM;
9644 case OP_STOREI8_MEMBASE_REG:
9645 return OP_STOREI8_MEMBASE_IMM;
9647 g_assert_not_reached ();
9653 #endif /* DISABLE_JIT */
9656 mono_op_to_op_imm (int opcode)
9666 return OP_IDIV_UN_IMM;
9670 return OP_IREM_UN_IMM;
9684 return OP_ISHR_UN_IMM;
9701 return OP_LSHR_UN_IMM;
9704 return OP_COMPARE_IMM;
9706 return OP_ICOMPARE_IMM;
9708 return OP_LCOMPARE_IMM;
9710 case OP_STORE_MEMBASE_REG:
9711 return OP_STORE_MEMBASE_IMM;
9712 case OP_STOREI1_MEMBASE_REG:
9713 return OP_STOREI1_MEMBASE_IMM;
9714 case OP_STOREI2_MEMBASE_REG:
9715 return OP_STOREI2_MEMBASE_IMM;
9716 case OP_STOREI4_MEMBASE_REG:
9717 return OP_STOREI4_MEMBASE_IMM;
9719 #if defined(__i386__) || defined (__x86_64__)
9721 return OP_X86_PUSH_IMM;
9722 case OP_X86_COMPARE_MEMBASE_REG:
9723 return OP_X86_COMPARE_MEMBASE_IMM;
9725 #if defined(__x86_64__)
9726 case OP_AMD64_ICOMPARE_MEMBASE_REG:
9727 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9729 case OP_VOIDCALL_REG:
9738 return OP_LOCALLOC_IMM;
9745 ldind_to_load_membase (int opcode)
9749 return OP_LOADI1_MEMBASE;
9751 return OP_LOADU1_MEMBASE;
9753 return OP_LOADI2_MEMBASE;
9755 return OP_LOADU2_MEMBASE;
9757 return OP_LOADI4_MEMBASE;
9759 return OP_LOADU4_MEMBASE;
9761 return OP_LOAD_MEMBASE;
9763 return OP_LOAD_MEMBASE;
9765 return OP_LOADI8_MEMBASE;
9767 return OP_LOADR4_MEMBASE;
9769 return OP_LOADR8_MEMBASE;
9771 g_assert_not_reached ();
9778 stind_to_store_membase (int opcode)
9782 return OP_STOREI1_MEMBASE_REG;
9784 return OP_STOREI2_MEMBASE_REG;
9786 return OP_STOREI4_MEMBASE_REG;
9789 return OP_STORE_MEMBASE_REG;
9791 return OP_STOREI8_MEMBASE_REG;
9793 return OP_STORER4_MEMBASE_REG;
9795 return OP_STORER8_MEMBASE_REG;
9797 g_assert_not_reached ();
9804 mono_load_membase_to_load_mem (int opcode)
9806 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
9807 #if defined(__i386__) || defined(__x86_64__)
9809 case OP_LOAD_MEMBASE:
9811 case OP_LOADU1_MEMBASE:
9812 return OP_LOADU1_MEM;
9813 case OP_LOADU2_MEMBASE:
9814 return OP_LOADU2_MEM;
9815 case OP_LOADI4_MEMBASE:
9816 return OP_LOADI4_MEM;
9817 case OP_LOADU4_MEMBASE:
9818 return OP_LOADU4_MEM;
9819 #if SIZEOF_REGISTER == 8
9820 case OP_LOADI8_MEMBASE:
9821 return OP_LOADI8_MEM;
9830 op_to_op_dest_membase (int store_opcode, int opcode)
9832 #if defined(__i386__)
9833 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
9838 return OP_X86_ADD_MEMBASE_REG;
9840 return OP_X86_SUB_MEMBASE_REG;
9842 return OP_X86_AND_MEMBASE_REG;
9844 return OP_X86_OR_MEMBASE_REG;
9846 return OP_X86_XOR_MEMBASE_REG;
9849 return OP_X86_ADD_MEMBASE_IMM;
9852 return OP_X86_SUB_MEMBASE_IMM;
9855 return OP_X86_AND_MEMBASE_IMM;
9858 return OP_X86_OR_MEMBASE_IMM;
9861 return OP_X86_XOR_MEMBASE_IMM;
9867 #if defined(__x86_64__)
9868 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
9873 return OP_X86_ADD_MEMBASE_REG;
9875 return OP_X86_SUB_MEMBASE_REG;
9877 return OP_X86_AND_MEMBASE_REG;
9879 return OP_X86_OR_MEMBASE_REG;
9881 return OP_X86_XOR_MEMBASE_REG;
9883 return OP_X86_ADD_MEMBASE_IMM;
9885 return OP_X86_SUB_MEMBASE_IMM;
9887 return OP_X86_AND_MEMBASE_IMM;
9889 return OP_X86_OR_MEMBASE_IMM;
9891 return OP_X86_XOR_MEMBASE_IMM;
9893 return OP_AMD64_ADD_MEMBASE_REG;
9895 return OP_AMD64_SUB_MEMBASE_REG;
9897 return OP_AMD64_AND_MEMBASE_REG;
9899 return OP_AMD64_OR_MEMBASE_REG;
9901 return OP_AMD64_XOR_MEMBASE_REG;
9904 return OP_AMD64_ADD_MEMBASE_IMM;
9907 return OP_AMD64_SUB_MEMBASE_IMM;
9910 return OP_AMD64_AND_MEMBASE_IMM;
9913 return OP_AMD64_OR_MEMBASE_IMM;
9916 return OP_AMD64_XOR_MEMBASE_IMM;
9926 op_to_op_store_membase (int store_opcode, int opcode)
9928 #if defined(__i386__) || defined(__x86_64__)
9931 if (store_opcode == OP_STOREI1_MEMBASE_REG)
9932 return OP_X86_SETEQ_MEMBASE;
9934 if (store_opcode == OP_STOREI1_MEMBASE_REG)
9935 return OP_X86_SETNE_MEMBASE;
9943 op_to_op_src1_membase (int load_opcode, int opcode)
9946 /* FIXME: This has sign extension issues */
9948 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
9949 return OP_X86_COMPARE_MEMBASE8_IMM;
9952 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
9957 return OP_X86_PUSH_MEMBASE;
9958 case OP_COMPARE_IMM:
9959 case OP_ICOMPARE_IMM:
9960 return OP_X86_COMPARE_MEMBASE_IMM;
9963 return OP_X86_COMPARE_MEMBASE_REG;
9968 /* FIXME: This has sign extension issues */
9970 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
9971 return OP_X86_COMPARE_MEMBASE8_IMM;
9976 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9977 return OP_X86_PUSH_MEMBASE;
9979 /* FIXME: This only works for 32 bit immediates
9980 case OP_COMPARE_IMM:
9981 case OP_LCOMPARE_IMM:
9982 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9983 return OP_AMD64_COMPARE_MEMBASE_IMM;
9985 case OP_ICOMPARE_IMM:
9986 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9987 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
9991 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
9992 return OP_AMD64_COMPARE_MEMBASE_REG;
9995 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
9996 return OP_AMD64_ICOMPARE_MEMBASE_REG;
10005 op_to_op_src2_membase (int load_opcode, int opcode)
10008 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10014 return OP_X86_COMPARE_REG_MEMBASE;
10016 return OP_X86_ADD_REG_MEMBASE;
10018 return OP_X86_SUB_REG_MEMBASE;
10020 return OP_X86_AND_REG_MEMBASE;
10022 return OP_X86_OR_REG_MEMBASE;
10024 return OP_X86_XOR_REG_MEMBASE;
10031 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10032 return OP_AMD64_ICOMPARE_REG_MEMBASE;
10036 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10037 return OP_AMD64_COMPARE_REG_MEMBASE;
10040 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10041 return OP_X86_ADD_REG_MEMBASE;
10043 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10044 return OP_X86_SUB_REG_MEMBASE;
10046 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10047 return OP_X86_AND_REG_MEMBASE;
10049 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10050 return OP_X86_OR_REG_MEMBASE;
10052 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10053 return OP_X86_XOR_REG_MEMBASE;
10055 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10056 return OP_AMD64_ADD_REG_MEMBASE;
10058 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10059 return OP_AMD64_SUB_REG_MEMBASE;
10061 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10062 return OP_AMD64_AND_REG_MEMBASE;
10064 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10065 return OP_AMD64_OR_REG_MEMBASE;
10067 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10068 return OP_AMD64_XOR_REG_MEMBASE;
10076 mono_op_to_op_imm_noemul (int opcode)
10079 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
10084 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10092 return mono_op_to_op_imm (opcode);
10096 #ifndef DISABLE_JIT
10099 * mono_handle_global_vregs:
10101 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
10105 mono_handle_global_vregs (MonoCompile *cfg)
10107 gint32 *vreg_to_bb;
10108 MonoBasicBlock *bb;
10111 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
10113 #ifdef MONO_ARCH_SIMD_INTRINSICS
10114 if (cfg->uses_simd_intrinsics)
10115 mono_simd_simplify_indirection (cfg);
10118 /* Find local vregs used in more than one bb */
10119 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10120 MonoInst *ins = bb->code;
10121 int block_num = bb->block_num;
10123 if (cfg->verbose_level > 2)
10124 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
10127 for (; ins; ins = ins->next) {
10128 const char *spec = INS_INFO (ins->opcode);
10129 int regtype, regindex;
10132 if (G_UNLIKELY (cfg->verbose_level > 2))
10133 mono_print_ins (ins);
10135 g_assert (ins->opcode >= MONO_CEE_LAST);
10137 for (regindex = 0; regindex < 4; regindex ++) {
10140 if (regindex == 0) {
10141 regtype = spec [MONO_INST_DEST];
10142 if (regtype == ' ')
10145 } else if (regindex == 1) {
10146 regtype = spec [MONO_INST_SRC1];
10147 if (regtype == ' ')
10150 } else if (regindex == 2) {
10151 regtype = spec [MONO_INST_SRC2];
10152 if (regtype == ' ')
10155 } else if (regindex == 3) {
10156 regtype = spec [MONO_INST_SRC3];
10157 if (regtype == ' ')
10162 #if SIZEOF_REGISTER == 4
10163 if (regtype == 'l') {
10165 * Since some instructions reference the original long vreg,
10166 * and some reference the two component vregs, it is quite hard
10167 * to determine when it needs to be global. So be conservative.
10169 if (!get_vreg_to_inst (cfg, vreg)) {
10170 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10172 if (cfg->verbose_level > 2)
10173 printf ("LONG VREG R%d made global.\n", vreg);
10177 * Make the component vregs volatile since the optimizations can
10178 * get confused otherwise.
10180 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
10181 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
10185 g_assert (vreg != -1);
10187 prev_bb = vreg_to_bb [vreg];
10188 if (prev_bb == 0) {
10189 /* 0 is a valid block num */
10190 vreg_to_bb [vreg] = block_num + 1;
10191 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
10192 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
10195 if (!get_vreg_to_inst (cfg, vreg)) {
10196 if (G_UNLIKELY (cfg->verbose_level > 2))
10197 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
10201 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
10204 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
10207 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
10210 g_assert_not_reached ();
10214 /* Flag as having been used in more than one bb */
10215 vreg_to_bb [vreg] = -1;
10221 /* If a variable is used in only one bblock, convert it into a local vreg */
10222 for (i = 0; i < cfg->num_varinfo; i++) {
10223 MonoInst *var = cfg->varinfo [i];
10224 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
10226 switch (var->type) {
10232 #if SIZEOF_REGISTER == 8
10235 #if !defined(__i386__) && !defined(MONO_ARCH_SOFT_FLOAT)
10236 /* Enabling this screws up the fp stack on x86 */
10239 /* Arguments are implicitly global */
10240 /* Putting R4 vars into registers doesn't work currently */
10241 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
10243 * Make that the variable's liveness interval doesn't contain a call, since
10244 * that would cause the lvreg to be spilled, making the whole optimization
10247 /* This is too slow for JIT compilation */
10249 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
10251 int def_index, call_index, ins_index;
10252 gboolean spilled = FALSE;
10257 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
10258 const char *spec = INS_INFO (ins->opcode);
10260 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
10261 def_index = ins_index;
10263 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
10264 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
10265 if (call_index > def_index) {
10271 if (MONO_IS_CALL (ins))
10272 call_index = ins_index;
10282 if (G_UNLIKELY (cfg->verbose_level > 2))
10283 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
10284 var->flags |= MONO_INST_IS_DEAD;
10285 cfg->vreg_to_inst [var->dreg] = NULL;
10292 * Compress the varinfo and vars tables so the liveness computation is faster and
10293 * takes up less space.
10296 for (i = 0; i < cfg->num_varinfo; ++i) {
10297 MonoInst *var = cfg->varinfo [i];
10298 if (pos < i && cfg->locals_start == i)
10299 cfg->locals_start = pos;
10300 if (!(var->flags & MONO_INST_IS_DEAD)) {
10302 cfg->varinfo [pos] = cfg->varinfo [i];
10303 cfg->varinfo [pos]->inst_c0 = pos;
10304 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
10305 cfg->vars [pos].idx = pos;
10306 #if SIZEOF_REGISTER == 4
10307 if (cfg->varinfo [pos]->type == STACK_I8) {
10308 /* Modify the two component vars too */
10311 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
10312 var1->inst_c0 = pos;
10313 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
10314 var1->inst_c0 = pos;
10321 cfg->num_varinfo = pos;
10322 if (cfg->locals_start > cfg->num_varinfo)
10323 cfg->locals_start = cfg->num_varinfo;
10327 * mono_spill_global_vars:
10329 * Generate spill code for variables which are not allocated to registers,
10330 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10331 * code is generated which could be optimized by the local optimization passes.
10334 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
10336 MonoBasicBlock *bb;
10338 int orig_next_vreg;
10339 guint32 *vreg_to_lvreg;
10341 guint32 i, lvregs_len;
10342 gboolean dest_has_lvreg = FALSE;
10343 guint32 stacktypes [128];
10344 MonoInst **live_range_start, **live_range_end;
10345 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
10347 *need_local_opts = FALSE;
10349 memset (spec2, 0, sizeof (spec2));
10351 /* FIXME: Move this function to mini.c */
10352 stacktypes ['i'] = STACK_PTR;
10353 stacktypes ['l'] = STACK_I8;
10354 stacktypes ['f'] = STACK_R8;
10355 #ifdef MONO_ARCH_SIMD_INTRINSICS
10356 stacktypes ['x'] = STACK_VTYPE;
10359 #if SIZEOF_REGISTER == 4
10360 /* Create MonoInsts for longs */
10361 for (i = 0; i < cfg->num_varinfo; i++) {
10362 MonoInst *ins = cfg->varinfo [i];
10364 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
10365 switch (ins->type) {
10366 #ifdef MONO_ARCH_SOFT_FLOAT
10372 g_assert (ins->opcode == OP_REGOFFSET);
10374 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10376 tree->opcode = OP_REGOFFSET;
10377 tree->inst_basereg = ins->inst_basereg;
10378 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10380 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10382 tree->opcode = OP_REGOFFSET;
10383 tree->inst_basereg = ins->inst_basereg;
10384 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10394 /* FIXME: widening and truncation */
10397 * As an optimization, when a variable allocated to the stack is first loaded into
10398 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10399 * the variable again.
10401 orig_next_vreg = cfg->next_vreg;
10402 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
10403 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10407 * These arrays contain the first and last instructions accessing a given
10409 * Since we emit bblocks in the same order we process them here, and we
10410 * don't split live ranges, these will precisely describe the live range of
10411 * the variable, i.e. the instruction range where a valid value can be found
10412 * in the variables location.
10414 /* FIXME: Only do this if debugging info is requested */
10415 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
10416 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
10417 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10418 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10420 /* Add spill loads/stores */
10421 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10424 if (cfg->verbose_level > 2)
10425 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
10427 /* Clear vreg_to_lvreg array */
10428 for (i = 0; i < lvregs_len; i++)
10429 vreg_to_lvreg [lvregs [i]] = 0;
10433 MONO_BB_FOR_EACH_INS (bb, ins) {
10434 const char *spec = INS_INFO (ins->opcode);
10435 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
10436 gboolean store, no_lvreg;
10437 int sregs [MONO_MAX_SRC_REGS];
10439 if (G_UNLIKELY (cfg->verbose_level > 2))
10440 mono_print_ins (ins);
10442 if (ins->opcode == OP_NOP)
10446 * We handle LDADDR here as well, since it can only be decomposed
10447 * when variable addresses are known.
10449 if (ins->opcode == OP_LDADDR) {
10450 MonoInst *var = ins->inst_p0;
10452 if (var->opcode == OP_VTARG_ADDR) {
10453 /* Happens on SPARC/S390 where vtypes are passed by reference */
10454 MonoInst *vtaddr = var->inst_left;
10455 if (vtaddr->opcode == OP_REGVAR) {
10456 ins->opcode = OP_MOVE;
10457 ins->sreg1 = vtaddr->dreg;
10459 else if (var->inst_left->opcode == OP_REGOFFSET) {
10460 ins->opcode = OP_LOAD_MEMBASE;
10461 ins->inst_basereg = vtaddr->inst_basereg;
10462 ins->inst_offset = vtaddr->inst_offset;
10466 g_assert (var->opcode == OP_REGOFFSET);
10468 ins->opcode = OP_ADD_IMM;
10469 ins->sreg1 = var->inst_basereg;
10470 ins->inst_imm = var->inst_offset;
10473 *need_local_opts = TRUE;
10474 spec = INS_INFO (ins->opcode);
10477 if (ins->opcode < MONO_CEE_LAST) {
10478 mono_print_ins (ins);
10479 g_assert_not_reached ();
10483 * Store opcodes have destbasereg in the dreg, but in reality, it is an
10487 if (MONO_IS_STORE_MEMBASE (ins)) {
10488 tmp_reg = ins->dreg;
10489 ins->dreg = ins->sreg2;
10490 ins->sreg2 = tmp_reg;
10493 spec2 [MONO_INST_DEST] = ' ';
10494 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10495 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10496 spec2 [MONO_INST_SRC3] = ' ';
10498 } else if (MONO_IS_STORE_MEMINDEX (ins))
10499 g_assert_not_reached ();
10504 if (G_UNLIKELY (cfg->verbose_level > 2)) {
10505 printf ("\t %.3s %d", spec, ins->dreg);
10506 num_sregs = mono_inst_get_src_registers (ins, sregs);
10507 for (srcindex = 0; srcindex < 3; ++srcindex)
10508 printf (" %d", sregs [srcindex]);
10515 regtype = spec [MONO_INST_DEST];
10516 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
10519 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
10520 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
10521 MonoInst *store_ins;
10523 MonoInst *def_ins = ins;
10524 int dreg = ins->dreg; /* The original vreg */
10526 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
10528 if (var->opcode == OP_REGVAR) {
10529 ins->dreg = var->dreg;
10530 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
10532 * Instead of emitting a load+store, use a _membase opcode.
10534 g_assert (var->opcode == OP_REGOFFSET);
10535 if (ins->opcode == OP_MOVE) {
10539 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
10540 ins->inst_basereg = var->inst_basereg;
10541 ins->inst_offset = var->inst_offset;
10544 spec = INS_INFO (ins->opcode);
10548 g_assert (var->opcode == OP_REGOFFSET);
10550 prev_dreg = ins->dreg;
10552 /* Invalidate any previous lvreg for this vreg */
10553 vreg_to_lvreg [ins->dreg] = 0;
10557 #ifdef MONO_ARCH_SOFT_FLOAT
10558 if (store_opcode == OP_STORER8_MEMBASE_REG) {
10560 store_opcode = OP_STOREI8_MEMBASE_REG;
10564 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
10566 if (regtype == 'l') {
10567 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
10568 mono_bblock_insert_after_ins (bb, ins, store_ins);
10569 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
10570 mono_bblock_insert_after_ins (bb, ins, store_ins);
10571 def_ins = store_ins;
10574 g_assert (store_opcode != OP_STOREV_MEMBASE);
10576 /* Try to fuse the store into the instruction itself */
10577 /* FIXME: Add more instructions */
10578 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
10579 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
10580 ins->inst_imm = ins->inst_c0;
10581 ins->inst_destbasereg = var->inst_basereg;
10582 ins->inst_offset = var->inst_offset;
10583 spec = INS_INFO (ins->opcode);
10584 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
10585 ins->opcode = store_opcode;
10586 ins->inst_destbasereg = var->inst_basereg;
10587 ins->inst_offset = var->inst_offset;
10591 tmp_reg = ins->dreg;
10592 ins->dreg = ins->sreg2;
10593 ins->sreg2 = tmp_reg;
10596 spec2 [MONO_INST_DEST] = ' ';
10597 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10598 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10599 spec2 [MONO_INST_SRC3] = ' ';
10601 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
10602 // FIXME: The backends expect the base reg to be in inst_basereg
10603 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
10605 ins->inst_basereg = var->inst_basereg;
10606 ins->inst_offset = var->inst_offset;
10607 spec = INS_INFO (ins->opcode);
10609 /* printf ("INS: "); mono_print_ins (ins); */
10610 /* Create a store instruction */
10611 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
10613 /* Insert it after the instruction */
10614 mono_bblock_insert_after_ins (bb, ins, store_ins);
10616 def_ins = store_ins;
10619 * We can't assign ins->dreg to var->dreg here, since the
10620 * sregs could use it. So set a flag, and do it after
10623 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
10624 dest_has_lvreg = TRUE;
10629 if (def_ins && !live_range_start [dreg]) {
10630 live_range_start [dreg] = def_ins;
10631 live_range_start_bb [dreg] = bb;
10638 num_sregs = mono_inst_get_src_registers (ins, sregs);
10639 for (srcindex = 0; srcindex < 3; ++srcindex) {
10640 regtype = spec [MONO_INST_SRC1 + srcindex];
10641 sreg = sregs [srcindex];
10643 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
10644 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
10645 MonoInst *var = get_vreg_to_inst (cfg, sreg);
10646 MonoInst *use_ins = ins;
10647 MonoInst *load_ins;
10648 guint32 load_opcode;
10650 if (var->opcode == OP_REGVAR) {
10651 sregs [srcindex] = var->dreg;
10652 //mono_inst_set_src_registers (ins, sregs);
10653 live_range_end [sreg] = use_ins;
10654 live_range_end_bb [sreg] = bb;
10658 g_assert (var->opcode == OP_REGOFFSET);
10660 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
10662 g_assert (load_opcode != OP_LOADV_MEMBASE);
10664 if (vreg_to_lvreg [sreg]) {
10665 g_assert (vreg_to_lvreg [sreg] != -1);
10667 /* The variable is already loaded to an lvreg */
10668 if (G_UNLIKELY (cfg->verbose_level > 2))
10669 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
10670 sregs [srcindex] = vreg_to_lvreg [sreg];
10671 //mono_inst_set_src_registers (ins, sregs);
10675 /* Try to fuse the load into the instruction */
10676 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
10677 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
10678 sregs [0] = var->inst_basereg;
10679 //mono_inst_set_src_registers (ins, sregs);
10680 ins->inst_offset = var->inst_offset;
10681 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
10682 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
10683 sregs [1] = var->inst_basereg;
10684 //mono_inst_set_src_registers (ins, sregs);
10685 ins->inst_offset = var->inst_offset;
10687 if (MONO_IS_REAL_MOVE (ins)) {
10688 ins->opcode = OP_NOP;
10691 //printf ("%d ", srcindex); mono_print_ins (ins);
10693 sreg = alloc_dreg (cfg, stacktypes [regtype]);
10695 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
10696 if (var->dreg == prev_dreg) {
10698 * sreg refers to the value loaded by the load
10699 * emitted below, but we need to use ins->dreg
10700 * since it refers to the store emitted earlier.
10704 g_assert (sreg != -1);
10705 vreg_to_lvreg [var->dreg] = sreg;
10706 g_assert (lvregs_len < 1024);
10707 lvregs [lvregs_len ++] = var->dreg;
10711 sregs [srcindex] = sreg;
10712 //mono_inst_set_src_registers (ins, sregs);
10714 if (regtype == 'l') {
10715 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
10716 mono_bblock_insert_before_ins (bb, ins, load_ins);
10717 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
10718 mono_bblock_insert_before_ins (bb, ins, load_ins);
10719 use_ins = load_ins;
10722 #if SIZEOF_REGISTER == 4
10723 g_assert (load_opcode != OP_LOADI8_MEMBASE);
10725 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
10726 mono_bblock_insert_before_ins (bb, ins, load_ins);
10727 use_ins = load_ins;
10731 if (var->dreg < orig_next_vreg) {
10732 live_range_end [var->dreg] = use_ins;
10733 live_range_end_bb [var->dreg] = bb;
10737 mono_inst_set_src_registers (ins, sregs);
10739 if (dest_has_lvreg) {
10740 g_assert (ins->dreg != -1);
10741 vreg_to_lvreg [prev_dreg] = ins->dreg;
10742 g_assert (lvregs_len < 1024);
10743 lvregs [lvregs_len ++] = prev_dreg;
10744 dest_has_lvreg = FALSE;
10748 tmp_reg = ins->dreg;
10749 ins->dreg = ins->sreg2;
10750 ins->sreg2 = tmp_reg;
10753 if (MONO_IS_CALL (ins)) {
10754 /* Clear vreg_to_lvreg array */
10755 for (i = 0; i < lvregs_len; i++)
10756 vreg_to_lvreg [lvregs [i]] = 0;
10758 } else if (ins->opcode == OP_NOP) {
10760 MONO_INST_NULLIFY_SREGS (ins);
10763 if (cfg->verbose_level > 2)
10764 mono_print_ins_index (1, ins);
10768 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
10770 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
10771 * by storing the current native offset into MonoMethodVar->live_range_start/end.
10773 for (i = 0; i < cfg->num_varinfo; ++i) {
10774 int vreg = MONO_VARINFO (cfg, i)->vreg;
10777 if (live_range_start [vreg]) {
10778 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
10780 ins->inst_c1 = vreg;
10781 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
10783 if (live_range_end [vreg]) {
10784 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
10786 ins->inst_c1 = vreg;
10787 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
10792 g_free (live_range_start);
10793 g_free (live_range_end);
10794 g_free (live_range_start_bb);
10795 g_free (live_range_end_bb);
10800 * - use 'iadd' instead of 'int_add'
10801 * - handling ovf opcodes: decompose in method_to_ir.
10802 * - unify iregs/fregs
10803 * -> partly done, the missing parts are:
10804 * - a more complete unification would involve unifying the hregs as well, so
10805 * code wouldn't need if (fp) all over the place. but that would mean the hregs
10806 * would no longer map to the machine hregs, so the code generators would need to
10807 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
10808 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
10809 * fp/non-fp branches speeds it up by about 15%.
10810 * - use sext/zext opcodes instead of shifts
10812 * - get rid of TEMPLOADs if possible and use vregs instead
10813 * - clean up usage of OP_P/OP_ opcodes
10814 * - cleanup usage of DUMMY_USE
10815 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
10817 * - set the stack type and allocate a dreg in the EMIT_NEW macros
10818 * - get rid of all the <foo>2 stuff when the new JIT is ready.
10819 * - make sure handle_stack_args () is called before the branch is emitted
10820 * - when the new IR is done, get rid of all unused stuff
10821 * - COMPARE/BEQ as separate instructions or unify them ?
10822 * - keeping them separate allows specialized compare instructions like
10823 * compare_imm, compare_membase
10824 * - most back ends unify fp compare+branch, fp compare+ceq
10825 * - integrate mono_save_args into inline_method
10826 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
10827 * - handle long shift opts on 32 bit platforms somehow: they require
10828 * 3 sregs (2 for arg1 and 1 for arg2)
10829 * - make byref a 'normal' type.
10830 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
10831 * variable if needed.
10832 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
10833 * like inline_method.
10834 * - remove inlining restrictions
10835 * - fix LNEG and enable cfold of INEG
10836 * - generalize x86 optimizations like ldelema as a peephole optimization
10837 * - add store_mem_imm for amd64
10838 * - optimize the loading of the interruption flag in the managed->native wrappers
10839 * - avoid special handling of OP_NOP in passes
10840 * - move code inserting instructions into one function/macro.
10841 * - try a coalescing phase after liveness analysis
10842 * - add float -> vreg conversion + local optimizations on !x86
10843 * - figure out how to handle decomposed branches during optimizations, ie.
10844 * compare+branch, op_jump_table+op_br etc.
10845 * - promote RuntimeXHandles to vregs
10846 * - vtype cleanups:
10847 * - add a NEW_VARLOADA_VREG macro
10848 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
10849 * accessing vtype fields.
10850 * - get rid of I8CONST on 64 bit platforms
10851 * - dealing with the increase in code size due to branches created during opcode
10853 * - use extended basic blocks
10854 * - all parts of the JIT
10855 * - handle_global_vregs () && local regalloc
10856 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
10857 * - sources of increase in code size:
10860 * - isinst and castclass
10861 * - lvregs not allocated to global registers even if used multiple times
10862 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
10864 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
10865 * - add all micro optimizations from the old JIT
10866 * - put tree optimizations into the deadce pass
10867 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
10868 * specific function.
10869 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
10870 * fcompare + branchCC.
10871 * - create a helper function for allocating a stack slot, taking into account
10872 * MONO_CFG_HAS_SPILLUP.
10874 * - merge the ia64 switch changes.
10875 * - optimize mono_regstate2_alloc_int/float.
10876 * - fix the pessimistic handling of variables accessed in exception handler blocks.
10877 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
10878 * parts of the tree could be separated by other instructions, killing the tree
10879 * arguments, or stores killing loads etc. Also, should we fold loads into other
10880 * instructions if the result of the load is used multiple times ?
10881 * - make the REM_IMM optimization in mini-x86.c arch-independent.
10882 * - LAST MERGE: 108395.
10883 * - when returning vtypes in registers, generate IR and append it to the end of the
10884 * last bb instead of doing it in the epilog.
10885 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
10893 - When to decompose opcodes:
10894 - earlier: this makes some optimizations hard to implement, since the low level IR
10895 no longer contains the neccessary information. But it is easier to do.
10896 - later: harder to implement, enables more optimizations.
10897 - Branches inside bblocks:
10898 - created when decomposing complex opcodes.
10899 - branches to another bblock: harmless, but not tracked by the branch
10900 optimizations, so need to branch to a label at the start of the bblock.
10901 - branches to inside the same bblock: very problematic, trips up the local
10902 reg allocator. Can be fixed by spitting the current bblock, but that is a
10903 complex operation, since some local vregs can become global vregs etc.
10904 - Local/global vregs:
10905 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
10906 local register allocator.
10907 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
10908 structure, created by mono_create_var (). Assigned to hregs or the stack by
10909 the global register allocator.
10910 - When to do optimizations like alu->alu_imm:
10911 - earlier -> saves work later on since the IR will be smaller/simpler
10912 - later -> can work on more instructions
10913 - Handling of valuetypes:
10914 - When a vtype is pushed on the stack, a new temporary is created, an
10915 instruction computing its address (LDADDR) is emitted and pushed on
10916 the stack. Need to optimize cases when the vtype is used immediately as in
10917 argument passing, stloc etc.
10918 - Instead of the to_end stuff in the old JIT, simply call the function handling
10919 the values on the stack before emitting the last instruction of the bb.
10922 #endif /* DISABLE_JIT */