2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
30 #include <mono/utils/memcheck.h>
32 #include <mono/metadata/assembly.h>
33 #include <mono/metadata/loader.h>
34 #include <mono/metadata/tabledefs.h>
35 #include <mono/metadata/class.h>
36 #include <mono/metadata/object.h>
37 #include <mono/metadata/exception.h>
38 #include <mono/metadata/opcodes.h>
39 #include <mono/metadata/mono-endian.h>
40 #include <mono/metadata/tokentype.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/marshal.h>
43 #include <mono/metadata/debug-helpers.h>
44 #include <mono/metadata/mono-debug.h>
45 #include <mono/metadata/gc-internal.h>
46 #include <mono/metadata/security-manager.h>
47 #include <mono/metadata/threads-types.h>
48 #include <mono/metadata/security-core-clr.h>
49 #include <mono/metadata/monitor.h>
50 #include <mono/metadata/profiler-private.h>
51 #include <mono/metadata/profiler.h>
52 #include <mono/utils/mono-compiler.h>
59 #include "jit-icalls.h"
61 #include "debugger-agent.h"
63 #define BRANCH_COST 100
64 #define INLINE_LENGTH_LIMIT 20
65 #define INLINE_FAILURE do {\
66 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
69 #define CHECK_CFG_EXCEPTION do {\
70 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
73 #define METHOD_ACCESS_FAILURE do { \
74 char *method_fname = mono_method_full_name (method, TRUE); \
75 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
76 cfg->exception_type = MONO_EXCEPTION_METHOD_ACCESS; \
77 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
78 g_free (method_fname); \
79 g_free (cil_method_fname); \
80 goto exception_exit; \
82 #define FIELD_ACCESS_FAILURE do { \
83 char *method_fname = mono_method_full_name (method, TRUE); \
84 char *field_fname = mono_field_full_name (field); \
85 cfg->exception_type = MONO_EXCEPTION_FIELD_ACCESS; \
86 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
87 g_free (method_fname); \
88 g_free (field_fname); \
89 goto exception_exit; \
91 #define GENERIC_SHARING_FAILURE(opcode) do { \
92 if (cfg->generic_sharing_context) { \
93 if (cfg->verbose_level > 2) \
94 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
95 cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED; \
96 goto exception_exit; \
100 /* Determine whenever 'ins' represents a load of the 'this' argument */
101 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
103 static int ldind_to_load_membase (int opcode);
104 static int stind_to_store_membase (int opcode);
106 int mono_op_to_op_imm (int opcode);
107 int mono_op_to_op_imm_noemul (int opcode);
109 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
110 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
111 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
113 /* helper methods signature */
114 extern MonoMethodSignature *helper_sig_class_init_trampoline;
115 extern MonoMethodSignature *helper_sig_domain_get;
116 extern MonoMethodSignature *helper_sig_generic_class_init_trampoline;
117 extern MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
118 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
119 extern MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
122 * Instruction metadata
130 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
131 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
137 #if SIZEOF_REGISTER == 8
142 /* keep in sync with the enum in mini.h */
145 #include "mini-ops.h"
150 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
151 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
153 * This should contain the index of the last sreg + 1. This is not the same
154 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
156 const gint8 ins_sreg_counts[] = {
157 #include "mini-ops.h"
162 extern GHashTable *jit_icall_name_hash;
164 #define MONO_INIT_VARINFO(vi,id) do { \
165 (vi)->range.first_use.pos.bid = 0xffff; \
/*
 * mono_inst_set_src_registers:
 *   Copy the three source vregs from 'regs' into 'ins' (sreg1..sreg3).
 *   NOTE(review): this chunk is line-sampled; signature/brace lines are elided.
 */
171 mono_inst_set_src_registers (MonoInst *ins, int *regs)
173 ins->sreg1 = regs [0];
174 ins->sreg2 = regs [1];
175 ins->sreg3 = regs [2];
/* Public wrapper: allocate a fresh integer vreg in 'cfg'. */
179 mono_alloc_ireg (MonoCompile *cfg)
181 return alloc_ireg (cfg);
/* Public wrapper: allocate a fresh floating-point vreg in 'cfg'. */
185 mono_alloc_freg (MonoCompile *cfg)
187 return alloc_freg (cfg);
/* Public wrapper: allocate a fresh pointer-sized vreg in 'cfg'. */
191 mono_alloc_preg (MonoCompile *cfg)
193 return alloc_preg (cfg);
/* Public wrapper: allocate a destination vreg suited to 'stack_type'. */
197 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
199 return alloc_dreg (cfg, stack_type);
/*
 * mono_type_to_regmove:
 *   Map a MonoType to the move opcode class used when copying it between
 *   vregs. Enums and generic instances are unwrapped to their underlying
 *   type and re-dispatched. NOTE(review): this chunk is line-sampled; the
 *   return statements between the case labels are elided, so the exact
 *   opcode returned per case cannot be confirmed from here.
 */
203 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
209 switch (type->type) {
212 case MONO_TYPE_BOOLEAN:
224 case MONO_TYPE_FNPTR:
226 case MONO_TYPE_CLASS:
227 case MONO_TYPE_STRING:
228 case MONO_TYPE_OBJECT:
229 case MONO_TYPE_SZARRAY:
230 case MONO_TYPE_ARRAY:
234 #if SIZEOF_REGISTER == 8
/* Enums move like their underlying integral base type. */
243 case MONO_TYPE_VALUETYPE:
244 if (type->data.klass->enumtype) {
245 type = mono_class_enum_basetype (type->data.klass);
/* SIMD-capable value types get special treatment (elided branch). */
248 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
251 case MONO_TYPE_TYPEDBYREF:
/* Generic instances: re-dispatch on the container class's open type. */
253 case MONO_TYPE_GENERICINST:
254 type = &type->data.generic_class->container_class->byval_arg;
/* Type/method vars only appear under generic sharing. */
258 g_assert (cfg->generic_sharing_context);
261 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/*
 * mono_print_bb:
 *   Debug dump of a basic block: its IN/OUT edges (block number and dfn)
 *   followed by every instruction in the block, prefixed with 'msg'.
 */
267 mono_print_bb (MonoBasicBlock *bb, const char *msg)
272 printf ("\n%s %d: [IN: ", msg, bb->block_num);
273 for (i = 0; i < bb->in_count; ++i)
274 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
276 for (i = 0; i < bb->out_count; ++i)
277 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
279 for (tree = bb->code; tree; tree = tree->next)
280 mono_print_ins_index (-1, tree);
284 * Can't put this at the beginning, since other files reference stuff from this
289 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
291 #define GET_BBLOCK(cfg,tblock,ip) do { \
292 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
294 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
295 NEW_BBLOCK (cfg, (tblock)); \
296 (tblock)->cil_code = (ip); \
297 ADD_BBLOCK (cfg, (tblock)); \
301 #if defined(TARGET_X86) || defined(TARGET_AMD64)
302 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
303 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
304 (dest)->dreg = alloc_preg ((cfg)); \
305 (dest)->sreg1 = (sr1); \
306 (dest)->sreg2 = (sr2); \
307 (dest)->inst_imm = (imm); \
308 (dest)->backend.shift_amount = (shift); \
309 MONO_ADD_INS ((cfg)->cbb, (dest)); \
313 #if SIZEOF_REGISTER == 8
314 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
315 /* FIXME: Need to add many more cases */ \
316 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
318 int dr = alloc_preg (cfg); \
319 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
320 (ins)->sreg2 = widen->dreg; \
324 #define ADD_WIDEN_OP(ins, arg1, arg2)
327 #define ADD_BINOP(op) do { \
328 MONO_INST_NEW (cfg, ins, (op)); \
330 ins->sreg1 = sp [0]->dreg; \
331 ins->sreg2 = sp [1]->dreg; \
332 type_from_op (ins, sp [0], sp [1]); \
334 /* Have to insert a widening op */ \
335 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
336 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
337 MONO_ADD_INS ((cfg)->cbb, (ins)); \
338 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
341 #define ADD_UNOP(op) do { \
342 MONO_INST_NEW (cfg, ins, (op)); \
344 ins->sreg1 = sp [0]->dreg; \
345 type_from_op (ins, sp [0], NULL); \
347 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
348 MONO_ADD_INS ((cfg)->cbb, (ins)); \
349 *sp++ = mono_decompose_opcode (cfg, ins); \
352 #define ADD_BINCOND(next_block) do { \
355 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
356 cmp->sreg1 = sp [0]->dreg; \
357 cmp->sreg2 = sp [1]->dreg; \
358 type_from_op (cmp, sp [0], sp [1]); \
360 type_from_op (ins, sp [0], sp [1]); \
361 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
362 GET_BBLOCK (cfg, tblock, target); \
363 link_bblock (cfg, bblock, tblock); \
364 ins->inst_true_bb = tblock; \
365 if ((next_block)) { \
366 link_bblock (cfg, bblock, (next_block)); \
367 ins->inst_false_bb = (next_block); \
368 start_new_bblock = 1; \
370 GET_BBLOCK (cfg, tblock, ip); \
371 link_bblock (cfg, bblock, tblock); \
372 ins->inst_false_bb = tblock; \
373 start_new_bblock = 2; \
375 if (sp != stack_start) { \
376 handle_stack_args (cfg, stack_start, sp - stack_start); \
377 CHECK_UNVERIFIABLE (cfg); \
379 MONO_ADD_INS (bblock, cmp); \
380 MONO_ADD_INS (bblock, ins); \
384 * link_bblock: Links two basic blocks
386 * links two basic blocks in the control flow graph, the 'from'
387 * argument is the starting block and the 'to' argument is the block
388 * the control flow ends to after 'from'.
/*
 * Adds the edge from->to to the CFG: 'to' is appended to from->out_bb and
 * 'from' to to->in_bb, skipping the update when the edge already exists.
 * The verbose printfs below trace edge creation (guard condition elided).
 * NOTE(review): this chunk is line-sampled; the early-return on duplicate
 * edges and the array-store/count-increment lines are elided.
 */
391 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
393 MonoBasicBlock **newa;
397 if (from->cil_code) {
399 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
401 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
404 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
406 printf ("edge from entry to exit\n");
/* Skip if 'to' is already a successor of 'from'. */
411 for (i = 0; i < from->out_count; ++i) {
412 if (to == from->out_bb [i]) {
/* Grow from->out_bb by one slot (mempool arrays are copy-on-grow). */
418 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
419 for (i = 0; i < from->out_count; ++i) {
420 newa [i] = from->out_bb [i];
/* Same dance for the reverse direction: to->in_bb. */
428 for (i = 0; i < to->in_count; ++i) {
429 if (from == to->in_bb [i]) {
435 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
436 for (i = 0; i < to->in_count; ++i) {
437 newa [i] = to->in_bb [i];
/* Public wrapper around the static link_bblock helper. */
446 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
448 link_bblock (cfg, from, to);
452 * mono_find_block_region:
454 * We mark each basic block with a region ID. We use that to avoid BB
455 * optimizations when blocks are in different regions.
458 * A region token that encodes where this region is, and information
459 * about the clause owner for this block.
461 * The region encodes the try/catch/filter clause that owns this block
462 * as well as the type. -1 is a special value that represents a block
463 * that is in none of try/catch/filter.
466 mono_find_block_region (MonoCompile *cfg, int offset)
468 MonoMethod *method = cfg->method;
469 MonoMethodHeader *header = mono_method_get_header (method);
470 MonoExceptionClause *clause;
/* Scan every EH clause; first match wins. Region token layout:
 * (clause index + 1) << 8, OR'ed with a MONO_REGION_* kind and the
 * clause flags (see the function comment above). */
473 for (i = 0; i < header->num_clauses; ++i) {
474 clause = &header->clauses [i];
/* Filter code lives between filter_offset and handler_offset. */
475 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
476 (offset < (clause->handler_offset)))
477 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
/* Inside the handler: classify as finally/fault/catch. */
479 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
480 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
481 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
482 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
483 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
485 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* Inside the protected (try) range itself. */
488 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
489 return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block:
 *   Collect the handler blocks (of EH clause kind 'type') whose protected
 *   region contains 'ip' but not 'target' — i.e. the handlers that must run
 *   when control leaves the region via a branch from ip to target. Returns
 *   them accumulated in a GList ('res'; init/return lines elided).
 */
496 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
498 MonoMethod *method = cfg->method;
499 MonoMethodHeader *header = mono_method_get_header (method);
500 MonoExceptionClause *clause;
501 MonoBasicBlock *handler;
505 for (i = 0; i < header->num_clauses; ++i) {
506 clause = &header->clauses [i];
/* Branch exits this clause: ip is inside, target is outside. */
507 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
508 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
509 if (clause->flags == type) {
510 handler = cfg->cil_offset_to_bb [clause->handler_offset];
512 res = g_list_append (res, handler);
/*
 * mono_create_spvar_for_region:
 *   Return (creating and caching on first use) the stack-pointer save
 *   variable associated with EH region 'region', keyed in cfg->spvars.
 */
520 mono_create_spvar_for_region (MonoCompile *cfg, int region)
524 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
528 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
529 /* prevent it from being register allocated */
530 var->flags |= MONO_INST_INDIRECT;
532 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up (without creating) the exception variable cached for a CIL offset. */
536 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
538 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/*
 * mono_create_exvar_for_offset:
 *   Return (creating and caching on first use) the object-typed variable
 *   that holds the in-flight exception for the handler at CIL 'offset'.
 */
542 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
546 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
550 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
551 /* prevent it from being register allocated */
552 var->flags |= MONO_INST_INDIRECT;
554 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
560 * Returns the type used in the eval stack when @type is loaded.
561 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/*
 * type_to_eval_stack_type:
 *   Set inst->type (STACK_I4/I8/PTR/R8/OBJ/MP/VTYPE/INV) and inst->klass
 *   according to the MonoType being loaded onto the evaluation stack.
 *   Byref types become STACK_MP; enums and generic instances are unwrapped
 *   and re-dispatched. NOTE(review): chunk is line-sampled — the byref
 *   check, 'goto handle_enum'-style control flow and break statements
 *   between cases are elided.
 */
564 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
568 inst->klass = klass = mono_class_from_mono_type (type);
570 inst->type = STACK_MP;
575 switch (type->type) {
577 inst->type = STACK_INV;
581 case MONO_TYPE_BOOLEAN:
587 inst->type = STACK_I4;
592 case MONO_TYPE_FNPTR:
593 inst->type = STACK_PTR;
595 case MONO_TYPE_CLASS:
596 case MONO_TYPE_STRING:
597 case MONO_TYPE_OBJECT:
598 case MONO_TYPE_SZARRAY:
599 case MONO_TYPE_ARRAY:
600 inst->type = STACK_OBJ;
604 inst->type = STACK_I8;
608 inst->type = STACK_R8;
/* Value types: enums take their underlying primitive stack type. */
610 case MONO_TYPE_VALUETYPE:
611 if (type->data.klass->enumtype) {
612 type = mono_class_enum_basetype (type->data.klass);
616 inst->type = STACK_VTYPE;
619 case MONO_TYPE_TYPEDBYREF:
620 inst->klass = mono_defaults.typed_reference_class;
621 inst->type = STACK_VTYPE;
/* Generic instances: re-dispatch on the container's open type. */
623 case MONO_TYPE_GENERICINST:
624 type = &type->data.generic_class->container_class->byval_arg;
627 case MONO_TYPE_MVAR :
628 /* FIXME: all the arguments must be references for now,
629 * later look inside cfg and see if the arg num is
632 g_assert (cfg->generic_sharing_context);
633 inst->type = STACK_OBJ;
636 g_error ("unknown type 0x%02x in eval stack type", type->type);
641 * The following tables are used to quickly validate the IL code in type_from_op ().
644 bin_num_table [STACK_MAX] [STACK_MAX] = {
645 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
646 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
647 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
648 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
649 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
650 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
651 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
652 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
657 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
660 /* reduce the size of this table */
662 bin_int_table [STACK_MAX] [STACK_MAX] = {
663 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
664 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
665 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
666 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
667 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
668 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
669 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
670 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
674 bin_comp_table [STACK_MAX] [STACK_MAX] = {
675 /* Inv i L p F & O vt */
677 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
678 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
679 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
680 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
681 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
682 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
683 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
686 /* reduce the size of this table */
688 shift_table [STACK_MAX] [STACK_MAX] = {
689 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
690 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
691 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
692 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
693 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
694 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
695 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
696 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
700 * Tables to map from the non-specific opcode to the matching
701 * type-specific opcode.
703 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
705 binops_op_map [STACK_MAX] = {
706 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
709 /* handles from CEE_NEG to CEE_CONV_U8 */
711 unops_op_map [STACK_MAX] = {
712 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
715 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
717 ovfops_op_map [STACK_MAX] = {
718 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
721 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
723 ovf2ops_op_map [STACK_MAX] = {
724 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
727 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
729 ovf3ops_op_map [STACK_MAX] = {
730 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
733 /* handles from CEE_BEQ to CEE_BLT_UN */
735 beqops_op_map [STACK_MAX] = {
736 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
739 /* handles from CEE_CEQ to CEE_CLT_UN */
741 ceqops_op_map [STACK_MAX] = {
742 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
746 * Sets ins->type (the type on the eval stack) according to the
747 * type of the opcode and the arguments to it.
748 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
750 * FIXME: this function sets ins->type unconditionally in some cases, but
751 * it should set it to invalid for some types (a conv.x on an object)
/*
 * type_from_op:
 *   Given a generic (non-type-specific) opcode in 'ins' and its stack
 *   operands src1/src2, set ins->type from the validation tables above and
 *   specialize ins->opcode via the *_op_map offset tables (e.g. CEE_ADD ->
 *   OP_IADD/OP_LADD/...). Invalid operand combinations yield STACK_INV.
 *   NOTE(review): chunk is line-sampled — the case labels and break
 *   statements for many groups are elided; comments below mark the groups
 *   by their visible actions only.
 */
754 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
756 switch (ins->opcode) {
/* Arithmetic binops: type from bin_num_table, opcode via binops_op_map. */
763 /* FIXME: check unverifiable args for STACK_MP */
764 ins->type = bin_num_table [src1->type] [src2->type];
765 ins->opcode += binops_op_map [ins->type];
/* Integer-only binops (and/or/xor...). */
772 ins->type = bin_int_table [src1->type] [src2->type];
773 ins->opcode += binops_op_map [ins->type];
/* Shifts: result type follows the value operand. */
778 ins->type = shift_table [src1->type] [src2->type];
779 ins->opcode += binops_op_map [ins->type];
/* Compare: pick L/F/I variant from the first operand's width. */
784 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
785 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
786 ins->opcode = OP_LCOMPARE;
787 else if (src1->type == STACK_R8)
788 ins->opcode = OP_FCOMPARE;
790 ins->opcode = OP_ICOMPARE;
/* Immediate compare: same dispatch, single operand. */
792 case OP_ICOMPARE_IMM:
793 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
794 if ((src1->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
795 ins->opcode = OP_LCOMPARE_IMM;
/* Conditional branches (beq family). */
807 ins->opcode += beqops_op_map [src1->type];
/* ceq family: comparisons producing an I4 on the stack. */
810 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
811 ins->opcode += ceqops_op_map [src1->type];
/* Ordered-only comparisons: low bit of the table gates validity. */
817 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
818 ins->opcode += ceqops_op_map [src1->type];
/* neg */
822 ins->type = neg_table [src1->type];
823 ins->opcode += unops_op_map [ins->type];
/* not: only integral/pointer operands are valid. */
826 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
827 ins->type = src1->type;
829 ins->type = STACK_INV;
830 ins->opcode += unops_op_map [ins->type];
/* conv to small int: result is I4. */
836 ins->type = STACK_I4;
837 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: unsigned int -> float. */
840 ins->type = STACK_R8;
841 switch (src1->type) {
844 ins->opcode = OP_ICONV_TO_R_UN;
847 ins->opcode = OP_LCONV_TO_R_UN;
/* Overflow-checked conversions to (u)int8/16/32. */
851 case CEE_CONV_OVF_I1:
852 case CEE_CONV_OVF_U1:
853 case CEE_CONV_OVF_I2:
854 case CEE_CONV_OVF_U2:
855 case CEE_CONV_OVF_I4:
856 case CEE_CONV_OVF_U4:
857 ins->type = STACK_I4;
858 ins->opcode += ovf3ops_op_map [src1->type];
/* Overflow-checked, unsigned-source conversions to native int. */
860 case CEE_CONV_OVF_I_UN:
861 case CEE_CONV_OVF_U_UN:
862 ins->type = STACK_PTR;
863 ins->opcode += ovf2ops_op_map [src1->type];
/* Overflow-checked, unsigned-source conversions to (u)int8/16/32. */
865 case CEE_CONV_OVF_I1_UN:
866 case CEE_CONV_OVF_I2_UN:
867 case CEE_CONV_OVF_I4_UN:
868 case CEE_CONV_OVF_U1_UN:
869 case CEE_CONV_OVF_U2_UN:
870 case CEE_CONV_OVF_U4_UN:
871 ins->type = STACK_I4;
872 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: to native unsigned int; width-dependent per source type. */
875 ins->type = STACK_PTR;
876 switch (src1->type) {
878 ins->opcode = OP_ICONV_TO_U;
882 #if SIZEOF_REGISTER == 8
883 ins->opcode = OP_LCONV_TO_U;
885 ins->opcode = OP_MOVE;
889 ins->opcode = OP_LCONV_TO_U;
892 ins->opcode = OP_FCONV_TO_U;
/* conv to (u)int64. */
898 ins->type = STACK_I8;
899 ins->opcode += unops_op_map [src1->type];
901 case CEE_CONV_OVF_I8:
902 case CEE_CONV_OVF_U8:
903 ins->type = STACK_I8;
904 ins->opcode += ovf3ops_op_map [src1->type];
906 case CEE_CONV_OVF_U8_UN:
907 case CEE_CONV_OVF_I8_UN:
908 ins->type = STACK_I8;
909 ins->opcode += ovf2ops_op_map [src1->type];
/* conv to float. */
913 ins->type = STACK_R8;
914 ins->opcode += unops_op_map [src1->type];
917 ins->type = STACK_R8;
/* Overflow-checked ops producing I4 / native int. */
921 ins->type = STACK_I4;
922 ins->opcode += ovfops_op_map [src1->type];
927 ins->type = STACK_PTR;
928 ins->opcode += ovfops_op_map [src1->type];
/* Overflow arithmetic: no R8 variant exists, mark invalid. */
936 ins->type = bin_num_table [src1->type] [src2->type];
937 ins->opcode += ovfops_op_map [src1->type];
938 if (ins->type == STACK_R8)
939 ins->type = STACK_INV;
/* Memory loads: stack type follows the load width. */
941 case OP_LOAD_MEMBASE:
942 ins->type = STACK_PTR;
944 case OP_LOADI1_MEMBASE:
945 case OP_LOADU1_MEMBASE:
946 case OP_LOADI2_MEMBASE:
947 case OP_LOADU2_MEMBASE:
948 case OP_LOADI4_MEMBASE:
949 case OP_LOADU4_MEMBASE:
950 ins->type = STACK_PTR;
952 case OP_LOADI8_MEMBASE:
953 ins->type = STACK_I8;
955 case OP_LOADR4_MEMBASE:
956 case OP_LOADR8_MEMBASE:
957 ins->type = STACK_R8;
960 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers carry object_class as a placeholder klass. */
964 if (ins->type == STACK_MP)
965 ins->klass = mono_defaults.object_class;
970 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
976 param_table [STACK_MAX] [STACK_MAX] = {
/*
 * check_values_to_signature:
 *   Validate that the argument instructions in 'args' are compatible with
 *   the parameter types of 'sig' (byref-ness and reference/float checks
 *   visible below). NOTE(review): chunk is heavily line-sampled — return
 *   statements and most case labels are elided, so the exact accept/reject
 *   logic cannot be confirmed from here.
 */
981 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
985 switch (args->type) {
995 for (i = 0; i < sig->param_count; ++i) {
996 switch (args [i].type) {
/* Managed-pointer arg requires a byref parameter (and vice versa). */
1000 if (!sig->params [i]->byref)
1004 if (sig->params [i]->byref)
1006 switch (sig->params [i]->type) {
1007 case MONO_TYPE_CLASS:
1008 case MONO_TYPE_STRING:
1009 case MONO_TYPE_OBJECT:
1010 case MONO_TYPE_SZARRAY:
1011 case MONO_TYPE_ARRAY:
/* Float args must match R4/R8 non-byref parameters. */
1018 if (sig->params [i]->byref)
1020 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1029 /*if (!param_table [args [i].type] [sig->params [i]->type])
1037 * When we need a pointer to the current domain many times in a method, we
1038 * call mono_domain_get() once and we store the result in a local variable.
1039 * This function returns the variable that represents the MonoDomain*.
/* Lazily create and return the local variable caching the MonoDomain*
 * (see the comment above: one mono_domain_get() call, then reuse). */
1041 inline static MonoInst *
1042 mono_get_domainvar (MonoCompile *cfg)
1044 if (!cfg->domainvar)
1045 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1046 return cfg->domainvar;
1050 * The got_var contains the address of the Global Offset Table when AOT
/*
 * mono_get_got_var:
 *   Lazily create and return the variable holding the GOT address; only
 *   meaningful when the architecture needs an explicit GOT var
 *   (MONO_ARCH_NEED_GOT_VAR) and we are compiling AOT. The non-AOT and
 *   !NEED_GOT_VAR return paths are elided in this chunk.
 */
1054 mono_get_got_var (MonoCompile *cfg)
1056 #ifdef MONO_ARCH_NEED_GOT_VAR
1057 if (!cfg->compile_aot)
1059 if (!cfg->got_var) {
1060 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1062 return cfg->got_var;
/*
 * mono_get_vtable_var:
 *   Lazily create and return the rgctx (runtime generic context) variable.
 *   Only valid under generic sharing; the variable is forced onto the
 *   stack because the trampolines access it by address.
 */
1069 mono_get_vtable_var (MonoCompile *cfg)
1071 g_assert (cfg->generic_sharing_context);
1073 if (!cfg->rgctx_var) {
1074 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1075 /* force the var to be stack allocated */
1076 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1079 return cfg->rgctx_var;
/* Map an eval-stack type (ins->type) back to a MonoType*, using
 * ins->klass for object/valuetype/managed-pointer entries. */
1083 type_from_stack_type (MonoInst *ins) {
1084 switch (ins->type) {
1085 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1086 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1087 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1088 case STACK_R8: return &mono_defaults.double_class->byval_arg;
/* Managed pointer: the klass's this_arg is its byref type. */
1090 return &ins->klass->this_arg;
1091 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1092 case STACK_VTYPE: return &ins->klass->byval_arg;
1094 g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * type_to_stack_type:
 *   Inverse of type_from_stack_type: map a MonoType to its STACK_* eval
 *   stack class, after stripping enum wrappers via
 *   mono_type_get_underlying_type. NOTE(review): chunk is line-sampled;
 *   the return statements between the case labels are elided.
 */
1099 static G_GNUC_UNUSED int
1100 type_to_stack_type (MonoType *t)
1102 t = mono_type_get_underlying_type (t);
1106 case MONO_TYPE_BOOLEAN:
1109 case MONO_TYPE_CHAR:
1116 case MONO_TYPE_FNPTR:
1118 case MONO_TYPE_CLASS:
1119 case MONO_TYPE_STRING:
1120 case MONO_TYPE_OBJECT:
1121 case MONO_TYPE_SZARRAY:
1122 case MONO_TYPE_ARRAY:
1130 case MONO_TYPE_VALUETYPE:
1131 case MONO_TYPE_TYPEDBYREF:
/* Generic instances are value types or references per the inst. */
1133 case MONO_TYPE_GENERICINST:
1134 if (mono_type_generic_inst_is_valuetype (t))
1140 g_assert_not_reached ();
/*
 * array_access_to_klass:
 *   Map a CEE_LDELEM_*/CEE_STELEM_* opcode to the element MonoClass it
 *   accesses. NOTE(review): chunk is line-sampled; most case labels are
 *   elided — only the returned classes and the LDELEM/STELEM_REF pair are
 *   visible.
 */
1147 array_access_to_klass (int opcode)
1151 return mono_defaults.byte_class;
1153 return mono_defaults.uint16_class;
1156 return mono_defaults.int_class;
1159 return mono_defaults.sbyte_class;
1162 return mono_defaults.int16_class;
1165 return mono_defaults.int32_class;
1167 return mono_defaults.uint32_class;
1170 return mono_defaults.int64_class;
1173 return mono_defaults.single_class;
1176 return mono_defaults.double_class;
1177 case CEE_LDELEM_REF:
1178 case CEE_STELEM_REF:
1179 return mono_defaults.object_class;
1181 g_assert_not_reached ();
1187 * We try to share variables when possible
/*
 * mono_compile_get_interface_var:
 *   Return a local variable for eval-stack slot 'slot' holding a value of
 *   ins' stack type, reusing a previously created one when possible (cache
 *   indexed by slot x STACK_MAX in cfg->intvars). Slots beyond max_stack
 *   (possible when inlining) always get a fresh variable.
 */
1190 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1195 /* inlining can result in deeper stacks */
1196 if (slot >= mono_method_get_header (cfg->method)->max_stack)
1197 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1199 pos = ins->type - 1 + slot * STACK_MAX;
1201 switch (ins->type) {
/* Cache hit: reuse the existing var (vnum 0 means "not yet created"). */
1208 if ((vnum = cfg->intvars [pos]))
1209 return cfg->varinfo [vnum];
1210 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1211 cfg->intvars [pos] = res->inst_c0;
/* Non-cacheable stack types fall through to an uncached var. */
1214 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 *   For AOT compilation, remember the image+token pair that produced
 *   runtime object 'key' (method, field, ...) so the AOT compiler can emit
 *   a token-based reference for it later (stored in cfg->token_info_hash).
 */
1220 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1223 * Don't use this if a generic_context is set, since that means AOT can't
1224 * look up the method using just the image+token.
1225 * table == 0 means this is a reference made from a wrapper.
1227 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1228 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1229 jump_info_token->image = image;
1230 jump_info_token->token = token;
1231 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1236 * This function is called to handle items that are left on the evaluation stack
1237 * at basic block boundaries. What happens is that we save the values to local variables
1238 * and we reload them later when first entering the target basic block (with the
1239 * handle_loaded_temps () function).
1240 * A single joint point will use the same variables (stored in the array bb->out_stack or
1241 * bb->in_stack, if the basic block is before or after the joint point).
1243 * This function needs to be called _before_ emitting the last instruction of
1244 * the bb (i.e. before emitting a branch).
1245 * If the stack merge fails at a join point, cfg->unverifiable is set.
/*
 * handle_stack_args:
 *   Spill the 'count' values left on the eval stack ('sp') at the end of
 *   the current bblock into shared local variables, so successor bblocks
 *   can reload them (see the long comment above this function). Sets
 *   cfg->unverifiable on stack-depth mismatch at a join point.
 *   NOTE(review): chunk is line-sampled; several control-flow lines
 *   (continue/break, bindex updates, closing braces) are elided.
 */
1248 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1251 MonoBasicBlock *bb = cfg->cbb;
1252 MonoBasicBlock *outb;
1253 MonoInst *inst, **locals;
1258 if (cfg->verbose_level > 3)
1259 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bblock: pick/create the out_stack variables. */
1260 if (!bb->out_scount) {
1261 bb->out_scount = count;
1262 //printf ("bblock %d has out:", bb->block_num);
/* Prefer reusing a successor's already-assigned in_stack. */
1264 for (i = 0; i < bb->out_count; ++i) {
1265 outb = bb->out_bb [i];
1266 /* exception handlers are linked, but they should not be considered for stack args */
1267 if (outb->flags & BB_EXCEPTION_HANDLER)
1269 //printf (" %d", outb->block_num);
1270 if (outb->in_stack) {
1272 bb->out_stack = outb->in_stack;
/* No successor had one: allocate fresh out_stack variables. */
1278 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1279 for (i = 0; i < count; ++i) {
1281 * try to reuse temps already allocated for this purpouse, if they occupy the same
1282 * stack slot and if they are of the same type.
1283 * This won't cause conflicts since if 'local' is used to
1284 * store one of the values in the in_stack of a bblock, then
1285 * the same variable will be used for the same outgoing stack
1287 * This doesn't work when inlining methods, since the bblocks
1288 * in the inlined methods do not inherit their in_stack from
1289 * the bblock they are inlined to. See bug #58863 for an
1292 if (cfg->inlined_method)
1293 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1295 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate the stack shape to each successor; verify depth matches. */
1300 for (i = 0; i < bb->out_count; ++i) {
1301 outb = bb->out_bb [i];
1302 /* exception handlers are linked, but they should not be considered for stack args */
1303 if (outb->flags & BB_EXCEPTION_HANDLER)
1305 if (outb->in_scount) {
1306 if (outb->in_scount != bb->out_scount) {
1307 cfg->unverifiable = TRUE;
1310 continue; /* check they are the same locals */
1312 outb->in_scount = count;
1313 outb->in_stack = bb->out_stack;
/* Emit the actual stores of sp[i] into the chosen locals. */
1316 locals = bb->out_stack;
1318 for (i = 0; i < count; ++i) {
1319 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1320 inst->cil_code = sp [i]->cil_code;
1321 sp [i] = locals [i];
1322 if (cfg->verbose_level > 3)
1323 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1327 * It is possible that the out bblocks already have in_stack assigned, and
1328 * the in_stacks differ. In this case, we will store to all the different
1335 /* Find a bblock which has a different in_stack */
1337 while (bindex < bb->out_count) {
1338 outb = bb->out_bb [bindex];
1339 /* exception handlers are linked, but they should not be considered for stack args */
1340 if (outb->flags & BB_EXCEPTION_HANDLER) {
1344 if (outb->in_stack != locals) {
1345 for (i = 0; i < count; ++i) {
1346 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1347 inst->cil_code = sp [i]->cil_code;
1348 sp [i] = locals [i];
1349 if (cfg->verbose_level > 3)
1350 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1352 locals = outb->in_stack;
1361 /* Emit code which loads interface_offsets [klass->interface_id]
1362 * The array is stored in memory before vtable.
/*
 * mini_emit_load_intf_reg_vtable:
 *
 *   Load into INTF_REG the interface-offsets entry for KLASS; the entries
 * live at negative offsets from the vtable address held in VTABLE_REG.
 * Under AOT the interface id is not known until load time, so it is
 * materialized via an AOT constant (MONO_PATCH_INFO_ADJUSTED_IID) and added
 * to the vtable pointer before the load; otherwise the displacement is
 * folded into the load instruction directly.
 * NOTE(review): some interior lines (braces/else) are elided in this view.
 */
1365 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1367 if (cfg->compile_aot) {
1368 int ioffset_reg = alloc_preg (cfg);
1369 int iid_reg = alloc_preg (cfg);
/* ADJUSTED_IID is presumably already scaled/negated for direct addition — TODO confirm against aot-compiler */
1371 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1372 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1373 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* Non-AOT: interface_id is a compile-time constant, so use a fixed negative displacement. */
1376 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
1381 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1382 * stored in "klass_reg" implements the interface "klass".
/*
 * Tests one bit of the class's interface_bitmap: byte index is iid >> 3,
 * bit index is iid & 7.  The AOT path computes both at run time from an
 * AOT interface-id constant; the JIT path folds them into immediates.
 * NOTE(review): interior lines (braces/else) are elided in this view.
 */
1385 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1387 int ibitmap_reg = alloc_preg (cfg);
1388 int ibitmap_byte_reg = alloc_preg (cfg);
1390 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap));
1392 if (cfg->compile_aot) {
1393 int iid_reg = alloc_preg (cfg);
1394 int shifted_iid_reg = alloc_preg (cfg);
1395 int ibitmap_byte_address_reg = alloc_preg (cfg);
1396 int masked_iid_reg = alloc_preg (cfg);
1397 int iid_one_bit_reg = alloc_preg (cfg);
1398 int iid_bit_reg = alloc_preg (cfg);
1399 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
/* ibitmap [iid >> 3] */
1400 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1401 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1402 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
/* byte & (1 << (iid & 7)) */
1403 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1404 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1405 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1406 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT path: byte and bit indices are known constants. */
1408 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1409 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1414 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1415 * stored in "vtable_reg" implements the interface "klass".
/*
 * Same bitmap test as mini_emit_load_intf_bit_reg_class, but reading the
 * interface_bitmap field from a MonoVTable rather than a MonoClass.
 * NOTE(review): interior lines (braces/else) are elided in this view.
 */
1418 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1420 int ibitmap_reg = alloc_preg (cfg);
1421 int ibitmap_byte_reg = alloc_preg (cfg);
1423 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap));
1425 if (cfg->compile_aot) {
1426 int iid_reg = alloc_preg (cfg);
1427 int shifted_iid_reg = alloc_preg (cfg);
1428 int ibitmap_byte_address_reg = alloc_preg (cfg);
1429 int masked_iid_reg = alloc_preg (cfg);
1430 int iid_one_bit_reg = alloc_preg (cfg);
1431 int iid_bit_reg = alloc_preg (cfg);
1432 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
/* ibitmap [iid >> 3] */
1433 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, shifted_iid_reg, iid_reg, 3);
1434 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1435 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
/* byte & (1 << (iid & 7)) */
1436 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, masked_iid_reg, iid_reg, 7);
1437 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1438 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1439 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
1441 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1442 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1447 * Emit code which checks whether the interface id of @klass is smaller than
1448 * the value given by max_iid_reg.
/*
 * On failure: if FALSE_TARGET is non-NULL this is an isinst-style test and
 * control branches there; otherwise an InvalidCastException is raised.
 * NOTE(review): interior lines (braces/else) are elided in this view.
 */
1451 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1452 MonoBasicBlock *false_target)
1454 if (cfg->compile_aot) {
1455 int iid_reg = alloc_preg (cfg);
1456 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1457 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1460 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
/* max_iid < iid (unsigned) means the vtable/class cannot implement the interface. */
1462 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1464 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1467 /* Same as above, but obtains max_iid from a vtable */
1469 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1470 MonoBasicBlock *false_target)
1472 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is loaded as an unsigned 16-bit field of MonoVTable. */
1474 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1475 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1478 /* Same as above, but obtains max_iid from a klass */
1480 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1481 MonoBasicBlock *false_target)
1483 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is loaded as an unsigned 16-bit field of MonoClass. */
1485 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1486 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast:
 *
 *   Emit an isinst-style subclass test: check whether the MonoClass in
 * KLASS_REG derives from KLASS by looking up the supertypes table at
 * depth klass->idepth - 1 and comparing against KLASS.  Branches to
 * TRUE_TARGET on a match; the idepth guard branches to FALSE_TARGET when
 * the candidate's inheritance depth is too small to contain KLASS.
 * NOTE(review): interior lines (braces) are elided in this view.
 */
1490 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1492 int idepth_reg = alloc_preg (cfg);
1493 int stypes_reg = alloc_preg (cfg);
1494 int stype = alloc_preg (cfg);
/* For idepth <= MONO_DEFAULT_SUPERTABLE_SIZE the supertypes table is always large enough, so the depth check can be skipped. */
1496 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1497 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1498 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1499 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1501 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1502 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1503 if (cfg->compile_aot) {
1504 int const_reg = alloc_preg (cfg);
1505 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1506 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
/* JIT path: the MonoClass* pointer itself is used as an immediate. */
1508 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1510 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/*
 * mini_emit_iface_cast:
 *
 *   Emit a check that the object whose vtable is in VTABLE_REG implements
 * the interface KLASS: first a max-interface-id range check, then the
 * interface-bitmap bit test.  With TRUE_TARGET set this behaves like
 * isinst (branch on success); otherwise a zero bit raises
 * InvalidCastException.
 * NOTE(review): interior lines (braces) are elided in this view.
 */
1514 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1516 int intf_reg = alloc_preg (cfg);
1518 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1519 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1520 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1522 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1524 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1528 * Variant of the above that takes a register to the class, not the vtable.
/* NOTE(review): interior lines (braces) are elided in this view. */
1531 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1533 int intf_bit_reg = alloc_preg (cfg);
1535 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1536 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
/* Nonzero bit => the interface is implemented. */
1537 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1539 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1541 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check:
 *
 *   Emit code which raises InvalidCastException unless the MonoClass held
 * in KLASS_REG is exactly KLASS (pointer equality).  Under AOT the class
 * pointer comes from a CLASSCONST patch; otherwise it is an immediate.
 * NOTE(review): interior lines (braces/else) are elided in this view.
 */
1545 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1547 if (cfg->compile_aot) {
1548 int const_reg = alloc_preg (cfg);
1549 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1550 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1552 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1554 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/*
 * mini_emit_class_check_branch:
 *
 *   Like mini_emit_class_check, but instead of raising an exception it
 * branches to TARGET using the caller-supplied conditional BRANCH_OP
 * (e.g. OP_PBEQ / OP_PBNE_UN) after comparing KLASS_REG against KLASS.
 * NOTE(review): interior lines (braces/else) are elided in this view.
 */
1558 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1560 if (cfg->compile_aot) {
1561 int const_reg = alloc_preg (cfg);
1562 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1563 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1565 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1567 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/*
 * mini_emit_castclass:
 *
 *   Emit the class-cast check for a castclass opcode.  KLASS_REG holds the
 * runtime MonoClass of the object in OBJ_REG (OBJ_REG may be -1 when the
 * caller only has a class, e.g. for nested array element checks).  Array
 * types are checked by rank + recursive element-class check; non-arrays
 * fall through to the supertypes-table check at the bottom.  Raises
 * InvalidCastException on mismatch, branches to OBJECT_IS_NULL where a
 * null/enum-parent shortcut applies.
 * NOTE(review): interior lines (braces/else, the array-type guard) are
 * elided in this view; the structure below is partial.
 */
1571 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1574 int rank_reg = alloc_preg (cfg);
1575 int eclass_reg = alloc_preg (cfg);
/* Array path: the rank must match exactly. */
1577 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1578 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1579 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1580 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1581 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class))<
1582 if (klass->cast_class == mono_defaults.object_class) {
1583 int parent_reg = alloc_preg (cfg);
/* object[] also accepts enum-typed element classes — check parent then exact enum class. */
1584 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1585 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1586 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1587 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1588 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1589 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1590 } else if (klass->cast_class == mono_defaults.enum_class) {
1591 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1592 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1593 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1595 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1596 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1599 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1600 /* Check that the object is a vector too */
1601 int bounds_reg = alloc_preg (cfg);
/* An SZARRAY has a NULL bounds pointer; a multi-dim array of the same rank does not. */
1602 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1603 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1604 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: supertypes-table lookup, as in mini_emit_isninst_cast but raising on failure. */
1607 int idepth_reg = alloc_preg (cfg);
1608 int stypes_reg = alloc_preg (cfg);
1609 int stype = alloc_preg (cfg);
1611 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1612 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1613 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1614 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1616 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1617 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1618 mini_emit_class_check (cfg, stype, klass);
/*
 * mini_emit_memset:
 *
 *   Emit inline stores that set SIZE bytes at DESTREG+OFFSET to VAL
 * (only VAL == 0 is supported — see the assert).  Small aligned sizes use
 * a single immediate store; otherwise a value register is materialized
 * and stores of decreasing width are emitted.  On targets where
 * NO_UNALIGNED_ACCESS is set the wide-store path is compiled out.
 * NOTE(review): interior lines (switch labels, loops, braces) are elided
 * in this view, including whatever the trailing #endif pairs with.
 */
1623 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1627 g_assert (val == 0);
1632 if ((size <= 4) && (size <= align)) {
1635 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1638 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1641 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1643 #if SIZEOF_REGISTER == 8
1645 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
1651 val_reg = alloc_preg (cfg);
1653 if (SIZEOF_REGISTER == 8)
1654 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1656 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1659 /* This could be optimized further if necessary */
1661 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1668 #if !NO_UNALIGNED_ACCESS
1669 if (SIZEOF_REGISTER == 8) {
1671 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1676 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1684 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1689 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1694 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1700 #endif /* DISABLE_JIT */
/*
 * mini_emit_memcpy:
 *
 *   Emit inline load/store pairs that copy SIZE bytes from SRCREG+SOFFSET
 * to DESTREG+DOFFSET.  Copies are emitted widest-first (8/4/2/1 bytes)
 * subject to ALIGN and NO_UNALIGNED_ACCESS; a size bound guards against
 * unbounded code expansion.
 * NOTE(review): interior lines (loops, offset increments, braces) are
 * elided in this view.
 */
1703 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1710 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1711 g_assert (size < 10000);
1714 /* This could be optimized further if necessary */
/* Unaligned fallback: byte-by-byte copy. */
1716 cur_reg = alloc_preg (cfg);
1717 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1718 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1725 #if !NO_UNALIGNED_ACCESS
1726 if (SIZEOF_REGISTER == 8) {
1728 cur_reg = alloc_preg (cfg);
1729 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1730 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1739 cur_reg = alloc_preg (cfg);
1740 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1741 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1747 cur_reg = alloc_preg (cfg);
1748 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1749 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1755 cur_reg = alloc_preg (cfg);
1756 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1757 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * ret_type_to_call_opcode:
 *
 *   Map the return TYPE of a callee to the IR call opcode family:
 * plain CALL for ints/pointers/objects, LCALL for 64-bit, FCALL for
 * floats, VCALL for value types, VOIDCALL for void — each crossed with
 * the CALLI (indirect, _REG) and virtual (_VIRT) variants.  Enums and
 * generic instances are unwrapped and re-dispatched.
 * NOTE(review): many case labels and the byref early-out surrounding the
 * visible lines are elided in this view.
 */
1767 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
1770 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
/* Resolve shared-generic type variables before dispatching on ->type. */
1773 type = mini_get_basic_type_from_generic (gsctx, type);
1774 switch (type->type) {
1775 case MONO_TYPE_VOID:
1776 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1779 case MONO_TYPE_BOOLEAN:
1782 case MONO_TYPE_CHAR:
1785 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1789 case MONO_TYPE_FNPTR:
1790 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1791 case MONO_TYPE_CLASS:
1792 case MONO_TYPE_STRING:
1793 case MONO_TYPE_OBJECT:
1794 case MONO_TYPE_SZARRAY:
1795 case MONO_TYPE_ARRAY:
1796 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1799 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1802 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1803 case MONO_TYPE_VALUETYPE:
/* Enums are handled as their underlying integral type. */
1804 if (type->data.klass->enumtype) {
1805 type = mono_class_enum_basetype (type->data.klass);
1808 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1809 case MONO_TYPE_TYPEDBYREF:
1810 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1811 case MONO_TYPE_GENERICINST:
1812 type = &type->data.generic_class->container_class->byval_arg;
1815 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1821 * target_type_is_incompatible:
1822 * @cfg: MonoCompile context
1824 * Check that the item @arg on the evaluation stack can be stored
1825 * in the target type (can be a local, or field, etc).
1826 * The cfg arg can be used to check if we need verification or just
1829 * Returns: non-0 value if arg can't be stored on a target.
/* NOTE(review): many case labels, returns and braces between the visible lines are elided in this view. */
1832 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1834 MonoType *simple_type;
1837 if (target->byref) {
1838 /* FIXME: check that the pointed to types match */
1839 if (arg->type == STACK_MP)
1840 return arg->klass != mono_class_from_mono_type (target);
1841 if (arg->type == STACK_PTR)
/* Strip enum wrappers etc. before dispatching on the element type. */
1846 simple_type = mono_type_get_underlying_type (target);
1847 switch (simple_type->type) {
1848 case MONO_TYPE_VOID:
1852 case MONO_TYPE_BOOLEAN:
1855 case MONO_TYPE_CHAR:
1858 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1862 /* STACK_MP is needed when setting pinned locals */
1863 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1868 case MONO_TYPE_FNPTR:
1869 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1872 case MONO_TYPE_CLASS:
1873 case MONO_TYPE_STRING:
1874 case MONO_TYPE_OBJECT:
1875 case MONO_TYPE_SZARRAY:
1876 case MONO_TYPE_ARRAY:
1877 if (arg->type != STACK_OBJ)
1879 /* FIXME: check type compatibility */
1883 if (arg->type != STACK_I8)
1888 if (arg->type != STACK_R8)
1891 case MONO_TYPE_VALUETYPE:
1892 if (arg->type != STACK_VTYPE)
1894 klass = mono_class_from_mono_type (simple_type);
1895 if (klass != arg->klass)
1898 case MONO_TYPE_TYPEDBYREF:
1899 if (arg->type != STACK_VTYPE)
1901 klass = mono_class_from_mono_type (simple_type);
1902 if (klass != arg->klass)
1905 case MONO_TYPE_GENERICINST:
/* Value-type generic instances compare by exact class; reference ones only need STACK_OBJ. */
1906 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1907 if (arg->type != STACK_VTYPE)
1909 klass = mono_class_from_mono_type (simple_type);
1910 if (klass != arg->klass)
1914 if (arg->type != STACK_OBJ)
1916 /* FIXME: check type compatibility */
1920 case MONO_TYPE_MVAR:
1921 /* FIXME: all the arguments must be references for now,
1922 * later look inside cfg and see if the arg num is
1923 * really a reference
1925 g_assert (cfg->generic_sharing_context);
1926 if (arg->type != STACK_OBJ)
1930 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
1936 * Prepare arguments for passing to a function call.
1937 * Return a non-zero value if the arguments can't be passed to the given
1939 * The type checks are not yet complete and some conversions may need
1940 * casts on 32 or 64 bit architectures.
1942 * FIXME: implement this using target_type_is_incompatible ()
/* NOTE(review): returns/braces between the visible lines are elided in this view. */
1945 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
1947 MonoType *simple_type;
/* The implicit 'this' argument must be an object, managed pointer or native pointer. */
1951 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
1955 for (i = 0; i < sig->param_count; ++i) {
1956 if (sig->params [i]->byref) {
1957 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
1961 simple_type = sig->params [i];
1962 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
1964 switch (simple_type->type) {
1965 case MONO_TYPE_VOID:
1970 case MONO_TYPE_BOOLEAN:
1973 case MONO_TYPE_CHAR:
1976 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
1982 case MONO_TYPE_FNPTR:
1983 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
1986 case MONO_TYPE_CLASS:
1987 case MONO_TYPE_STRING:
1988 case MONO_TYPE_OBJECT:
1989 case MONO_TYPE_SZARRAY:
1990 case MONO_TYPE_ARRAY:
1991 if (args [i]->type != STACK_OBJ)
1996 if (args [i]->type != STACK_I8)
2001 if (args [i]->type != STACK_R8)
2004 case MONO_TYPE_VALUETYPE:
/* Enums are checked as their underlying integral type. */
2005 if (simple_type->data.klass->enumtype) {
2006 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2009 if (args [i]->type != STACK_VTYPE)
2012 case MONO_TYPE_TYPEDBYREF:
2013 if (args [i]->type != STACK_VTYPE)
2016 case MONO_TYPE_GENERICINST:
2017 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2021 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 *
 *   Map an OP_*CALLVIRT opcode to the corresponding direct OP_*CALL
 * opcode (used when a virtual call can be statically dispatched).
 * NOTE(review): most case labels/returns are elided in this view; only
 * the fragment below is visible.
 */
2029 callvirt_to_call (int opcode)
2034 case OP_VOIDCALLVIRT:
2043 g_assert_not_reached ();
/*
 * callvirt_to_call_membase:
 *
 *   Map an OP_*CALLVIRT opcode to the OP_*CALL_MEMBASE form, which calls
 * through an address loaded from [basereg + offset] (vtable/IMT slot).
 * NOTE(review): some case labels are elided in this view.
 */
2050 callvirt_to_call_membase (int opcode)
2054 return OP_CALL_MEMBASE;
2055 case OP_VOIDCALLVIRT:
2056 return OP_VOIDCALL_MEMBASE;
2058 return OP_FCALL_MEMBASE;
2060 return OP_LCALL_MEMBASE;
2062 return OP_VCALL_MEMBASE;
2064 g_assert_not_reached ();
2070 #ifdef MONO_ARCH_HAVE_IMT
/*
 * emit_imt_argument:
 *
 *   Pass the IMT "hidden" argument for an interface call.  If the target
 * architecture exposes a dedicated IMT register the method identifier is
 * placed there (from IMT_ARG if supplied, else the call's method, via an
 * AOT constant when compiling AOT); otherwise the arch backend hook is
 * used.
 * NOTE(review): interior lines (braces/#else) are elided in this view.
 */
2072 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
2074 #ifdef MONO_ARCH_IMT_REG
2075 int method_reg = alloc_preg (cfg);
2078 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2079 } else if (cfg->compile_aot) {
2080 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2083 MONO_INST_NEW (cfg, ins, OP_PCONST);
2084 ins->inst_p0 = call->method;
2085 ins->dreg = method_reg;
2086 MONO_ADD_INS (cfg->cbb, ins);
2089 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2091 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/*
 * mono_patch_info_new:
 *
 *   Allocate a MonoJumpInfo from MP and fill in its patch type and target
 * (lifetime is owned by the mempool — callers must not free it).
 * NOTE(review): the assignments of ip/type are elided in this view.
 */
2096 static MonoJumpInfo *
2097 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2099 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2103 ji->data.target = target;
2108 inline static MonoInst*
2109 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args);
/*
 * mono_emit_call_args:
 *
 *   Create the MonoCallInst for a call with signature SIG and arguments
 * ARGS.  CALLI selects an indirect call, VIRTUAL a callvirt, TAIL an
 * OP_TAILCALL.  Valuetype returns either reuse cfg->vret_addr or get a
 * fresh temp whose address is passed via OP_OUTARG_VTRETADDR; scalar
 * returns allocate a dreg.  With MONO_ARCH_SOFT_FLOAT, R4 arguments are
 * pre-converted by an icall since the conversion cannot run inside the
 * call sequence.  Finally the backend (LLVM or arch) lowers the argument
 * passing and the param area / HAS_CALLS flags are updated.
 * NOTE(review): several lines (declarations, braces, the condition
 * guarding the vret_var branch, return statement) are elided in this view.
 */
2111 inline static MonoCallInst *
2112 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2113 MonoInst **args, int calli, int virtual, int tail)
2116 #ifdef MONO_ARCH_SOFT_FLOAT
2121 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2123 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2126 call->signature = sig;
2128 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
2131 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2132 call->vret_var = cfg->vret_addr;
2133 //g_assert_not_reached ();
2135 } else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2136 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2139 temp->backend.is_pinvoke = sig->pinvoke;
2142 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2143 * address of return value to increase optimization opportunities.
2144 * Before vtype decomposition, the dreg of the call ins itself represents the
2145 * fact the call modifies the return value. After decomposition, the call will
2146 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2147 * will be transformed into an LDADDR.
2149 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2150 loada->dreg = alloc_preg (cfg);
2151 loada->inst_p0 = temp;
2152 /* We reference the call too since call->dreg could change during optimization */
2153 loada->inst_p1 = call;
2154 MONO_ADD_INS (cfg->cbb, loada);
2156 call->inst.dreg = temp->dreg;
2158 call->vret_var = loada;
2159 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2160 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2162 #ifdef MONO_ARCH_SOFT_FLOAT
2164 * If the call has a float argument, we would need to do an r8->r4 conversion using
2165 * an icall, but that cannot be done during the call sequence since it would clobber
2166 * the call registers + the stack. So we do it before emitting the call.
2168 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2170 MonoInst *in = call->args [i];
2172 if (i >= sig->hasthis)
2173 t = sig->params [i - sig->hasthis];
2175 t = &mono_defaults.int_class->byval_arg;
2176 t = mono_type_get_underlying_type (t);
2178 if (!t->byref && t->type == MONO_TYPE_R4) {
2179 MonoInst *iargs [1];
2183 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2185 /* The result will be in an int vreg */
2186 call->args [i] = conv;
2192 if (COMPILE_LLVM (cfg))
2193 mono_llvm_emit_call (cfg, call);
2195 mono_arch_emit_call (cfg, call);
2197 mono_arch_emit_call (cfg, call);
/* Track the maximum outgoing-parameter area needed by any call in the method. */
2200 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2201 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * mono_emit_calli:
 *
 *   Emit an indirect call through the function pointer in ADDR with
 * signature SIG; the address goes in sreg1 of the call instruction.
 */
2206 inline static MonoInst*
2207 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr)
2209 MonoCallInst *call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE);
2211 call->inst.sreg1 = addr->dreg;
2213 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2215 return (MonoInst*)call;
/*
 * mono_emit_rgctx_calli:
 *
 *   Like mono_emit_calli, but additionally passes RGCTX_ARG in the
 * architecture's dedicated RGCTX register (generic sharing).  On
 * architectures without MONO_ARCH_RGCTX_REG this path is unreachable.
 * NOTE(review): interior lines (declarations, braces, #else) are elided
 * in this view.
 */
2218 inline static MonoInst*
2219 mono_emit_rgctx_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
2221 #ifdef MONO_ARCH_RGCTX_REG
2226 rgctx_reg = mono_alloc_preg (cfg);
2227 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2229 call = (MonoCallInst*)mono_emit_calli (cfg, sig, args, addr);
2231 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2232 cfg->uses_rgctx_reg = TRUE;
2233 call->rgctx_reg = TRUE;
2235 return (MonoInst*)call;
2237 g_assert_not_reached ();
2243 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
2245 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, int rgctx_type);
/*
 * mono_emit_method_call_full:
 *
 *   Emit a (possibly virtual) call to METHOD with signature SIG.  THIS
 * being non-NULL makes the call virtual.  Handles, in order: string-ctor
 * signature rewriting, transparent-proxy/remoting wrappers (including the
 * shared-generic case, which goes through an rgctx indirect call),
 * delegate Invoke via invoke_impl, static devirtualization of non-virtual
 * and sealed methods (with a null 'this' check), and finally true virtual
 * dispatch through the vtable or, for interfaces, through an IMT slot
 * (negative offsets from the vtable) when MONO_ARCH_HAVE_IMT is set.
 * Returns the call instruction.
 * NOTE(review): numerous lines (declarations, braces, else branches,
 * the non-virtual early path) are elided in this view.
 */
2248 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2249 MonoInst **args, MonoInst *this, MonoInst *imt_arg)
2251 gboolean might_be_remote;
2252 gboolean virtual = this != NULL;
2253 gboolean enable_for_aot = TRUE;
2257 if (method->string_ctor) {
2258 /* Create the real signature */
2259 /* FIXME: Cache these */
2260 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2261 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
/* A MarshalByRef (or object) receiver might be a transparent proxy, requiring a remoting wrapper. */
2266 might_be_remote = this && sig->hasthis &&
2267 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2268 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && !MONO_CHECK_THIS (this);
2270 context_used = mono_method_check_context_used (method);
2271 if (might_be_remote && context_used) {
2274 g_assert (cfg->generic_sharing_context);
/* Shared generic code cannot use a wrapper; fetch the remoting-invoke address from the rgctx instead. */
2276 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2278 return mono_emit_calli (cfg, sig, args, addr);
2281 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE);
2283 if (might_be_remote)
2284 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2286 call->method = method;
2287 call->inst.flags |= MONO_INST_HAS_METHOD;
2288 call->inst.inst_left = this;
2291 int vtable_reg, slot_reg, this_reg;
2293 this_reg = this->dreg;
2295 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2297 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
2298 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2299 /* Make a call to delegate->invoke_impl */
2300 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2301 call->inst.inst_basereg = this_reg;
2302 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2303 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2305 return (MonoInst*)call;
/* Devirtualize: non-virtual methods, and final methods (except remoting wrappers), can be called directly. */
2309 if ((!cfg->compile_aot || enable_for_aot) &&
2310 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2311 (MONO_METHOD_IS_FINAL (method) &&
2312 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2313 !(method->klass->marshalbyref && context_used)) {
2315 * the method is not virtual, we just need to ensure this is not null
2316 * and then we can call the method directly.
2318 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2320 * The check above ensures method is not gshared, this is needed since
2321 * gshared methods can't have wrappers.
2323 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2326 if (!method->string_ctor)
2327 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2329 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2331 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2333 return (MonoInst*)call;
2336 if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2338 * the method is virtual, but we can statically dispatch since either
2339 * it's class or the method itself are sealed.
2340 * But first we need to ensure it's not a null reference.
2342 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2344 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2345 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2347 return (MonoInst*)call;
/* True virtual dispatch: load the slot address from the vtable (or IMT) and call through it. */
2350 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2352 vtable_reg = alloc_preg (cfg);
2353 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2354 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2356 #ifdef MONO_ARCH_HAVE_IMT
2358 guint32 imt_slot = mono_method_get_imt_slot (method);
2359 emit_imt_argument (cfg, call, imt_arg);
/* IMT slots live at negative offsets below the vtable. */
2360 slot_reg = vtable_reg;
2361 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2364 if (slot_reg == -1) {
2365 slot_reg = alloc_preg (cfg);
2366 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2367 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2370 slot_reg = vtable_reg;
2371 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2372 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2373 #ifdef MONO_ARCH_HAVE_IMT
/* Generic virtual methods also go through the IMT mechanism. */
2375 g_assert (mono_method_signature (method)->generic_param_count);
2376 emit_imt_argument (cfg, call, imt_arg);
2381 call->inst.sreg1 = slot_reg;
2382 call->virtual = TRUE;
2385 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2387 return (MonoInst*)call;
/*
 * mono_emit_rgctx_method_call_full:
 *
 *   Like mono_emit_method_call_full, but additionally passes VTABLE_ARG
 * in the architecture's RGCTX register (for calls into shared generic
 * code that need the runtime generic context).
 * NOTE(review): interior lines (declarations, braces, return) are elided
 * in this view.
 */
2391 mono_emit_rgctx_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2392 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *vtable_arg)
2394 #ifdef MONO_ARCH_RGCTX_REG
2401 #ifdef MONO_ARCH_RGCTX_REG
2402 rgctx_reg = mono_alloc_preg (cfg);
2403 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
2408 ins = mono_emit_method_call_full (cfg, method, sig, args, this, imt_arg);
2410 call = (MonoCallInst*)ins;
2412 #ifdef MONO_ARCH_RGCTX_REG
2413 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2414 cfg->uses_rgctx_reg = TRUE;
2415 call->rgctx_reg = TRUE;
/* Convenience wrapper: call METHOD using its own signature, with no IMT argument. */
2424 static inline MonoInst*
2425 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2427 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL);
/*
 * mono_emit_native_call:
 *
 *   Emit a direct call to the native function FUNC with signature SIG.
 * NOTE(review): the lines assigning FUNC into the call instruction are
 * elided in this view.
 */
2431 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2438 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE);
2441 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2443 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *
 *   Emit a call to the registered JIT icall identified by FUNC, going
 * through the icall's wrapper so the runtime's calling conventions and
 * exception handling are respected.
 */
2446 inline static MonoInst*
2447 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2449 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2453 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2457 * mono_emit_abs_call:
2459 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
/*
 * The MonoJumpInfo itself is passed as the "address"; it is registered in
 * cfg->abs_patches so the PATCH_INFO_ABS resolver can recognise it later
 * and patch in the real target.  fptr_is_patch marks the call accordingly.
 */
2461 inline static MonoInst*
2462 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2463 MonoMethodSignature *sig, MonoInst **args)
2465 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2469 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
2472 if (cfg->abs_patches == NULL)
2473 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2474 g_hash_table_insert (cfg->abs_patches, ji, ji);
2475 ins = mono_emit_native_call (cfg, ji, sig, args);
2476 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *
 *   After a pinvoke (or LLVM) call returning a small integer, emit a
 * sign/zero-extension of the result register, because native code may
 * leave the upper bits of sub-register-sized return values undefined.
 * Returns the (possibly replaced) result instruction.
 * NOTE(review): interior lines (braces, defaults, the final return) are
 * elided in this view.
 */
2481 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2483 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2484 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2488 * Native code might return non register sized integers
2489 * without initializing the upper bits.
2491 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2492 case OP_LOADI1_MEMBASE:
2493 widen_op = OP_ICONV_TO_I1;
2495 case OP_LOADU1_MEMBASE:
2496 widen_op = OP_ICONV_TO_U1;
2498 case OP_LOADI2_MEMBASE:
2499 widen_op = OP_ICONV_TO_I2;
2501 case OP_LOADU2_MEMBASE:
2502 widen_op = OP_ICONV_TO_U2;
2508 if (widen_op != -1) {
2509 int dreg = alloc_preg (cfg);
2512 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2513 widen->type = ins->type;
/*
 * get_memcpy_method:
 *   Return the managed String.memcpy(dst, src, n) helper from corlib,
 *   caching it in a static on first use.
 *   NOTE(review): the lazy init is not locked — presumably benign since the
 *   lookup is idempotent, but confirm against the JIT's threading rules.
 */
2523 get_memcpy_method (void)
2525 static MonoMethod *memcpy_method = NULL;
2526 if (!memcpy_method) {
2527 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2529 g_error ("Old corlib found. Install a new one");
2531 return memcpy_method;
2535 * Emit code to copy a valuetype of type @klass whose address is stored in
2536 * @src->dreg to memory whose address is stored at @dest->dreg.
2539 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2541 MonoInst *iargs [3];
2544 MonoMethod *memcpy_method;
2548 * This check breaks with spilled vars... need to handle it during verification anyway.
2549 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* Native layout and managed layout can differ in size/alignment. */
2553 n = mono_class_native_size (klass, &align);
2555 n = mono_class_value_size (klass, &align);
2557 #if HAVE_WRITE_BARRIERS
2558 /* if native is true there should be no references in the struct */
2559 if (klass->has_references && !native) {
2560 /* Avoid barriers when storing to the stack */
2561 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2562 (dest->opcode == OP_LDADDR))) {
2563 int context_used = 0;
2568 if (cfg->generic_sharing_context)
2569 context_used = mono_class_check_context_used (klass);
/* mono_value_copy () performs the copy with GC write barriers. */
2571 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2573 if (cfg->compile_aot) {
2574 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2576 EMIT_NEW_PCONST (cfg, iargs [2], klass);
2577 mono_class_compute_gc_descriptor (klass);
2581 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* Small structs: expand to inline loads/stores instead of a call. */
2586 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2587 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2588 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2592 EMIT_NEW_ICONST (cfg, iargs [2], n);
2594 memcpy_method = get_memcpy_method ();
2595 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *   Return the managed String.memset(dst, val, n) helper from corlib,
 *   caching it in a static on first use (same caveats as get_memcpy_method).
 */
2600 get_memset_method (void)
2602 static MonoMethod *memset_method = NULL;
2603 if (!memset_method) {
2604 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2606 g_error ("Old corlib found. Install a new one");
2608 return memset_method;
/*
 * mini_emit_initobj:
 *   Emit code to zero-initialize the valuetype KLASS at the address in
 *   DEST->dreg (CIL initobj). Small types are expanded inline; larger
 *   ones call the managed memset helper.
 */
2612 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2614 MonoInst *iargs [3];
2617 MonoMethod *memset_method;
2619 /* FIXME: Optimize this for the case when dest is an LDADDR */
2621 mono_class_init (klass);
2622 n = mono_class_value_size (klass, &align);
2624 if (n <= sizeof (gpointer) * 5) {
2625 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2628 memset_method = get_memset_method ();
2630 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2631 EMIT_NEW_ICONST (cfg, iargs [2], n);
2632 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *   Emit IR to load the runtime generic context for METHOD. Depending on
 *   how the method is shared, the context comes from either the `this'
 *   argument's vtable, a method RGCTX variable, or a vtable variable.
 */
2637 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2639 MonoInst *this = NULL;
2641 g_assert (cfg->generic_sharing_context);
/* Non-static, non-valuetype methods without method context get the
 * rgctx from `this' (argument 0). */
2643 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2644 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2645 !method->klass->valuetype)
2646 EMIT_NEW_ARGLOAD (cfg, this, 0);
2648 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2649 MonoInst *mrgctx_loc, *mrgctx_var;
2652 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
/* Method context used: load the method RGCTX from its variable. */
2654 mrgctx_loc = mono_get_vtable_var (cfg);
2655 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2658 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2659 MonoInst *vtable_loc, *vtable_var;
/* Static or valuetype method: the vtable is kept in a variable. */
2663 vtable_loc = mono_get_vtable_var (cfg);
2664 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2666 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2667 MonoInst *mrgctx_var = vtable_var;
/* The variable actually holds an MRGCTX; fetch the vtable from it. */
2670 vtable_reg = alloc_preg (cfg);
2671 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2672 vtable_var->type = STACK_PTR;
2678 int vtable_reg, res_reg;
/* Instance method: load the vtable pointer out of `this'. */
2680 vtable_reg = alloc_preg (cfg);
2681 res_reg = alloc_preg (cfg);
2682 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *   Allocate (from MP) a jump-info entry describing an RGCTX slot fetch:
 *   which method it belongs to, whether the context is an MRGCTX, the
 *   patch describing the data, and the kind of info to fetch.
 */
2687 static MonoJumpInfoRgctxEntry *
2688 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2690 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2691 res->method = method;
2692 res->in_mrgctx = in_mrgctx;
2693 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2694 res->data->type = patch_type;
2695 res->data->data.target = patch_data;
2696 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *   Emit a call to the lazy-fetch trampoline that resolves ENTRY from the
 *   runtime generic context RGCTX.
 */
2701 static inline MonoInst*
2702 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2704 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *   Emit IR to fetch the RGCTX_TYPE info (klass, vtable, ...) of KLASS
 *   from the current method's runtime generic context.
 */
2708 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2709 MonoClass *klass, int rgctx_type)
2711 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2712 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2714 return emit_rgctx_fetch (cfg, rgctx, entry);
2718 * emit_get_rgctx_method:
2720 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2721 * normal constants, else emit a load from the rgctx.
2724 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2725 MonoMethod *cmethod, int rgctx_type)
2727 if (!context_used) {
/* No shared context: the method is known at compile time, emit a constant. */
2730 switch (rgctx_type) {
2731 case MONO_RGCTX_INFO_METHOD:
2732 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
2734 case MONO_RGCTX_INFO_METHOD_RGCTX:
2735 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
2738 g_assert_not_reached ();
2741 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
2742 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2744 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *   Emit IR to fetch the RGCTX_TYPE info of FIELD from the current
 *   method's runtime generic context.
 */
2749 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
2750 MonoClassField *field, int rgctx_type)
2752 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
2753 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2755 return emit_rgctx_fetch (cfg, rgctx, entry);
2759 * On return the caller must check @klass for load errors.
/* Emit a call to the generic-class-init trampoline so KLASS's cctor runs.
 * The vtable argument comes from the rgctx under generic sharing,
 * otherwise it is emitted as a constant. */
2762 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
2764 MonoInst *vtable_arg;
2766 int context_used = 0;
2768 if (cfg->generic_sharing_context)
2769 context_used = mono_class_check_context_used (klass);
2772 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
2773 klass, MONO_RGCTX_INFO_VTABLE);
2775 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2779 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
2782 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
2783 #ifdef MONO_ARCH_VTABLE_REG
/* Pass the vtable in the architecture's dedicated register. */
2784 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
2785 cfg->uses_vtable_reg = TRUE;
2792 * On return the caller must check @array_class for load errors
/* Emit a check that OBJ's vtable (or class) matches ARRAY_CLASS, throwing
 * ArrayTypeMismatchException on mismatch (used for stelem covariance). */
2795 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
2797 int vtable_reg = alloc_preg (cfg);
2798 int context_used = 0;
2800 if (cfg->generic_sharing_context)
2801 context_used = mono_class_check_context_used (array_class);
2803 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Shared code cannot compare vtable pointers (per-domain); compare the
 * class pointer instead. */
2805 if (cfg->opt & MONO_OPT_SHARED) {
2806 int class_reg = alloc_preg (cfg);
2807 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2808 if (cfg->compile_aot) {
2809 int klass_reg = alloc_preg (cfg);
2810 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
2811 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
2813 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
2815 } else if (context_used) {
2816 MonoInst *vtable_ins;
/* Generic sharing: the expected vtable comes from the rgctx. */
2818 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
2819 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
2821 if (cfg->compile_aot) {
2825 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
2827 vt_reg = alloc_preg (cfg);
2828 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
2829 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
2832 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
2834 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
2838 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
/*
 * save_cast_details:
 *   When --debug=casts is enabled, record the runtime source class of the
 *   object in OBJ_REG and the target class KLASS in the per-thread
 *   MonoJitTlsData (class_cast_from / class_cast_to), so a subsequently
 *   failing cast can report both classes.
 *   Fix: the unsupported-platform message previously ended "...\n." which
 *   printed a stray '.' on its own line after the newline.
 */
2842 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
2844 if (mini_get_debug_options ()->better_cast_details) {
2845 int to_klass_reg = alloc_preg (cfg);
2846 int vtable_reg = alloc_preg (cfg);
2847 int klass_reg = alloc_preg (cfg);
2848 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2851 fprintf (stderr, "error: --debug=casts not supported on this platform.\n");
2855 MONO_ADD_INS (cfg->cbb, tls_get);
2856 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2857 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2859 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
2860 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
2861 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * reset_cast_details:
 *   Clear the cast-details TLS state written by save_cast_details () so a
 *   later unrelated failure doesn't report stale classes.
 */
2866 reset_cast_details (MonoCompile *cfg)
2868 /* Reset the variables holding the cast details */
2869 if (mini_get_debug_options ()->better_cast_details) {
2870 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
2872 MONO_ADD_INS (cfg->cbb, tls_get);
2873 /* It is enough to reset the from field */
2874 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
2879 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
2880 * generic code is generated.
2883 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
2885 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
2888 MonoInst *rgctx, *addr;
2890 /* FIXME: What if the class is shared? We might not
2891 have to get the address of the method from the
/* Shared code: call Nullable<T>.Unbox indirectly through an address
 * fetched from the rgctx. */
2893 addr = emit_get_rgctx_method (cfg, context_used, method,
2894 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
2896 rgctx = emit_get_rgctx (cfg, method, context_used);
2898 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
2900 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *   Emit the CIL `unbox' sequence for SP[0]: null check, verify the boxed
 *   object's element class matches KLASS (InvalidCastException otherwise),
 *   then return the address of the value payload (just past MonoObject).
 */
2905 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
2909 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
2910 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
2911 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
2912 int rank_reg = alloc_dreg (cfg ,STACK_I4);
2914 obj_reg = sp [0]->dreg;
2915 MONO_EMIT_NULL_CHECK (cfg, obj_reg);
2916 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2917 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
2919 /* FIXME: generics */
2920 g_assert (klass->rank == 0);
/* Arrays can never be unboxed: rank must be 0. */
2923 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
2924 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2926 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
2927 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
2930 MonoInst *element_class;
2932 /* This assertion is from the unboxcast insn */
2933 g_assert (klass->rank == 0);
/* Shared code: compare against the element class from the rgctx. */
2935 element_class = emit_get_rgctx_klass (cfg, context_used,
2936 klass->element_class, MONO_RGCTX_INFO_KLASS);
2938 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
2939 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
2941 save_cast_details (cfg, klass->element_class, obj_reg);
2942 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
2943 reset_cast_details (cfg);
/* Result is a managed pointer to the payload after the object header. */
2946 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_PTR), obj_reg, sizeof (MonoObject));
2947 MONO_ADD_INS (cfg->cbb, add);
2948 add->type = STACK_MP;
2955 * Returns NULL and set the cfg exception on error.
/* Emit IR to allocate an instance of KLASS, choosing between the shared
 * allocator, an mscorlib AOT helper, a GC managed allocator, or the
 * class-specific allocation function. */
2958 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box)
2960 MonoInst *iargs [2];
2963 if (cfg->opt & MONO_OPT_SHARED) {
/* Shared code allocates through mono_object_new (domain, klass). */
2964 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
2965 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
2967 alloc_ftn = mono_object_new;
2968 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
2969 /* This happens often in argument checking code, eg. throw new FooException... */
2970 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
2971 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
2972 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
2974 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
2975 MonoMethod *managed_alloc = NULL;
/* Vtable creation failed: record a type-load error on the cfg. */
2979 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
2980 cfg->exception_ptr = klass;
2984 #ifndef MONO_CROSS_COMPILE
2985 managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
2988 if (managed_alloc) {
2989 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
2990 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
2992 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* Some allocators take the instance size in gpointer units as first arg. */
2994 guint32 lw = vtable->klass->instance_size;
2995 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
2996 EMIT_NEW_ICONST (cfg, iargs [0], lw);
2997 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3000 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3004 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/*
 * handle_alloc_from_inst:
 *   Like handle_alloc (), but the vtable/klass is only known at run time
 *   and is supplied by DATA_INST (e.g. fetched from the rgctx), so no
 *   managed allocator can be selected at compile time.
 */
3008 handle_alloc_from_inst (MonoCompile *cfg, MonoClass *klass, MonoInst *data_inst,
3011 MonoInst *iargs [2];
3012 MonoMethod *managed_alloc = NULL;
3016 FIXME: we cannot get managed_alloc here because we can't get
3017 the class's vtable (because it's not a closed class)
3019 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3020 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3023 if (cfg->opt & MONO_OPT_SHARED) {
3024 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3025 iargs [1] = data_inst;
3026 alloc_ftn = mono_object_new;
3028 if (managed_alloc) {
3029 iargs [0] = data_inst;
3030 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3033 iargs [0] = data_inst;
3034 alloc_ftn = mono_object_new_specific;
3037 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3041 * Returns NULL and set the cfg exception on error.
/* Emit the CIL `box' sequence: Nullable<T> goes through Nullable.Box (),
 * otherwise allocate an object and store VAL into its payload. */
3044 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass)
3046 MonoInst *alloc, *ins;
3048 if (mono_class_is_nullable (klass)) {
3049 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3050 return mono_emit_method_call (cfg, method, &val, NULL);
3053 alloc = handle_alloc (cfg, klass, TRUE);
3057 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * handle_box_from_inst:
 *   Shared-generic-code variant of handle_box (): the runtime class data
 *   is supplied by DATA_INST and Nullable<T>.Box is invoked indirectly
 *   through an address from the rgctx.
 */
3063 handle_box_from_inst (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoInst *data_inst)
3065 MonoInst *alloc, *ins;
3067 if (mono_class_is_nullable (klass)) {
3068 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3069 /* FIXME: What if the class is shared? We might not
3070 have to get the method address from the RGCTX. */
3071 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3072 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
/* NOTE(review): handle_unbox_nullable fetches the rgctx for `method',
 * while here it is fetched for cfg->current_method — confirm the
 * asymmetry is intentional. */
3073 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3075 return mono_emit_rgctx_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3077 alloc = handle_alloc_from_inst (cfg, klass, data_inst, TRUE);
3079 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
3086 * Returns NULL and set the cfg exception on error.
/* Emit the CIL `castclass' sequence for SRC: null passes through, an
 * object of the wrong type raises InvalidCastException, otherwise SRC
 * is returned unchanged. */
3089 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3091 MonoBasicBlock *is_null_bb;
3092 int obj_reg = src->dreg;
3093 int vtable_reg = alloc_preg (cfg);
3095 NEW_BBLOCK (cfg, is_null_bb);
/* null always casts successfully; skip all checks. */
3097 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3098 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3100 save_cast_details (cfg, klass, obj_reg);
3102 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3103 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3104 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3106 int klass_reg = alloc_preg (cfg);
3108 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array classes only need a single pointer comparison. */
3110 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3111 /* the remoting code is broken, access the class for now */
3112 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3113 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3115 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3116 cfg->exception_ptr = klass;
3119 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3121 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3122 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3124 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3126 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3127 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, is_null_bb);
3131 MONO_START_BB (cfg, is_null_bb);
3133 reset_cast_details (cfg);
3139 * Returns NULL and set the cfg exception on error.
/* Emit the CIL `isinst' sequence for SRC: the result register holds SRC
 * if the object is an instance of KLASS (or null), otherwise NULL. */
3142 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3145 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3146 int obj_reg = src->dreg;
3147 int vtable_reg = alloc_preg (cfg);
3148 int res_reg = alloc_preg (cfg);
3150 NEW_BBLOCK (cfg, is_null_bb);
3151 NEW_BBLOCK (cfg, false_bb);
3152 NEW_BBLOCK (cfg, end_bb);
3154 /* Do the assignment at the beginning, so the other assignment can be if converted */
3155 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3156 ins->type = STACK_OBJ;
/* null is an instance of everything; keep the copied value. */
3159 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3160 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3162 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3163 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3164 /* the is_null_bb target simply copies the input register to the output */
3165 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3167 int klass_reg = alloc_preg (cfg);
3169 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Array target type: check rank first, then the element class. */
3172 int rank_reg = alloc_preg (cfg);
3173 int eclass_reg = alloc_preg (cfg);
3175 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3176 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3177 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3178 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3179 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* object[] also accepts arrays of enums and of types deriving from
 * ValueType's parent chain — handle the special element classes. */
3180 if (klass->cast_class == mono_defaults.object_class) {
3181 int parent_reg = alloc_preg (cfg);
3182 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3183 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3184 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3185 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3186 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3187 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3188 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3189 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3190 } else if (klass->cast_class == mono_defaults.enum_class) {
3191 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3192 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3193 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3194 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3196 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3197 /* Check that the object is a vector too */
3198 int bounds_reg = alloc_preg (cfg);
3199 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3200 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3201 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3204 /* the is_null_bb target simply copies the input register to the output */
3205 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3207 } else if (mono_class_is_nullable (klass)) {
3208 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3209 /* the is_null_bb target simply copies the input register to the output */
3210 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
3212 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3213 /* the remoting code is broken, access the class for now */
3214 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3215 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3217 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
3218 cfg->exception_ptr = klass;
3221 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3223 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3224 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3226 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3227 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3229 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3230 /* the is_null_bb target simply copies the input register to the output */
3231 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, is_null_bb);
/* Failure path: produce NULL in the result register. */
3236 MONO_START_BB (cfg, false_bb);
3238 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3239 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3241 MONO_START_BB (cfg, is_null_bb);
3243 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *   Remoting-aware isinst used by the wrapper opcode; see the comment
 *   below for the 0/1/2 result encoding.
 */
3249 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3251 /* This opcode takes as input an object reference and a class, and returns:
3252 0) if the object is an instance of the class,
3253 1) if the object is not instance of the class,
3254 2) if the object is a proxy whose type cannot be determined */
3257 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3258 int obj_reg = src->dreg;
3259 int dreg = alloc_ireg (cfg);
3261 int klass_reg = alloc_preg (cfg);
3263 NEW_BBLOCK (cfg, true_bb);
3264 NEW_BBLOCK (cfg, false_bb);
3265 NEW_BBLOCK (cfg, false2_bb);
3266 NEW_BBLOCK (cfg, end_bb);
3267 NEW_BBLOCK (cfg, no_proxy_bb);
/* null is never an instance: result 1. */
3269 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3270 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3272 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3273 NEW_BBLOCK (cfg, interface_fail_bb);
3275 tmp_reg = alloc_preg (cfg);
3276 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3277 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
3278 MONO_START_BB (cfg, interface_fail_bb);
3279 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Interface check failed: only a transparent proxy can still match. */
3281 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
/* Proxy without custom type info: type cannot be determined (result 2). */
3283 tmp_reg = alloc_preg (cfg);
3284 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3285 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3286 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
3288 tmp_reg = alloc_preg (cfg);
3289 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3290 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3292 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Proxy: test against the remote class's proxy_class instead. */
3293 tmp_reg = alloc_preg (cfg);
3294 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3295 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3297 tmp_reg = alloc_preg (cfg);
3298 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3299 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3300 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3302 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3303 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3305 MONO_START_BB (cfg, no_proxy_bb);
3307 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Materialize the 0/1/2 result in dreg. */
3310 MONO_START_BB (cfg, false_bb);
3312 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3313 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3315 MONO_START_BB (cfg, false2_bb);
3317 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3318 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3320 MONO_START_BB (cfg, true_bb);
3322 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3324 MONO_START_BB (cfg, end_bb);
3327 MONO_INST_NEW (cfg, ins, OP_ICONST);
3329 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *   Remoting-aware castclass used by the wrapper opcode; see the comment
 *   below for the 0/1 result encoding and the exception behavior.
 */
3335 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3337 /* This opcode takes as input an object reference and a class, and returns:
3338 0) if the object is an instance of the class,
3339 1) if the object is a proxy whose type cannot be determined
3340 an InvalidCastException exception is thrown otherwhise*/
3343 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3344 int obj_reg = src->dreg;
3345 int dreg = alloc_ireg (cfg);
3346 int tmp_reg = alloc_preg (cfg);
3347 int klass_reg = alloc_preg (cfg);
3349 NEW_BBLOCK (cfg, end_bb);
3350 NEW_BBLOCK (cfg, ok_result_bb);
/* null casts successfully: result 0. */
3352 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3353 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
3355 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3356 NEW_BBLOCK (cfg, interface_fail_bb);
3358 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3359 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3360 MONO_START_BB (cfg, interface_fail_bb);
3361 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Non-proxy that failed the interface check: throws via class check. */
3363 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
/* Proxy without custom type info: InvalidCastException; otherwise
 * report "type cannot be determined" (result 1). */
3365 tmp_reg = alloc_preg (cfg);
3366 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3367 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3368 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
3370 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3371 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3374 NEW_BBLOCK (cfg, no_proxy_bb);
3376 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3377 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3378 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Proxy: test against the remote class's proxy_class. */
3380 tmp_reg = alloc_preg (cfg);
3381 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3382 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
3384 tmp_reg = alloc_preg (cfg);
3385 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3386 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3387 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
3389 NEW_BBLOCK (cfg, fail_1_bb);
3391 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3393 MONO_START_BB (cfg, fail_1_bb);
3395 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3396 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3398 MONO_START_BB (cfg, no_proxy_bb);
3400 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3403 MONO_START_BB (cfg, ok_result_bb);
3405 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3407 MONO_START_BB (cfg, end_bb);
3410 MONO_INST_NEW (cfg, ins, OP_ICONST);
3412 ins->type = STACK_I4;
3418 * Returns NULL and set the cfg exception on error.
/* Emit an inlined delegate constructor: allocate the delegate object of
 * KLASS, fill in target/method/method_code/invoke_impl, and return it. */
3420 static G_GNUC_UNUSED MonoInst*
3421 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
3423 gpointer *trampoline;
3424 MonoInst *obj, *method_ins, *tramp_ins;
3428 obj = handle_alloc (cfg, klass, FALSE);
3432 /* Inline the contents of mono_delegate_ctor */
3434 /* Set target field */
3435 /* Optimize away setting of NULL target */
3436 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0))
3437 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
3439 /* Set method field */
3440 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
3441 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3444 * To avoid looking up the compiled code belonging to the target method
3445 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3446 * store it, and we fill it after the method has been compiled.
3448 if (!cfg->compile_aot && !method->dynamic) {
3449 MonoInst *code_slot_ins;
3452 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Lazily create the per-domain method -> code-slot hash under the
 * domain lock. */
3454 domain = mono_domain_get ();
3455 mono_domain_lock (domain);
3456 if (!domain_jit_info (domain)->method_code_hash)
3457 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3458 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3460 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3461 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3463 mono_domain_unlock (domain);
3465 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3467 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3470 /* Set invoke_impl field */
3471 if (cfg->compile_aot) {
3472 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3474 trampoline = mono_create_delegate_trampoline (klass);
3475 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3477 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3479 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
3485 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
/*
 * Emit a call to the vararg mono_array_new_va icall to allocate a
 * multi-dimensional array of the given RANK.  SP holds the length/bound
 * arguments already on the evaluation stack; IP is the current IL location.
 */
3487 MonoJitICallInfo *info;
3489 /* Need to register the icall so it gets an icall wrapper */
3490 info = mono_get_array_new_va_icall (rank);
3492 cfg->flags |= MONO_CFG_HAS_VARARGS;
3494 /* mono_array_new_va () needs a vararg calling convention */
3495 cfg->disable_llvm = TRUE;
3497 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3498 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
3502 mono_emit_load_got_addr (MonoCompile *cfg)
/*
 * Materialize the GOT address into cfg->got_var at the head of the entry
 * basic block, exactly once per method (guarded by got_var_allocated), and
 * keep the variable alive until the exit block with a dummy use.
 */
3504 MonoInst *getaddr, *dummy_use;
3506 if (!cfg->got_var || cfg->got_var_allocated)
3509 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3510 getaddr->dreg = cfg->got_var->dreg;
3512 /* Add it to the start of the first bblock */
3513 if (cfg->bb_entry->code) {
/* Prepend by hand: the load must execute before any existing entry code */
3514 getaddr->next = cfg->bb_entry->code;
3515 cfg->bb_entry->code = getaddr;
3518 MONO_ADD_INS (cfg->bb_entry, getaddr);
3520 cfg->got_var_allocated = TRUE;
3523 * Add a dummy use to keep the got_var alive, since real uses might
3524 * only be generated by the back ends.
3525 * Add it to end_bblock, so the variable's lifetime covers the whole
3527 * It would be better to make the usage of the got var explicit in all
3528 * cases when the backend needs it (i.e. calls, throw etc.), so this
3529 * wouldn't be needed.
3531 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3532 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* IL code-size limit for inlining; lazily initialized from the
 * MONO_INLINELIMIT environment variable (or INLINE_LENGTH_LIMIT) by
 * mono_method_check_inlining (). */
3535 static int inline_limit;
3536 static gboolean inline_limit_inited;
3539 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
/*
 * Decide whether METHOD is eligible for inlining into the method being
 * compiled in CFG.  Rejects generic-shared compilations, deep inline
 * recursion, runtime/icall/noinline/synchronized/pinvoke methods,
 * MarshalByRef classes, methods with exception clauses, bodies over the
 * size limit, classes whose cctor cannot be run eagerly, methods with
 * declarative security, and (under soft-float) methods touching R4.
 */
3541 MonoMethodHeader *header;
3543 #ifdef MONO_ARCH_SOFT_FLOAT
3544 MonoMethodSignature *sig = mono_method_signature (method);
3548 if (cfg->generic_sharing_context)
3551 if (cfg->inline_depth > 10)
3554 #ifdef MONO_ARCH_HAVE_LMF_OPS
/* With LMF ops, small icall/pinvoke methods can be inlined too */
3555 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3556 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3557 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
3561 if (method->is_inflated)
3562 /* Avoid inflating the header */
3563 header = mono_method_get_header (((MonoMethodInflated*)method)->declaring);
3565 header = mono_method_get_header (method);
3567 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME) ||
3568 (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3569 (method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3570 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3571 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) ||
3572 (method->klass->marshalbyref) ||
3573 !header || header->num_clauses)
3576 /* also consider num_locals? */
3577 /* Do the size check early to avoid creating vtables */
3578 if (!inline_limit_inited) {
3579 if (getenv ("MONO_INLINELIMIT"))
3580 inline_limit = atoi (getenv ("MONO_INLINELIMIT"));
3582 inline_limit = INLINE_LENGTH_LIMIT;
3583 inline_limit_inited = TRUE;
3585 if (header->code_size >= inline_limit)
3589 * if we can initialize the class of the method right away, we do,
3590 * otherwise we don't allow inlining if the class needs initialization,
3591 * since it would mean inserting a call to mono_runtime_class_init()
3592 * inside the inlined code
3594 if (!(cfg->opt & MONO_OPT_SHARED)) {
3595 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3596 if (cfg->run_cctors && method->klass->has_cctor) {
3597 /*FIXME it would be easier and lazier to just use mono_class_try_get_vtable */
3598 if (!method->klass->runtime_info)
3599 /* No vtable created yet */
3601 vtable = mono_class_vtable (cfg->domain, method->klass);
3604 /* This makes so that inline cannot trigger */
3605 /* .cctors: too many apps depend on them */
3606 /* running with a specific order... */
3607 if (! vtable->initialized)
3609 mono_runtime_class_init (vtable);
3611 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3612 if (!method->klass->runtime_info)
3613 /* No vtable created yet */
3615 vtable = mono_class_vtable (cfg->domain, method->klass);
3618 if (!vtable->initialized)
3623 * If we're compiling for shared code
3624 * the cctor will need to be run at aot method load time, for example,
3625 * or at the end of the compilation of the inlining method.
3627 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
3632 * CAS - do not inline methods with declarative security
3633 * Note: this has to be before any possible return TRUE;
3635 if (mono_method_has_declsec (method))
3638 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float cannot inline anything that passes or returns MONO_TYPE_R4 */
3640 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
3642 for (i = 0; i < sig->param_count; ++i)
3643 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
3651 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
/*
 * Return whether a static field access from METHOD requires emitting a
 * class-initialization check for VTABLE's class.  Already-initialized
 * classes (non-AOT), beforefieldinit classes, classes with no pending
 * cctor, and instance methods of the class itself are exempt.
 */
3653 if (vtable->initialized && !cfg->compile_aot)
3656 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
3659 if (!mono_class_needs_cctor_run (vtable->klass, method))
3662 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
3663 /* The initialization is already done before the method is called */
3670 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index)
/*
 * Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR with element class KLASS, including the bounds check.
 * Returns the instruction producing the element address (STACK_PTR).
 */
3674 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
3676 mono_class_init (klass);
3677 size = mono_class_array_element_size (klass);
3679 mult_reg = alloc_preg (cfg);
3680 array_reg = arr->dreg;
3681 index_reg = index->dreg;
3683 #if SIZEOF_REGISTER == 8
3684 /* The array reg is 64 bits but the index reg is only 32 */
3685 index2_reg = alloc_preg (cfg);
3686 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
3688 if (index->type == STACK_I8) {
/* 32 bit target: truncate a 64 bit index down to the native width */
3689 index2_reg = alloc_preg (cfg);
3690 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
3692 index2_reg = index_reg;
3696 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
3698 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Power-of-two element sizes can use a single LEA on x86/amd64 */
3699 if (size == 1 || size == 2 || size == 4 || size == 8) {
3700 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
3702 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
3703 ins->type = STACK_PTR;
/* Generic path: addr = arr + index * size + offsetof (MonoArray, vector) */
3709 add_reg = alloc_preg (cfg);
3711 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
3712 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
3713 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3714 ins->type = STACK_PTR;
3715 MONO_ADD_INS (cfg->cbb, ins);
3720 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3722 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
/*
 * Emit IR computing the address of element [i,j] of a two-dimensional
 * array, with per-dimension lower-bound adjustment and range checks read
 * from the MonoArrayBounds records.  Only built when the architecture has
 * native multiply (no MONO_ARCH_EMULATE_MUL_DIV).
 * Returns the instruction producing the element address (STACK_MP).
 */
3724 int bounds_reg = alloc_preg (cfg);
3725 int add_reg = alloc_preg (cfg);
3726 int mult_reg = alloc_preg (cfg);
3727 int mult2_reg = alloc_preg (cfg);
3728 int low1_reg = alloc_preg (cfg);
3729 int low2_reg = alloc_preg (cfg);
3730 int high1_reg = alloc_preg (cfg);
3731 int high2_reg = alloc_preg (cfg);
3732 int realidx1_reg = alloc_preg (cfg);
3733 int realidx2_reg = alloc_preg (cfg);
3734 int sum_reg = alloc_preg (cfg);
3739 mono_class_init (klass);
3740 size = mono_class_array_element_size (klass);
3742 index1 = index_ins1->dreg;
3743 index2 = index_ins2->dreg;
3745 /* range checking */
3746 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
3747 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* First dimension: realidx1 = index1 - lower_bound; unsigned compare
 * against length catches both negative and too-large indexes */
3749 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
3750 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3751 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
3752 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
3753 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
3754 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
3755 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Second dimension: same check against the next MonoArrayBounds record */
3757 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
3758 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
3759 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
3760 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
3761 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
3762 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
3763 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + (realidx1 * length2 + realidx2) * size + offsetof (vector) */
3765 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
3766 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
3767 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
3768 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
3769 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
3771 ins->type = STACK_MP;
3773 MONO_ADD_INS (cfg->cbb, ins);
3780 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
/*
 * Emit the address computation for an array Get/Set/Address accessor
 * CMETHOD.  Rank 1 (and rank 2 with intrinsics) are expanded inline;
 * higher ranks call the marshal-generated ElementAddr wrapper.
 * IS_SET excludes the trailing value argument when counting the rank.
 */
3784 MonoMethod *addr_method;
3787 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
3790 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1]);
3792 #ifndef MONO_ARCH_EMULATE_MUL_DIV
3793 /* emit_ldelema_2 depends on OP_LMUL */
3794 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
3795 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* General case: call the helper generated by the marshaller */
3799 element_size = mono_class_array_element_size (cmethod->klass->element_class);
3800 addr_method = mono_marshal_get_array_address (rank, element_size);
3801 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
3806 static MonoBreakPolicy
3807 always_insert_breakpoint (MonoMethod *method)
/* Default break policy: honor every breakpoint regardless of the method */
3809 return MONO_BREAK_POLICY_ALWAYS;
/* Embedder-overridable break policy hook; set via mono_set_break_policy () */
3812 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
3815 * mono_set_break_policy:
3816 * policy_callback: the new callback function
3818 * Allow embedders to decide whether to actually obey breakpoint instructions
3819 * (both break IL instructions and Debugger.Break () method calls), for example
3820 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
3821 * untrusted or semi-trusted code.
3823 * @policy_callback will be called every time a break point instruction needs to
3824 * be inserted with the method argument being the method that calls Debugger.Break()
3825 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
3826 * if it wants the breakpoint to not be effective in the given method.
3827 * #MONO_BREAK_POLICY_ALWAYS is the default.
3830 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
/* Install POLICY_CALLBACK as the break policy; a NULL callback restores the
 * default always-break behavior. */
3832 if (policy_callback)
3833 break_policy_func = policy_callback;
3835 break_policy_func = always_insert_breakpoint;
/*
 * Ask the installed break policy whether a breakpoint in METHOD should be
 * emitted as a real OP_BREAK or as a nop.  An unknown policy value is
 * reported with g_warning.
 * NOTE(review): the name is misspelled ("brekpoint"), but renaming would
 * break existing callers, so it is kept as-is.
 */
3839 should_insert_brekpoint (MonoMethod *method) {
3840 switch (break_policy_func (method)) {
3841 case MONO_BREAK_POLICY_ALWAYS:
3843 case MONO_BREAK_POLICY_NEVER:
3845 case MONO_BREAK_POLICY_ON_DBG:
3846 return mono_debug_using_mono_debugger ();
3848 g_warning ("Incorrect value returned from break policy callback");
3854 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
/*
 * Intrinsics: recognize calls to a set of well-known corlib methods
 * (String, Object, Array, RuntimeHelpers, Thread, Monitor, Interlocked,
 * Debugger, Environment, Math, System.Array helpers) and emit specialized
 * IR instead of a real call.  Returns the instruction producing the
 * result, or falls through to mono_arch_emit_inst_for_method () for
 * architecture-specific intrinsics.
 */
3856 MonoInst *ins = NULL;
3858 static MonoClass *runtime_helpers_class = NULL;
3859 if (! runtime_helpers_class)
3860 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
3861 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* ---- System.String intrinsics ---- */
3863 if (cmethod->klass == mono_defaults.string_class) {
3864 if (strcmp (cmethod->name, "get_Chars") == 0) {
3865 int dreg = alloc_ireg (cfg);
3866 int index_reg = alloc_preg (cfg);
3867 int mult_reg = alloc_preg (cfg);
3868 int add_reg = alloc_preg (cfg);
3870 #if SIZEOF_REGISTER == 8
3871 /* The array reg is 64 bits but the index reg is only 32 */
3872 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
3874 index_reg = args [1]->dreg;
3876 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
3878 #if defined(TARGET_X86) || defined(TARGET_AMD64)
3879 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
3880 add_reg = ins->dreg;
3881 /* Avoid a warning */
3883 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
/* Generic path: chars are 2 bytes, so scale the index by shifting left 1 */
3886 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
3887 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3888 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
3889 add_reg, G_STRUCT_OFFSET (MonoString, chars));
3891 type_from_op (ins, NULL, NULL);
3893 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3894 int dreg = alloc_ireg (cfg);
3895 /* Decompose later to allow more optimizations */
3896 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
3897 ins->type = STACK_I4;
3898 cfg->cbb->has_array_access = TRUE;
3899 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
3902 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
3903 int mult_reg = alloc_preg (cfg);
3904 int add_reg = alloc_preg (cfg);
3906 /* The corlib functions check for oob already. */
3907 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
3908 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
3909 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
/* ---- System.Object intrinsics ---- */
3912 } else if (cmethod->klass == mono_defaults.object_class) {
3914 if (strcmp (cmethod->name, "GetType") == 0) {
/* GetType: load obj->vtable->type directly */
3915 int dreg = alloc_preg (cfg);
3916 int vt_reg = alloc_preg (cfg);
3917 MONO_EMIT_NULL_CHECK (cfg, args [0]->dreg);
3918 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3919 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
3920 type_from_op (ins, NULL, NULL);
3923 #if !defined(MONO_ARCH_EMULATE_MUL_DIV) && !defined(HAVE_MOVING_COLLECTOR)
/* Non-moving GC only: hash an object from its (stable) address */
3924 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0) {
3925 int dreg = alloc_ireg (cfg);
3926 int t1 = alloc_ireg (cfg);
3928 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
3929 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
3930 ins->type = STACK_I4;
3934 } else if (strcmp (cmethod->name, ".ctor") == 0) {
/* Object..ctor is empty: a nop suffices */
3935 MONO_INST_NEW (cfg, ins, OP_NOP);
3936 MONO_ADD_INS (cfg->cbb, ins);
/* ---- System.Array intrinsics ---- */
3940 } else if (cmethod->klass == mono_defaults.array_class) {
3941 if (cmethod->name [0] != 'g')
3944 if (strcmp (cmethod->name, "get_Rank") == 0) {
3945 int dreg = alloc_ireg (cfg);
3946 int vtable_reg = alloc_preg (cfg);
3947 MONO_EMIT_NULL_CHECK (cfg, args [0]->dreg);
3948 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, vtable_reg,
3949 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3950 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
3951 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3952 type_from_op (ins, NULL, NULL);
3955 } else if (strcmp (cmethod->name, "get_Length") == 0) {
3956 int dreg = alloc_ireg (cfg);
3958 MONO_EMIT_NULL_CHECK (cfg, args [0]->dreg);
3959 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
3960 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
3961 type_from_op (ins, NULL, NULL);
/* ---- RuntimeHelpers ---- */
3966 } else if (cmethod->klass == runtime_helpers_class) {
3968 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
3969 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
/* ---- System.Threading.Thread ---- */
3973 } else if (cmethod->klass == mono_defaults.thread_class) {
3974 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
3975 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
3976 MONO_ADD_INS (cfg->cbb, ins);
3978 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
3979 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
3980 MONO_ADD_INS (cfg->cbb, ins);
/* ---- System.Threading.Monitor Enter/Exit fast paths ---- */
3983 } else if (cmethod->klass == mono_defaults.monitor_class) {
3984 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
3985 if (strcmp (cmethod->name, "Enter") == 0) {
3988 if (COMPILE_LLVM (cfg)) {
3990 * Pass the argument normally, the LLVM backend will handle the
3991 * calling convention problems.
3993 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
3995 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
3996 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
3997 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
3998 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4001 return (MonoInst*)call;
4002 } else if (strcmp (cmethod->name, "Exit") == 0) {
4005 if (COMPILE_LLVM (cfg)) {
4006 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4008 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
4009 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4010 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4011 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4014 return (MonoInst*)call;
4016 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4017 MonoMethod *fast_method = NULL;
4019 /* Avoid infinite recursion */
4020 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
4021 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
4022 strcmp (cfg->method->name, "FastMonitorExit") == 0))
4025 if (strcmp (cmethod->name, "Enter") == 0 ||
4026 strcmp (cmethod->name, "Exit") == 0)
4027 fast_method = mono_monitor_get_fast_path (cmethod);
4031 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
/* ---- System.Array::GetGenericValueImpl ---- */
4033 } else if (mini_class_is_system_array (cmethod->klass) &&
4034 strcmp (cmethod->name, "GetGenericValueImpl") == 0) {
4035 MonoInst *addr, *store, *load;
4036 MonoClass *eklass = mono_class_from_mono_type (fsig->params [1]);
4038 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1]);
4039 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4040 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* ---- System.Threading.Interlocked ---- */
4042 } else if (cmethod->klass->image == mono_defaults.corlib &&
4043 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
4044 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
4047 #if SIZEOF_REGISTER == 8
4048 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
4049 /* 64 bit reads are already atomic */
4050 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
4051 ins->dreg = mono_alloc_preg (cfg);
4052 ins->inst_basereg = args [0]->dreg;
4053 ins->inst_offset = 0;
4054 MONO_ADD_INS (cfg->cbb, ins);
4058 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
/* Increment/Decrement are emitted as atomic add of +1/-1 */
4059 if (strcmp (cmethod->name, "Increment") == 0) {
4060 MonoInst *ins_iconst;
4063 if (fsig->params [0]->type == MONO_TYPE_I4)
4064 opcode = OP_ATOMIC_ADD_NEW_I4;
4065 #if SIZEOF_REGISTER == 8
4066 else if (fsig->params [0]->type == MONO_TYPE_I8)
4067 opcode = OP_ATOMIC_ADD_NEW_I8;
4070 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4071 ins_iconst->inst_c0 = 1;
4072 ins_iconst->dreg = mono_alloc_ireg (cfg);
4073 MONO_ADD_INS (cfg->cbb, ins_iconst);
4075 MONO_INST_NEW (cfg, ins, opcode);
4076 ins->dreg = mono_alloc_ireg (cfg);
4077 ins->inst_basereg = args [0]->dreg;
4078 ins->inst_offset = 0;
4079 ins->sreg2 = ins_iconst->dreg;
4080 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4081 MONO_ADD_INS (cfg->cbb, ins);
4083 } else if (strcmp (cmethod->name, "Decrement") == 0) {
4084 MonoInst *ins_iconst;
4087 if (fsig->params [0]->type == MONO_TYPE_I4)
4088 opcode = OP_ATOMIC_ADD_NEW_I4;
4089 #if SIZEOF_REGISTER == 8
4090 else if (fsig->params [0]->type == MONO_TYPE_I8)
4091 opcode = OP_ATOMIC_ADD_NEW_I8;
4094 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4095 ins_iconst->inst_c0 = -1;
4096 ins_iconst->dreg = mono_alloc_ireg (cfg);
4097 MONO_ADD_INS (cfg->cbb, ins_iconst);
4099 MONO_INST_NEW (cfg, ins, opcode);
4100 ins->dreg = mono_alloc_ireg (cfg);
4101 ins->inst_basereg = args [0]->dreg;
4102 ins->inst_offset = 0;
4103 ins->sreg2 = ins_iconst->dreg;
4104 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4105 MONO_ADD_INS (cfg->cbb, ins);
4107 } else if (strcmp (cmethod->name, "Add") == 0) {
4110 if (fsig->params [0]->type == MONO_TYPE_I4)
4111 opcode = OP_ATOMIC_ADD_NEW_I4;
4112 #if SIZEOF_REGISTER == 8
4113 else if (fsig->params [0]->type == MONO_TYPE_I8)
4114 opcode = OP_ATOMIC_ADD_NEW_I8;
4118 MONO_INST_NEW (cfg, ins, opcode);
4119 ins->dreg = mono_alloc_ireg (cfg);
4120 ins->inst_basereg = args [0]->dreg;
4121 ins->inst_offset = 0;
4122 ins->sreg2 = args [1]->dreg;
4123 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4124 MONO_ADD_INS (cfg->cbb, ins);
4127 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
4129 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
4130 if (strcmp (cmethod->name, "Exchange") == 0) {
4132 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
4134 if (fsig->params [0]->type == MONO_TYPE_I4)
4135 opcode = OP_ATOMIC_EXCHANGE_I4;
4136 #if SIZEOF_REGISTER == 8
4137 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
4138 (fsig->params [0]->type == MONO_TYPE_I))
4139 opcode = OP_ATOMIC_EXCHANGE_I8;
4141 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
4142 opcode = OP_ATOMIC_EXCHANGE_I4;
4147 MONO_INST_NEW (cfg, ins, opcode);
4148 ins->dreg = mono_alloc_ireg (cfg);
4149 ins->inst_basereg = args [0]->dreg;
4150 ins->inst_offset = 0;
4151 ins->sreg2 = args [1]->dreg;
4152 MONO_ADD_INS (cfg->cbb, ins);
4154 switch (fsig->params [0]->type) {
4156 ins->type = STACK_I4;
4160 ins->type = STACK_I8;
4162 case MONO_TYPE_OBJECT:
4163 ins->type = STACK_OBJ;
4166 g_assert_not_reached ();
4169 #if HAVE_WRITE_BARRIERS
/* Exchanging a reference into the heap needs a GC write barrier */
4171 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
4172 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
4176 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4178 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
4179 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
4181 gboolean is_ref = MONO_TYPE_IS_REFERENCE (fsig->params [1]);
4182 if (fsig->params [1]->type == MONO_TYPE_I4)
4184 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
4185 size = sizeof (gpointer);
4186 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I4)
4189 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
4190 ins->dreg = alloc_ireg (cfg);
4191 ins->sreg1 = args [0]->dreg;
4192 ins->sreg2 = args [1]->dreg;
4193 ins->sreg3 = args [2]->dreg;
4194 ins->type = STACK_I4;
4195 MONO_ADD_INS (cfg->cbb, ins);
4196 } else if (size == 8) {
4197 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
4198 ins->dreg = alloc_ireg (cfg);
4199 ins->sreg1 = args [0]->dreg;
4200 ins->sreg2 = args [1]->dreg;
4201 ins->sreg3 = args [2]->dreg;
4202 ins->type = STACK_I8;
4203 MONO_ADD_INS (cfg->cbb, ins);
4205 /* g_assert_not_reached (); */
4207 #if HAVE_WRITE_BARRIERS
4209 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
4210 mono_emit_method_call (cfg, write_barrier, &args [0], NULL);
4214 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
/* ---- Debugger.Break / Environment.get_IsRunningOnWindows ---- */
4218 } else if (cmethod->klass->image == mono_defaults.corlib) {
4219 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
4220 && strcmp (cmethod->klass->name, "Debugger") == 0) {
4221 if (should_insert_brekpoint (cfg->method))
4222 MONO_INST_NEW (cfg, ins, OP_BREAK);
4224 MONO_INST_NEW (cfg, ins, OP_NOP);
4225 MONO_ADD_INS (cfg->cbb, ins);
4228 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
4229 && strcmp (cmethod->klass->name, "Environment") == 0) {
4231 EMIT_NEW_ICONST (cfg, ins, 1);
4233 EMIT_NEW_ICONST (cfg, ins, 0);
/* ---- System.Math: handled by the arch backend (see below) ---- */
4237 } else if (cmethod->klass == mono_defaults.math_class) {
4239 * There is general branches code for Min/Max, but it does not work for
4241 * http://everything2.com/?node_id=1051618
4245 #ifdef MONO_ARCH_SIMD_INTRINSICS
4246 if (cfg->opt & MONO_OPT_SIMD) {
4247 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/* No generic intrinsic matched: give the architecture backend a chance */
4253 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
4257 * This entry point could be used later for arbitrary method
4260 inline static MonoInst*
4261 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4262 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
/*
 * Redirect selected internal calls to managed implementations; currently
 * only String.InternalAllocateStr, which is rewritten into a call to the
 * GC's managed allocator (unless string-allocation profiling is active).
 * Returns the call instruction, or NULL when no redirection applies.
 */
4264 if (method->klass == mono_defaults.string_class) {
4265 /* managed string allocation support */
4266 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_STRING_ALLOC)) {
4267 MonoInst *iargs [2];
4268 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4269 MonoMethod *managed_alloc = NULL;
4271 g_assert (vtable); /*Should not fail since it System.String*/
4272 #ifndef MONO_CROSS_COMPILE
4273 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
4277 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4278 iargs [1] = args [0];
4279 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
4286 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
/*
 * When inlining, create a local variable for every argument of SIG (the
 * callee's signature) and store the values from the caller's stack SP into
 * them, so the inlined body can reference cfg->args like a normal method.
 */
4288 MonoInst *store, *temp;
4291 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4292 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4295 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4296 * would be different than the MonoInst's used to represent arguments, and
4297 * the ldelema implementation can't deal with that.
4298 * Solution: When ldelema is used on an inline argument, create a var for
4299 * it, emit ldelema on that var, and emit the saving code below in
4300 * inline_method () if needed.
4302 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4303 cfg->args [i] = temp;
4304 /* This uses cfg->args [i] which is set by the preceding line */
4305 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4306 store->cil_code = sp [0]->cil_code;
/* Debug aids: restrict inlining to methods whose full name matches the
 * MONO_INLINE_{CALLED,CALLER}_METHOD_NAME_LIMIT environment variables. */
4311 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4312 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4314 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4316 check_inline_called_method_name_limit (MonoMethod *called_method)
/* Return whether CALLED_METHOD's full name starts with the configured
 * prefix (an unset/empty variable allows everything). The env value is
 * read once and cached in a static. */
4319 static char *limit = NULL;
4321 if (limit == NULL) {
4322 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4324 if (limit_string != NULL)
4325 limit = limit_string;
4327 limit = (char *) "";
4330 if (limit [0] != '\0') {
4331 char *called_method_name = mono_method_full_name (called_method, TRUE);
4333 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4334 g_free (called_method_name);
4336 //return (strncmp_result <= 0);
4337 return (strncmp_result == 0);
4344 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4346 check_inline_caller_method_name_limit (MonoMethod *caller_method)
/* Caller-side counterpart of check_inline_called_method_name_limit ():
 * only allow inlining when CALLER_METHOD's full name starts with the
 * prefix given in MONO_INLINE_CALLER_METHOD_NAME_LIMIT. */
4349 static char *limit = NULL;
4351 if (limit == NULL) {
4352 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4353 if (limit_string != NULL) {
4354 limit = limit_string;
4356 limit = (char *) "";
4360 if (limit [0] != '\0') {
4361 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
4363 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4364 g_free (caller_method_name);
4366 //return (strncmp_result <= 0);
4367 return (strncmp_result == 0);
4375 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4376 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_allways)
/*
 * Inline CMETHOD into the method being compiled: save the parts of CFG
 * state that mono_method_to_ir () will clobber, run the IR conversion of
 * the callee between fresh start/end bblocks, then either commit (merging
 * the new bblocks into the caller's flow graph) or roll back when the
 * conversion failed or its cost was too high.  Returns the cost on
 * success (caller treats it as "inlined"), restores cfg->cbb on abort.
 */
4378 MonoInst *ins, *rvar = NULL;
4379 MonoMethodHeader *cheader;
4380 MonoBasicBlock *ebblock, *sbblock;
4382 MonoMethod *prev_inlined_method;
4383 MonoInst **prev_locals, **prev_args;
4384 MonoType **prev_arg_types;
4385 guint prev_real_offset;
4386 GHashTable *prev_cbb_hash;
4387 MonoBasicBlock **prev_cil_offset_to_bb;
4388 MonoBasicBlock *prev_cbb;
4389 unsigned char* prev_cil_start;
4390 guint32 prev_cil_offset_to_bb_len;
4391 MonoMethod *prev_current_method;
4392 MonoGenericContext *prev_generic_context;
4393 gboolean ret_var_set, prev_ret_var_set;
4395 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
4397 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4398 if ((! inline_allways) && ! check_inline_called_method_name_limit (cmethod))
4401 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4402 if ((! inline_allways) && ! check_inline_caller_method_name_limit (cfg->method))
4406 if (cfg->verbose_level > 2)
4407 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4409 if (!cmethod->inline_info) {
4410 mono_jit_stats.inlineable_methods++;
4411 cmethod->inline_info = 1;
4413 /* allocate space to store the return value */
4414 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4415 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
4418 /* allocate local variables */
4419 cheader = mono_method_get_header (cmethod);
4420 prev_locals = cfg->locals;
4421 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4422 for (i = 0; i < cheader->num_locals; ++i)
4423 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4425 /* allocate start and end blocks */
4426 /* This is needed so if the inline is aborted, we can clean up */
4427 NEW_BBLOCK (cfg, sbblock);
4428 sbblock->real_offset = real_offset;
4430 NEW_BBLOCK (cfg, ebblock);
4431 ebblock->block_num = cfg->num_bblocks++;
4432 ebblock->real_offset = real_offset;
/* Save every piece of per-method CFG state that mono_method_to_ir ()
 * overwrites, so it can be restored whether or not the inline succeeds */
4434 prev_args = cfg->args;
4435 prev_arg_types = cfg->arg_types;
4436 prev_inlined_method = cfg->inlined_method;
4437 cfg->inlined_method = cmethod;
4438 cfg->ret_var_set = FALSE;
4439 cfg->inline_depth ++;
4440 prev_real_offset = cfg->real_offset;
4441 prev_cbb_hash = cfg->cbb_hash;
4442 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4443 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4444 prev_cil_start = cfg->cil_start;
4445 prev_cbb = cfg->cbb;
4446 prev_current_method = cfg->current_method;
4447 prev_generic_context = cfg->generic_context;
4448 prev_ret_var_set = cfg->ret_var_set;
4450 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, *ip == CEE_CALLVIRT);
4452 ret_var_set = cfg->ret_var_set;
4454 cfg->inlined_method = prev_inlined_method;
4455 cfg->real_offset = prev_real_offset;
4456 cfg->cbb_hash = prev_cbb_hash;
4457 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4458 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4459 cfg->cil_start = prev_cil_start;
4460 cfg->locals = prev_locals;
4461 cfg->args = prev_args;
4462 cfg->arg_types = prev_arg_types;
4463 cfg->current_method = prev_current_method;
4464 cfg->generic_context = prev_generic_context;
4465 cfg->ret_var_set = prev_ret_var_set;
4466 cfg->inline_depth --;
/* Success path: cost below threshold (or inlining was forced) */
4468 if ((costs >= 0 && costs < 60) || inline_allways) {
4469 if (cfg->verbose_level > 2)
4470 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4472 mono_jit_stats.inlined_methods++;
4474 /* always add some code to avoid block split failures */
4475 MONO_INST_NEW (cfg, ins, OP_NOP);
4476 MONO_ADD_INS (prev_cbb, ins);
4478 prev_cbb->next_bb = sbblock;
4479 link_bblock (cfg, prev_cbb, sbblock);
4482 * Get rid of the begin and end bblocks if possible to aid local
4485 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4487 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4488 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4490 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4491 MonoBasicBlock *prev = ebblock->in_bb [0];
4492 mono_merge_basic_blocks (cfg, prev, ebblock);
4494 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4495 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4496 cfg->cbb = prev_cbb;
4504 * If the inlined method contains only a throw, then the ret var is not
4505 * set, so set it to a dummy value.
4508 static double r8_0 = 0.0;
4510 switch (rvar->type) {
4512 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
4515 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
4520 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
4523 MONO_INST_NEW (cfg, ins, OP_R8CONST);
4524 ins->type = STACK_R8;
4525 ins->inst_p0 = (void*)&r8_0;
4526 ins->dreg = rvar->dreg;
4527 MONO_ADD_INS (cfg->cbb, ins);
4530 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
4533 g_assert_not_reached ();
4537 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
/* Abort path: clear the exception state and roll back to the caller's
 * current bblock, discarding the bblocks added for the callee */
4542 if (cfg->verbose_level > 2)
4543 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
4544 cfg->exception_type = MONO_EXCEPTION_NONE;
4545 mono_loader_clear_error ();
4547 /* This gets rid of the newly added bblocks */
4548 cfg->cbb = prev_cbb;
4554 * Some of these comments may well be out-of-date.
4555 * Design decisions: we do a single pass over the IL code (and we do bblock
4556 * splitting/merging in the few cases when it's required: a back jump to an IL
4557 * address that was not already seen as bblock starting point).
4558 * Code is validated as we go (full verification is still better left to metadata/verify.c).
4559 * Complex operations are decomposed in simpler ones right away. We need to let the
4560 * arch-specific code peek and poke inside this process somehow (except when the
4561 * optimizations can take advantage of the full semantic info of coarse opcodes).
4562 * All the opcodes of the form opcode.s are 'normalized' to opcode.
4563 * MonoInst->opcode initially is the IL opcode or some simplification of that
4564 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
4565 * opcode with value bigger than OP_LAST.
4566 * At this point the IR can be handed over to an interpreter, a dumb code generator
4567 * or to the optimizing code generator that will translate it to SSA form.
4569 * Profiling directed optimizations.
4570 * We may compile by default with few or no optimizations and instrument the code
4571 * or the user may indicate what methods to optimize the most either in a config file
4572 * or through repeated runs where the compiler applies offline the optimizations to
4573 * each method and then decides if it was worth it.
/*
 * Verification/consistency helpers used by mono_method_to_ir ().
 * On failure each one transfers control to the unverified / load-error
 * handling code of the enclosing function.
 */
#define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
#define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
#define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
#define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
#define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
#define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
#define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
/* Wrapped in do { } while (0) so the multi-statement body acts as a single
 * statement at the call site (avoids the dangling-else hazard of a bare
 * `if (...) { ... }` macro). */
#define CHECK_TYPELOAD(klass) do { if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; goto load_error;} } while (0)
4585 /* offset from br.s -> br like opcodes */
4586 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Returns TRUE if the IL address IP either has no basic block assigned to
 * it yet, or is assigned to BB itself — i.e. it is still "inside" BB for
 * the purposes of peephole-style lookahead during IR generation.
 */
ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
	MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
	/* NULL means no bblock starts at this IL offset */
	return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Pre-scan the IL stream between START and END, registering (via
 * GET_BBLOCK) a basic block at every branch/switch target and at the
 * instruction following a branch, so the main conversion pass knows all
 * bblock boundaries in advance.  The bblock containing a CEE_THROW is
 * additionally flagged out_of_line (treated as cold code).
 */
get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
	unsigned char *ip = start;
	unsigned char *target;
	MonoBasicBlock *bblock;
	const MonoOpcode *opcode;
		cli_addr = ip - start;
		i = mono_opcode_value ((const guint8 **)&ip, end);
		opcode = &mono_opcodes [i];
		/* dispatch on the operand encoding of the opcode */
		switch (opcode->argument) {
		case MonoInlineNone:
		case MonoInlineString:
		case MonoInlineType:
		case MonoInlineField:
		case MonoInlineMethod:
		case MonoShortInlineR:
		case MonoShortInlineVar:
		case MonoShortInlineI:
		case MonoShortInlineBrTarget:
			/* 1-byte signed branch displacement, relative to the next instruction */
			target = start + cli_addr + 2 + (signed char)ip [1];
			GET_BBLOCK (cfg, bblock, target);
			GET_BBLOCK (cfg, bblock, ip);
		case MonoInlineBrTarget:
			/* 4-byte signed branch displacement */
			target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
			GET_BBLOCK (cfg, bblock, target);
			GET_BBLOCK (cfg, bblock, ip);
		case MonoInlineSwitch: {
			guint32 n = read32 (ip + 1);
			/* targets are relative to the end of the whole switch instruction */
			cli_addr += 5 + 4 * n;
			target = start + cli_addr;
			GET_BBLOCK (cfg, bblock, target);
			for (j = 0; j < n; ++j) {
				target = start + cli_addr + (gint32)read32 (ip);
				GET_BBLOCK (cfg, bblock, target);
			g_assert_not_reached ();
		if (i == CEE_THROW) {
			unsigned char *bb_start = ip - 1;
			/* Find the start of the bblock containing the throw */
			while ((bb_start >= start) && !bblock) {
				bblock = cfg->cil_offset_to_bb [(bb_start) - start];
			/* mark the throwing bblock as cold code */
			bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN to a MonoMethod in the context of M.  For wrapper methods
 * the token indexes the wrapper's private data; otherwise it is a normal
 * metadata token resolved against M's image.  Unlike mini_get_method (),
 * methods on open constructed types are not rejected here.
 */
static inline MonoMethod *
mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
	if (m->wrapper_type != MONO_WRAPPER_NONE)
		return mono_method_get_wrapper_data (m, token);
	method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when not compiling with a
 * generic sharing context, methods whose declaring class is an open
 * constructed type receive special handling (see the condition below).
 */
static inline MonoMethod *
mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
	MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
	if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass in the context of METHOD (wrapper data for
 * wrappers, metadata token otherwise) and make sure the class is initialized.
 */
static inline MonoClass*
mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
	if (method->wrapper_type != MONO_WRAPPER_NONE)
		klass = mono_method_get_wrapper_data (method, token);
		klass = mono_class_get_full (method->klass->image, token, context);
	mono_class_init (klass);
/*
 * check_linkdemand:
 *
 *   Returns TRUE if the JIT should abort inlining because "callee"
 * is influenced by security attributes (CAS link demands).  For ECMA
 * link demands, code throwing a SecurityException is emitted in place of
 * a compile-time failure; other failures are recorded on CFG.
 */
gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
	/* cfg->method != caller means CALLER is currently being inlined */
	if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
	result = mono_declsec_linkdemand (cfg->domain, caller, callee);
	if (result == MONO_JIT_SECURITY_OK)
	if (result == MONO_JIT_LINKDEMAND_ECMA) {
		/* Generate code to throw a SecurityException before the actual call/link */
		MonoSecurityManager *secman = mono_security_manager_get_methods ();
		NEW_ICONST (cfg, args [0], 4);
		NEW_METHODCONST (cfg, args [1], caller);
		mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
	} else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
		/* don't hide previous results */
		cfg->exception_type = MONO_EXCEPTION_SECURITY_LINKDEMAND;
		cfg->exception_data = result;
/*
 * throw_exception:
 *
 *   Returns the SecurityManager::ThrowException(exception) method, looked
 * up lazily and cached in a function-local static across calls.
 */
throw_exception (void)
	static MonoMethod *method = NULL;
	MonoSecurityManager *secman = mono_security_manager_get_methods ();
	method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit IR that calls SecurityManager::ThrowException with the
 * pre-constructed exception object EX.
 */
emit_throw_exception (MonoCompile *cfg, MonoException *ex)
	MonoMethod *thrower = throw_exception ();
	/* the exception object is passed as a raw pointer constant */
	EMIT_NEW_PCONST (cfg, args [0], ex);
	mono_emit_method_call (cfg, thrower, args, NULL);
/*
 * get_original_method:
 *
 *   Return the original method if a wrapper is specified.  We can only
 * access the custom attributes from the original method.
 */
get_original_method (MonoMethod *method)
	if (method->wrapper_type == MONO_WRAPPER_NONE)
	/* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
	if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
	/* in other cases we need to find the original method */
	return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: if FIELD belongs to a [SecurityCritical] class
 * and CALLER is Transparent, emit code throwing a FieldAccessException.
 */
ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
					  MonoBasicBlock *bblock, unsigned char *ip)
	/* there's no restriction to access Transparent or SafeCritical fields, so we only check calls to Critical methods */
	if (mono_security_core_clr_class_level (mono_field_get_parent (field)) != MONO_SECURITY_CORE_CLR_CRITICAL)
	/* we can't get the coreclr security level on wrappers since they don't have the attributes */
	caller = get_original_method (caller);
	/* caller is Critical! only SafeCritical and Critical callers can access the field, so we throw if caller is Transparent */
	if (mono_security_core_clr_method_level (caller, TRUE) == MONO_SECURITY_CORE_CLR_TRANSPARENT)
		emit_throw_exception (cfg, mono_get_exception_field_access ());
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: if CALLEE is [SecurityCritical] and CALLER is
 * Transparent, emit code throwing a MethodAccessException.
 */
ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
					 MonoBasicBlock *bblock, unsigned char *ip)
	/* there's no restriction to call Transparent or SafeCritical code, so we only check calls to Critical methods */
	if (mono_security_core_clr_method_level (callee, TRUE) != MONO_SECURITY_CORE_CLR_CRITICAL)
	/* we can't get the coreclr security level on wrappers since they don't have the attributes */
	caller = get_original_method (caller);
	/* caller is Critical! only SafeCritical and Critical callers can call it, so we throw if the caller is Transparent */
	if (mono_security_core_clr_method_level (caller, TRUE) == MONO_SECURITY_CORE_CLR_TRANSPARENT)
		emit_throw_exception (cfg, mono_get_exception_method_access ());
/*
 * initialize_array_data:
 *
 *   Check that the IL instructions at ip are the array initialization
 * sequence and return the pointer to the data and the size.  Recognizes
 * the pattern the C# compiler emits for "new T[] { ... }" constants:
 */
initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
	/*
	 * newarr[System.Int32]
	 * dup
	 * ldtoken field valuetype ...
	 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
	 */
	if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
		guint32 token = read32 (ip + 7);
		guint32 field_token = read32 (ip + 2);
		guint32 field_index = field_token & 0xffffff;
		const char *data_ptr;
		MonoMethod *cmethod;
		MonoClass *dummy_class;
		MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
		*out_field_token = field_token;
		cmethod = mini_get_method (NULL, method, token, NULL, NULL);
		/* the call must really be RuntimeHelpers::InitializeArray from corlib */
		if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
		switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
		case MONO_TYPE_BOOLEAN:
		/* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
#if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
		case MONO_TYPE_CHAR:
			return NULL; /* stupid ARM FP swapped format */
		/* the RVA blob must be at least as large as the array data we hand out */
		if (size > mono_type_size (field->type, &dummy_align))
		/*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
		if (!method->klass->image->dynamic) {
			field_index = read32 (ip + 2) & 0xffffff;
			mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
			data_ptr = mono_image_rva_map (method->klass->image, rva);
			/*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
			/* for aot code we do the lookup on load */
			if (aot && data_ptr)
				return GUINT_TO_POINTER (rva);
			/*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
			data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG, with a message naming METHOD
 * and disassembling the offending IL instruction at IP.
 */
set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
	char *method_fname = mono_method_full_name (method, TRUE);
	if (mono_method_get_header (method)->code_size == 0)
		method_code = g_strdup ("method body is empty.");
		method_code = mono_disasm_code_one (NULL, method, ip, NULL);
	cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
	cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
	g_free (method_fname);
	g_free (method_code);
/*
 * set_exception_object:
 *
 *   Record a pre-built EXCEPTION object as the compilation failure of CFG.
 * cfg->exception_ptr is registered as a GC root so the object stays alive.
 */
set_exception_object (MonoCompile *cfg, MonoException *exception)
	cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
	MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
	cfg->exception_ptr = exception;
/*
 * generic_class_is_reference_type:
 *
 *   Returns TRUE if KLASS is a reference type, resolving generic type
 * variables through the generic sharing context when one is active.
 */
generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
	if (cfg->generic_sharing_context)
		type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, &klass->byval_arg);
		type = &klass->byval_arg;
	return MONO_TYPE_IS_REFERENCE (type);
/*
 * mono_decompose_array_access_opts:
 *
 *   Decompose array access opcodes (LDLEN, BOUNDS_CHECK, NEWARR, STRLEN)
 * into loads/calls.  This should be in decompose.c, but it emits calls so
 * it has to stay here until the old JIT is gone.
 */
mono_decompose_array_access_opts (MonoCompile *cfg)
	MonoBasicBlock *bb, *first_bb;
	/*
	 * Unlike decompose_long_opts, this pass does not alter the CFG of the method so it
	 * can be executed anytime.  It should be run before decompose_long.
	 */
	/*
	 * Create a dummy bblock and emit code into it so we can use the normal
	 * code generation macros.
	 */
	cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
	first_bb = cfg->cbb;
	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
		MonoInst *prev = NULL;
		MonoInst *iargs [3];
		/* skip bblocks with nothing to decompose */
		if (!bb->has_array_access)
		if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE DECOMPOSE-ARRAY-ACCESS-OPTS ");
		cfg->cbb->code = cfg->cbb->last_ins = NULL;
		for (ins = bb->code; ins; ins = ins->next) {
			switch (ins->opcode) {
				/* array length: null check + load of MonoArray::max_length */
				MONO_EMIT_NULL_CHECK (cfg, ins->sreg1);
				NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg, ins->sreg1,
						  G_STRUCT_OFFSET (MonoArray, max_length));
				MONO_ADD_INS (cfg->cbb, dest);
			case OP_BOUNDS_CHECK:
				MONO_EMIT_NULL_CHECK (cfg, ins->sreg1); \
				MONO_ARCH_EMIT_BOUNDS_CHECK (cfg, ins->sreg1, ins->inst_imm, ins->sreg2);
				/* shared code must look up the domain at runtime */
				if (cfg->opt & MONO_OPT_SHARED) {
					EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
					EMIT_NEW_CLASSCONST (cfg, iargs [1], ins->inst_newa_class);
					MONO_INST_NEW (cfg, iargs [2], OP_MOVE);
					iargs [2]->dreg = ins->sreg1;
					dest = mono_emit_jit_icall (cfg, mono_array_new, iargs);
					dest->dreg = ins->dreg;
					MonoVTable *vtable = mono_class_vtable (cfg->domain, mono_array_class_get (ins->inst_newa_class, 1));
					MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (vtable, 1);
					g_assert (vtable); /*This shall not fail since we check for this condition on OP_NEWARR creation*/
					NEW_VTABLECONST (cfg, iargs [0], vtable);
					MONO_ADD_INS (cfg->cbb, iargs [0]);
					MONO_INST_NEW (cfg, iargs [1], OP_MOVE);
					iargs [1]->dreg = ins->sreg1;
						dest = mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
						dest = mono_emit_jit_icall (cfg, mono_array_new_specific, iargs);
					dest->dreg = ins->dreg;
				/* string length: null check + load of MonoString::length */
				MONO_EMIT_NULL_CHECK (cfg, ins->sreg1);
				NEW_LOAD_MEMBASE (cfg, dest, OP_LOADI4_MEMBASE, ins->dreg,
						  ins->sreg1, G_STRUCT_OFFSET (MonoString, length));
				MONO_ADD_INS (cfg->cbb, dest);
		g_assert (cfg->cbb == first_bb);
		if (cfg->cbb->code || (cfg->cbb != first_bb)) {
			/* Replace the original instruction with the new code sequence */
			mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
			/* reset the scratch bblock for the next instruction */
			first_bb->code = first_bb->last_ins = NULL;
			first_bb->in_count = first_bb->out_count = 0;
			cfg->cbb = first_bb;
		if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER DECOMPOSE-ARRAY-ACCESS-OPTS ");
#ifdef MONO_ARCH_SOFT_FLOAT

/*
 * mono_decompose_soft_float:
 *
 *   Soft float support on ARM. We store each double value in a pair of
 * integer vregs, similar to long support on 32 bit platforms. 32 bit float
 * values require special handling when used as locals, arguments, and in
 * calls.
 * One big problem with soft-float is that there are few r4 test cases in
 * our test suite.
 */
mono_decompose_soft_float (MonoCompile *cfg)
	MonoBasicBlock *bb, *first_bb;
	/*
	 * This pass creates long opcodes, so it should be run before
	 * decompose_long_opts ().
	 */
	/*
	 * Create a dummy bblock and emit code into it so we can use the normal
	 * code generation macros.
	 */
	cfg->cbb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
	first_bb = cfg->cbb;
	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
		MonoInst *prev = NULL;
		if (cfg->verbose_level > 3) mono_print_bb (bb, "BEFORE HANDLE-SOFT-FLOAT ");
		cfg->cbb->code = cfg->cbb->last_ins = NULL;
		for (ins = bb->code; ins; ins = ins->next) {
			const char *spec = INS_INFO (ins->opcode);
			/* Most fp operations are handled automatically by opcode emulation */
			switch (ins->opcode) {
				/* r8 constant: reinterpret the bits as an i8 constant */
				d.vald = *(double*)ins->inst_p0;
				MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
				/* We load the r8 value */
				d.vald = *(float*)ins->inst_p0;
				MONO_EMIT_NEW_I8CONST (cfg, ins->dreg, d.vall);
				/* fp moves become long/int moves over the backing vreg pair */
				ins->opcode = OP_LMOVE;
				ins->opcode = OP_MOVE;
				ins->sreg1 = ins->sreg1 + 1;
				ins->opcode = OP_MOVE;
				ins->sreg1 = ins->sreg1 + 2;
				int reg = ins->sreg1;
				/* return an fp value in the two halves of the vreg pair */
				ins->opcode = OP_SETLRET;
				ins->sreg1 = reg + 1;
				ins->sreg2 = reg + 2;
			case OP_LOADR8_MEMBASE:
				ins->opcode = OP_LOADI8_MEMBASE;
			case OP_STORER8_MEMBASE_REG:
				ins->opcode = OP_STOREI8_MEMBASE_REG;
			case OP_STORER4_MEMBASE_REG: {
				/* r4 stores go through the mono_fstore_r4 icall */
				MonoInst *iargs [2];
				/* Arg 1 is the double value */
				MONO_INST_NEW (cfg, iargs [0], OP_ARG);
				iargs [0]->dreg = ins->sreg1;
				/* Arg 2 is the address to store to */
				addr_reg = mono_alloc_preg (cfg);
				EMIT_NEW_BIALU_IMM (cfg, iargs [1], OP_PADD_IMM, addr_reg, ins->inst_destbasereg, ins->inst_offset);
				mono_emit_jit_icall (cfg, mono_fstore_r4, iargs);
			case OP_LOADR4_MEMBASE: {
				/* r4 loads go through the mono_fload_r4 icall */
				MonoInst *iargs [1];
				addr_reg = mono_alloc_preg (cfg);
				EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, addr_reg, ins->inst_basereg, ins->inst_offset);
				conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
				conv->dreg = ins->dreg;
			case OP_FCALL_MEMBASE: {
				MonoCallInst *call = (MonoCallInst*)ins;
				if (call->signature->ret->type == MONO_TYPE_R4) {
					MonoCallInst *call2;
					MonoInst *iargs [1];
					/* Convert the call into a call returning an int */
					MONO_INST_NEW_CALL (cfg, call2, OP_CALL);
					memcpy (call2, call, sizeof (MonoCallInst));
					switch (ins->opcode) {
						call2->inst.opcode = OP_CALL;
						call2->inst.opcode = OP_CALL_REG;
					case OP_FCALL_MEMBASE:
						call2->inst.opcode = OP_CALL_MEMBASE;
						g_assert_not_reached ();
					call2->inst.dreg = mono_alloc_ireg (cfg);
					MONO_ADD_INS (cfg->cbb, (MonoInst*)call2);
					/* FIXME: Optimize this */
					/* Emit an r4->r8 conversion */
					EMIT_NEW_VARLOADA_VREG (cfg, iargs [0], call2->inst.dreg, &mono_defaults.int32_class->byval_arg);
					conv = mono_emit_jit_icall (cfg, mono_fload_r4, iargs);
					conv->dreg = ins->dreg;
					/* The call sequence might include fp ins */
					/* r8-returning calls become long calls over the vreg pair */
					switch (ins->opcode) {
						ins->opcode = OP_LCALL;
						ins->opcode = OP_LCALL_REG;
					case OP_FCALL_MEMBASE:
						ins->opcode = OP_LCALL_MEMBASE;
						g_assert_not_reached ();
				MonoJitICallInfo *info;
				MonoInst *iargs [2];
				MonoInst *call, *cmp, *br;
				/* Convert fcompare+fbcc to icall+icompare+beq */
				info = mono_find_jit_opcode_emulation (ins->next->opcode);
				/* Create dummy MonoInst's for the arguments */
				MONO_INST_NEW (cfg, iargs [0], OP_ARG);
				iargs [0]->dreg = ins->sreg1;
				MONO_INST_NEW (cfg, iargs [1], OP_ARG);
				iargs [1]->dreg = ins->sreg2;
				call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
				MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
				cmp->sreg1 = call->dreg;
				MONO_ADD_INS (cfg->cbb, cmp);
				MONO_INST_NEW (cfg, br, OP_IBNE_UN);
				br->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * 2);
				br->inst_true_bb = ins->next->inst_true_bb;
				br->inst_false_bb = ins->next->inst_false_bb;
				MONO_ADD_INS (cfg->cbb, br);
				/* The call sequence might include fp ins */
				/* Skip fbcc or fccc */
				NULLIFY_INS (ins->next);
				MonoJitICallInfo *info;
				MonoInst *iargs [2];
				/* Convert fccc to icall+icompare+iceq */
				info = mono_find_jit_opcode_emulation (ins->opcode);
				/* Create dummy MonoInst's for the arguments */
				MONO_INST_NEW (cfg, iargs [0], OP_ARG);
				iargs [0]->dreg = ins->sreg1;
				MONO_INST_NEW (cfg, iargs [1], OP_ARG);
				iargs [1]->dreg = ins->sreg2;
				call = mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, iargs);
				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, call->dreg, 1);
				MONO_EMIT_NEW_UNALU (cfg, OP_ICEQ, ins->dreg, -1);
				/* The call sequence might include fp ins */
				MonoInst *iargs [2];
				MonoInst *call, *cmp;
				/* Convert to icall+icompare+cond_exc+move */
				/* Create dummy MonoInst's for the arguments */
				MONO_INST_NEW (cfg, iargs [0], OP_ARG);
				iargs [0]->dreg = ins->sreg1;
				call = mono_emit_jit_icall (cfg, mono_isfinite, iargs);
				MONO_INST_NEW (cfg, cmp, OP_ICOMPARE_IMM);
				cmp->sreg1 = call->dreg;
				MONO_ADD_INS (cfg->cbb, cmp);
				MONO_EMIT_NEW_COND_EXC (cfg, INE_UN, "ArithmeticException");
				/* Do the assignment if the value is finite */
				MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, ins->dreg, ins->sreg1);
			/* any remaining instruction touching an 'f' vreg is a bug */
			if (spec [MONO_INST_SRC1] == 'f' || spec [MONO_INST_SRC2] == 'f' || spec [MONO_INST_DEST] == 'f') {
				mono_print_ins (ins);
				g_assert_not_reached ();
	g_assert (cfg->cbb == first_bb);
	if (cfg->cbb->code || (cfg->cbb != first_bb)) {
		/* Replace the original instruction with the new code sequence */
		mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
		first_bb->code = first_bb->last_ins = NULL;
		first_bb->in_count = first_bb->out_count = 0;
		cfg->cbb = first_bb;
	if (cfg->verbose_level > 3) mono_print_bb (bb, "AFTER HANDLE-SOFT-FLOAT ");
	mono_decompose_long_opts (cfg);
/*
 * emit_stloc_ir:
 *
 *   Emit the store of the value on top of the stack (*sp) into local N,
 * optimizing away a reg-reg move when the value producer is the last
 * emitted instruction and is a plain constant.
 */
emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
	guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
	if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
			((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
		/* Optimize reg-reg moves away */
		/*
		 * Can't optimize other opcodes, since sp[0] might point to
		 * the last ins of a decomposed opcode.
		 */
		sp [0]->dreg = (cfg)->locals [n]->dreg;
		EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
/*
 * emit_optimized_ldloca_ir:
 *
 *   ldloca inhibits many optimizations so try to get rid of it in common
 * cases.  In particular, a "ldloca + initobj" pair is turned into a direct
 * store into the local (NULL for reference types, vzero for structs).
 */
static inline unsigned char *
emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
	local = read16 (ip + 2);
	/* look ahead for "initobj" immediately following the ldloca, in the same bblock */
	if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
		gboolean skip = FALSE;
		/* From the INITOBJ case */
		token = read32 (ip + 2);
		klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
		CHECK_TYPELOAD (klass);
		if (generic_class_is_reference_type (cfg, klass)) {
			MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
		} else if (MONO_TYPE_IS_REFERENCE (&klass->byval_arg)) {
			MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
		} else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
			MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/*
 * is_exception_class:
 *
 *   Walk the parent chain of CLASS; returns TRUE when System.Exception is
 * reached, i.e. CLASS is (derived from) the exception class.
 */
is_exception_class (MonoClass *class)
	if (class == mono_defaults.exception_class)
	class = class->parent;
5431 * mono_method_to_ir:
5433 * Translate the .net IL into linear IR.
5436 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5437 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5438 guint inline_offset, gboolean is_virtual_call)
5440 MonoInst *ins, **sp, **stack_start;
5441 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5442 MonoMethod *cmethod, *method_definition;
5443 MonoInst **arg_array;
5444 MonoMethodHeader *header;
5446 guint32 token, ins_flag;
5448 MonoClass *constrained_call = NULL;
5449 unsigned char *ip, *end, *target, *err_pos;
5450 static double r8_0 = 0.0;
5451 MonoMethodSignature *sig;
5452 MonoGenericContext *generic_context = NULL;
5453 MonoGenericContainer *generic_container = NULL;
5454 MonoType **param_types;
5455 int i, n, start_new_bblock, dreg;
5456 int num_calls = 0, inline_costs = 0;
5457 int breakpoint_id = 0;
5459 MonoBoolean security, pinvoke;
5460 MonoSecurityManager* secman = NULL;
5461 MonoDeclSecurityActions actions;
5462 GSList *class_inits = NULL;
5463 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5465 gboolean init_locals, seq_points;
5467 /* serialization and xdomain stuff may need access to private fields and methods */
5468 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5469 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5470 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5471 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5472 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5473 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5475 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5477 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5478 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5479 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5480 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5482 image = method->klass->image;
5483 header = mono_method_get_header (method);
5484 generic_container = mono_method_get_generic_container (method);
5485 sig = mono_method_signature (method);
5486 num_args = sig->hasthis + sig->param_count;
5487 ip = (unsigned char*)header->code;
5488 cfg->cil_start = ip;
5489 end = ip + header->code_size;
5490 mono_jit_stats.cil_code_size += header->code_size;
5491 init_locals = header->init_locals;
5493 seq_points = cfg->gen_seq_points && cfg->method == method;
5496 * Methods without init_locals set could cause asserts in various passes
5501 method_definition = method;
5502 while (method_definition->is_inflated) {
5503 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5504 method_definition = imethod->declaring;
5507 /* SkipVerification is not allowed if core-clr is enabled */
5508 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5510 dont_verify_stloc = TRUE;
5513 if (!dont_verify && mini_method_verify (cfg, method_definition))
5514 goto exception_exit;
5516 if (mono_debug_using_mono_debugger ())
5517 cfg->keep_cil_nops = TRUE;
5519 if (sig->is_inflated)
5520 generic_context = mono_method_get_context (method);
5521 else if (generic_container)
5522 generic_context = &generic_container->context;
5523 cfg->generic_context = generic_context;
5525 if (!cfg->generic_sharing_context)
5526 g_assert (!sig->has_type_parameters);
5528 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5529 g_assert (method->is_inflated);
5530 g_assert (mono_method_get_context (method)->method_inst);
5532 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5533 g_assert (sig->generic_param_count);
5535 if (cfg->method == method) {
5536 cfg->real_offset = 0;
5538 cfg->real_offset = inline_offset;
5541 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5542 cfg->cil_offset_to_bb_len = header->code_size;
5544 cfg->current_method = method;
5546 if (cfg->verbose_level > 2)
5547 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5549 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5551 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5552 for (n = 0; n < sig->param_count; ++n)
5553 param_types [n + sig->hasthis] = sig->params [n];
5554 cfg->arg_types = param_types;
5556 dont_inline = g_list_prepend (dont_inline, method);
5557 if (cfg->method == method) {
5559 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5560 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5563 NEW_BBLOCK (cfg, start_bblock);
5564 cfg->bb_entry = start_bblock;
5565 start_bblock->cil_code = NULL;
5566 start_bblock->cil_length = 0;
5569 NEW_BBLOCK (cfg, end_bblock);
5570 cfg->bb_exit = end_bblock;
5571 end_bblock->cil_code = NULL;
5572 end_bblock->cil_length = 0;
5573 g_assert (cfg->num_bblocks == 2);
5575 arg_array = cfg->args;
5577 if (header->num_clauses) {
5578 cfg->spvars = g_hash_table_new (NULL, NULL);
5579 cfg->exvars = g_hash_table_new (NULL, NULL);
5581 /* handle exception clauses */
5582 for (i = 0; i < header->num_clauses; ++i) {
5583 MonoBasicBlock *try_bb;
5584 MonoExceptionClause *clause = &header->clauses [i];
5585 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5586 try_bb->real_offset = clause->try_offset;
5587 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5588 tblock->real_offset = clause->handler_offset;
5589 tblock->flags |= BB_EXCEPTION_HANDLER;
5591 link_bblock (cfg, try_bb, tblock);
5593 if (*(ip + clause->handler_offset) == CEE_POP)
5594 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5596 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5597 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5598 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5599 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5600 MONO_ADD_INS (tblock, ins);
5602 /* todo: is a fault block unsafe to optimize? */
5603 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5604 tblock->flags |= BB_EXCEPTION_UNSAFE;
5608 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5610 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5612 /* catch and filter blocks get the exception object on the stack */
5613 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5614 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5615 MonoInst *dummy_use;
5617 /* mostly like handle_stack_args (), but just sets the input args */
5618 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5619 tblock->in_scount = 1;
5620 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5621 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5624 * Add a dummy use for the exvar so its liveness info will be
5628 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5630 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5631 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5632 tblock->flags |= BB_EXCEPTION_HANDLER;
5633 tblock->real_offset = clause->data.filter_offset;
5634 tblock->in_scount = 1;
5635 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5636 /* The filter block shares the exvar with the handler block */
5637 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5638 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5639 MONO_ADD_INS (tblock, ins);
5643 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5644 clause->data.catch_class &&
5645 cfg->generic_sharing_context &&
5646 mono_class_check_context_used (clause->data.catch_class)) {
5648 * In shared generic code with catch
5649 * clauses containing type variables
5650 * the exception handling code has to
5651 * be able to get to the rgctx.
5652 * Therefore we have to make sure that
5653 * the vtable/mrgctx argument (for
5654 * static or generic methods) or the
5655 * "this" argument (for non-static
5656 * methods) are live.
5658 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5659 mini_method_get_context (method)->method_inst ||
5660 method->klass->valuetype) {
5661 mono_get_vtable_var (cfg);
5663 MonoInst *dummy_use;
5665 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5670 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5671 cfg->cbb = start_bblock;
5672 cfg->args = arg_array;
5673 mono_save_args (cfg, sig, inline_args);
5676 /* FIRST CODE BLOCK */
5677 NEW_BBLOCK (cfg, bblock);
5678 bblock->cil_code = ip;
5682 ADD_BBLOCK (cfg, bblock);
5684 if (cfg->method == method) {
5685 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5686 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5687 MONO_INST_NEW (cfg, ins, OP_BREAK);
5688 MONO_ADD_INS (bblock, ins);
5692 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5693 secman = mono_security_manager_get_methods ();
5695 security = (secman && mono_method_has_declsec (method));
5696 /* at this point having security doesn't mean we have any code to generate */
5697 if (security && (cfg->method == method)) {
5698 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5699 * And we do not want to enter the next section (with allocation) if we
5700 * have nothing to generate */
5701 security = mono_declsec_get_demands (method, &actions);
5704 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
5705 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
5707 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5708 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
5709 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
5711 /* unless the method or its class has the [SuppressUnmanagedCodeSecurity] attribute */
5712 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5716 mono_custom_attrs_free (custom);
5719 custom = mono_custom_attrs_from_class (wrapped->klass);
5720 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
5724 mono_custom_attrs_free (custom);
5727 /* not a P/Invoke after all */
5732 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
5733 /* we use a separate basic block for the initialization code */
5734 NEW_BBLOCK (cfg, init_localsbb);
5735 cfg->bb_init = init_localsbb;
5736 init_localsbb->real_offset = cfg->real_offset;
5737 start_bblock->next_bb = init_localsbb;
5738 init_localsbb->next_bb = bblock;
5739 link_bblock (cfg, start_bblock, init_localsbb);
5740 link_bblock (cfg, init_localsbb, bblock);
5742 cfg->cbb = init_localsbb;
5744 start_bblock->next_bb = bblock;
5745 link_bblock (cfg, start_bblock, bblock);
5748 /* at this point we know, if security is TRUE, that some code needs to be generated */
5749 if (security && (cfg->method == method)) {
5752 mono_jit_stats.cas_demand_generation++;
5754 if (actions.demand.blob) {
5755 /* Add code for SecurityAction.Demand */
5756 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
5757 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
5758 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5759 mono_emit_method_call (cfg, secman->demand, args, NULL);
5761 if (actions.noncasdemand.blob) {
5762 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
5763 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
5764 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
5765 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
5766 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
5767 mono_emit_method_call (cfg, secman->demand, args, NULL);
5769 if (actions.demandchoice.blob) {
5770 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
5771 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
5772 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
5773 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
5774 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
5778 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
5780 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
5783 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
5784 /* check if this is native code, e.g. an icall or a p/invoke */
5785 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
5786 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
5788 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
5789 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
5791 /* if this is a native call then it can only be JITted from platform code */
5792 if ((icall || pinvk) && method->klass && method->klass->image) {
5793 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
5794 MonoException *ex = icall ? mono_get_exception_security () :
5795 mono_get_exception_method_access ();
5796 emit_throw_exception (cfg, ex);
5803 if (header->code_size == 0)
5806 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
5811 if (cfg->method == method)
5812 mono_debug_init_method (cfg, bblock, breakpoint_id);
5814 for (n = 0; n < header->num_locals; ++n) {
5815 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
5820 /* We force the vtable variable here for all shared methods
5821 for the possibility that they might show up in a stack
5822 trace where their exact instantiation is needed. */
5823 if (cfg->generic_sharing_context && method == cfg->method) {
5824 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5825 mini_method_get_context (method)->method_inst ||
5826 method->klass->valuetype) {
5827 mono_get_vtable_var (cfg);
5829 /* FIXME: Is there a better way to do this?
5830 We need the variable live for the duration
5831 of the whole method. */
5832 cfg->args [0]->flags |= MONO_INST_INDIRECT;
5836 /* add a check for this != NULL to inlined methods */
5837 if (is_virtual_call) {
5840 NEW_ARGLOAD (cfg, arg_ins, 0);
5841 MONO_ADD_INS (cfg->cbb, arg_ins);
5842 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
5845 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
5846 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
5849 start_new_bblock = 0;
5853 if (cfg->method == method)
5854 cfg->real_offset = ip - header->code;
5856 cfg->real_offset = inline_offset;
5861 if (start_new_bblock) {
5862 bblock->cil_length = ip - bblock->cil_code;
5863 if (start_new_bblock == 2) {
5864 g_assert (ip == tblock->cil_code);
5866 GET_BBLOCK (cfg, tblock, ip);
5868 bblock->next_bb = tblock;
5871 start_new_bblock = 0;
5872 for (i = 0; i < bblock->in_scount; ++i) {
5873 if (cfg->verbose_level > 3)
5874 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5875 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5879 g_slist_free (class_inits);
5882 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
5883 link_bblock (cfg, bblock, tblock);
5884 if (sp != stack_start) {
5885 handle_stack_args (cfg, stack_start, sp - stack_start);
5887 CHECK_UNVERIFIABLE (cfg);
5889 bblock->next_bb = tblock;
5892 for (i = 0; i < bblock->in_scount; ++i) {
5893 if (cfg->verbose_level > 3)
5894 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
5895 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
5898 g_slist_free (class_inits);
5904 * Sequence points are points where the debugger can place a breakpoint.
5905 * Currently, we generate these automatically at points where the IL
5908 if (seq_points && sp == stack_start) {
5909 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
5910 MONO_ADD_INS (cfg->cbb, ins);
5913 bblock->real_offset = cfg->real_offset;
5915 if ((cfg->method == method) && cfg->coverage_info) {
5916 guint32 cil_offset = ip - header->code;
5917 cfg->coverage_info->data [cil_offset].cil_code = ip;
5919 /* TODO: Use an increment here */
5920 #if defined(TARGET_X86)
5921 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
5922 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
5924 MONO_ADD_INS (cfg->cbb, ins);
5926 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
5927 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
5931 if (cfg->verbose_level > 3)
5932 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
5936 if (cfg->keep_cil_nops)
5937 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
5939 MONO_INST_NEW (cfg, ins, OP_NOP);
5941 MONO_ADD_INS (bblock, ins);
5944 if (should_insert_brekpoint (cfg->method))
5945 MONO_INST_NEW (cfg, ins, OP_BREAK);
5947 MONO_INST_NEW (cfg, ins, OP_NOP);
5949 MONO_ADD_INS (bblock, ins);
5955 CHECK_STACK_OVF (1);
5956 n = (*ip)-CEE_LDARG_0;
5958 EMIT_NEW_ARGLOAD (cfg, ins, n);
5966 CHECK_STACK_OVF (1);
5967 n = (*ip)-CEE_LDLOC_0;
5969 EMIT_NEW_LOCLOAD (cfg, ins, n);
5978 n = (*ip)-CEE_STLOC_0;
5981 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
5983 emit_stloc_ir (cfg, sp, header, n);
5990 CHECK_STACK_OVF (1);
5993 EMIT_NEW_ARGLOAD (cfg, ins, n);
5999 CHECK_STACK_OVF (1);
6002 NEW_ARGLOADA (cfg, ins, n);
6003 MONO_ADD_INS (cfg->cbb, ins);
6013 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
6015 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
6020 CHECK_STACK_OVF (1);
6023 EMIT_NEW_LOCLOAD (cfg, ins, n);
6027 case CEE_LDLOCA_S: {
6028 unsigned char *tmp_ip;
6030 CHECK_STACK_OVF (1);
6031 CHECK_LOCAL (ip [1]);
6033 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
6039 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
6048 CHECK_LOCAL (ip [1]);
6049 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
6051 emit_stloc_ir (cfg, sp, header, ip [1]);
6056 CHECK_STACK_OVF (1);
6057 EMIT_NEW_PCONST (cfg, ins, NULL);
6058 ins->type = STACK_OBJ;
6063 CHECK_STACK_OVF (1);
6064 EMIT_NEW_ICONST (cfg, ins, -1);
6077 CHECK_STACK_OVF (1);
6078 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
6084 CHECK_STACK_OVF (1);
6086 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
6092 CHECK_STACK_OVF (1);
6093 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
6099 CHECK_STACK_OVF (1);
6100 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6101 ins->type = STACK_I8;
6102 ins->dreg = alloc_dreg (cfg, STACK_I8);
6104 ins->inst_l = (gint64)read64 (ip);
6105 MONO_ADD_INS (bblock, ins);
6111 gboolean use_aotconst = FALSE;
6113 #ifdef TARGET_POWERPC
6114 /* FIXME: Clean this up */
6115 if (cfg->compile_aot)
6116 use_aotconst = TRUE;
6119 /* FIXME: we should really allocate this only late in the compilation process */
6120 f = mono_domain_alloc (cfg->domain, sizeof (float));
6122 CHECK_STACK_OVF (1);
6128 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
6130 dreg = alloc_freg (cfg);
6131 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
6132 ins->type = STACK_R8;
6134 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6135 ins->type = STACK_R8;
6136 ins->dreg = alloc_dreg (cfg, STACK_R8);
6138 MONO_ADD_INS (bblock, ins);
6148 gboolean use_aotconst = FALSE;
6150 #ifdef TARGET_POWERPC
6151 /* FIXME: Clean this up */
6152 if (cfg->compile_aot)
6153 use_aotconst = TRUE;
6156 /* FIXME: we should really allocate this only late in the compilation process */
6157 d = mono_domain_alloc (cfg->domain, sizeof (double));
6159 CHECK_STACK_OVF (1);
6165 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
6167 dreg = alloc_freg (cfg);
6168 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
6169 ins->type = STACK_R8;
6171 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6172 ins->type = STACK_R8;
6173 ins->dreg = alloc_dreg (cfg, STACK_R8);
6175 MONO_ADD_INS (bblock, ins);
6184 MonoInst *temp, *store;
6186 CHECK_STACK_OVF (1);
6190 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
6191 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
6193 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6196 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6209 if (sp [0]->type == STACK_R8)
6210 /* we need to pop the value from the x86 FP stack */
6211 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
6220 if (stack_start != sp)
6222 token = read32 (ip + 1);
6223 /* FIXME: check the signature matches */
6224 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6229 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
6230 GENERIC_SHARING_FAILURE (CEE_JMP);
6232 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
6233 CHECK_CFG_EXCEPTION;
6235 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6237 MonoMethodSignature *fsig = mono_method_signature (cmethod);
6240 /* Handle tail calls similarly to calls */
6241 n = fsig->param_count + fsig->hasthis;
6243 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
6244 call->method = cmethod;
6245 call->tail_call = TRUE;
6246 call->signature = mono_method_signature (cmethod);
6247 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6248 call->inst.inst_p0 = cmethod;
6249 for (i = 0; i < n; ++i)
6250 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
6252 mono_arch_emit_call (cfg, call);
6253 MONO_ADD_INS (bblock, (MonoInst*)call);
6256 for (i = 0; i < num_args; ++i)
6257 /* Prevent arguments from being optimized away */
6258 arg_array [i]->flags |= MONO_INST_VOLATILE;
6260 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6261 ins = (MonoInst*)call;
6262 ins->inst_p0 = cmethod;
6263 MONO_ADD_INS (bblock, ins);
6267 start_new_bblock = 1;
6272 case CEE_CALLVIRT: {
6273 MonoInst *addr = NULL;
6274 MonoMethodSignature *fsig = NULL;
6276 int virtual = *ip == CEE_CALLVIRT;
6277 int calli = *ip == CEE_CALLI;
6278 gboolean pass_imt_from_rgctx = FALSE;
6279 MonoInst *imt_arg = NULL;
6280 gboolean pass_vtable = FALSE;
6281 gboolean pass_mrgctx = FALSE;
6282 MonoInst *vtable_arg = NULL;
6283 gboolean check_this = FALSE;
6284 gboolean supported_tail_call = FALSE;
6287 token = read32 (ip + 1);
6294 if (method->wrapper_type != MONO_WRAPPER_NONE)
6295 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6297 fsig = mono_metadata_parse_signature (image, token);
6299 n = fsig->param_count + fsig->hasthis;
6301 if (method->dynamic && fsig->pinvoke) {
6305 * This is a call through a function pointer using a pinvoke
6306 * signature. Have to create a wrapper and call that instead.
6307 * FIXME: This is very slow, need to create a wrapper at JIT time
6308 * instead based on the signature.
6310 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
6311 EMIT_NEW_PCONST (cfg, args [1], fsig);
6313 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6316 MonoMethod *cil_method;
6318 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6319 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6320 cil_method = cmethod;
6321 } else if (constrained_call) {
6322 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6324 * This is needed since get_method_constrained can't find
6325 * the method in klass representing a type var.
6326 * The type var is guaranteed to be a reference type in this
6329 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6330 cil_method = cmethod;
6331 g_assert (!cmethod->klass->valuetype);
6333 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6336 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6337 cil_method = cmethod;
6342 if (!dont_verify && !cfg->skip_visibility) {
6343 MonoMethod *target_method = cil_method;
6344 if (method->is_inflated) {
6345 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6347 if (!mono_method_can_access_method (method_definition, target_method) &&
6348 !mono_method_can_access_method (method, cil_method))
6349 METHOD_ACCESS_FAILURE;
6352 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6353 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6355 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6356 /* MS.NET seems to silently convert this to a callvirt */
6359 if (!cmethod->klass->inited)
6360 if (!mono_class_init (cmethod->klass))
6363 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6364 mini_class_is_system_array (cmethod->klass)) {
6365 array_rank = cmethod->klass->rank;
6366 fsig = mono_method_signature (cmethod);
6368 if (mono_method_signature (cmethod)->pinvoke) {
6369 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6370 check_for_pending_exc, FALSE);
6371 fsig = mono_method_signature (wrapper);
6372 } else if (constrained_call) {
6373 fsig = mono_method_signature (cmethod);
6375 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6379 mono_save_token_info (cfg, image, token, cil_method);
6381 n = fsig->param_count + fsig->hasthis;
6383 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6384 if (check_linkdemand (cfg, method, cmethod))
6386 CHECK_CFG_EXCEPTION;
6389 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6390 g_assert_not_reached ();
6393 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6396 if (!cfg->generic_sharing_context && cmethod)
6397 g_assert (!mono_method_check_context_used (cmethod));
6401 //g_assert (!virtual || fsig->hasthis);
6405 if (constrained_call) {
6407 * We have the `constrained.' prefix opcode.
6409 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6411 * The type parameter is instantiated as a valuetype,
6412 * but that type doesn't override the method we're
6413 * calling, so we need to box `this'.
6415 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6416 ins->klass = constrained_call;
6417 sp [0] = handle_box (cfg, ins, constrained_call);
6418 CHECK_CFG_EXCEPTION;
6419 } else if (!constrained_call->valuetype) {
6420 int dreg = alloc_preg (cfg);
6423 * The type parameter is instantiated as a reference
6424 * type. We have a managed pointer on the stack, so
6425 * we need to dereference it here.
6427 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6428 ins->type = STACK_OBJ;
6430 } else if (cmethod->klass->valuetype)
6432 constrained_call = NULL;
6435 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6439 * If the callee is a shared method, then its static cctor
6440 * might not get called after the call was patched.
6442 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6443 emit_generic_class_init (cfg, cmethod->klass);
6444 CHECK_TYPELOAD (cmethod->klass);
6447 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6448 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6449 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6450 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6451 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6454 * Pass vtable iff target method might
6455 * be shared, which means that sharing
6456 * is enabled for its class and its
6457 * context is sharable (and it's not a
6460 if (sharing_enabled && context_sharable &&
6461 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6465 if (cmethod && mini_method_get_context (cmethod) &&
6466 mini_method_get_context (cmethod)->method_inst) {
6467 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6468 MonoGenericContext *context = mini_method_get_context (cmethod);
6469 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6471 g_assert (!pass_vtable);
6473 if (sharing_enabled && context_sharable)
6477 if (cfg->generic_sharing_context && cmethod) {
6478 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6480 context_used = mono_method_check_context_used (cmethod);
6482 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6483 /* Generic method interface
6484 calls are resolved via a
6485 helper function and don't
6487 if (!cmethod_context || !cmethod_context->method_inst)
6488 pass_imt_from_rgctx = TRUE;
6492 * If a shared method calls another
6493 * shared method then the caller must
6494 * have a generic sharing context
6495 * because the magic trampoline
6496 * requires it. FIXME: We shouldn't
6497 * have to force the vtable/mrgctx
6498 * variable here. Instead there
6499 * should be a flag in the cfg to
6500 * request a generic sharing context.
6503 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6504 mono_get_vtable_var (cfg);
6509 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6511 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6513 CHECK_TYPELOAD (cmethod->klass);
6514 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6519 g_assert (!vtable_arg);
6521 if (!cfg->compile_aot) {
6523 * emit_get_rgctx_method () calls mono_class_vtable () so check
6524 * for type load errors before.
6526 mono_class_vtable (cfg->domain, cmethod->klass);
6527 CHECK_TYPELOAD (cmethod->klass);
6530 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6532 if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6533 MONO_METHOD_IS_FINAL (cmethod)) {
6540 if (pass_imt_from_rgctx) {
6541 g_assert (!pass_vtable);
6544 imt_arg = emit_get_rgctx_method (cfg, context_used,
6545 cmethod, MONO_RGCTX_INFO_METHOD);
6549 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
6551 /* Calling virtual generic methods */
6552 if (cmethod && virtual &&
6553 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6554 !(MONO_METHOD_IS_FINAL (cmethod) &&
6555 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6556 mono_method_signature (cmethod)->generic_param_count) {
6557 MonoInst *this_temp, *this_arg_temp, *store;
6558 MonoInst *iargs [4];
6560 g_assert (mono_method_signature (cmethod)->is_inflated);
6562 /* Prevent inlining of methods that contain indirect calls */
6565 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK
6566 /* The llvm vcall trampolines don't support generic virtual calls yet */
6567 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt && !mono_use_llvm) {
6568 g_assert (!imt_arg);
6570 g_assert (cmethod->is_inflated);
6571 imt_arg = emit_get_rgctx_method (cfg, context_used,
6572 cmethod, MONO_RGCTX_INFO_METHOD);
6573 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg);
6577 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6578 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6579 MONO_ADD_INS (bblock, store);
6581 /* FIXME: This should be a managed pointer */
6582 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6584 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6585 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6586 cmethod, MONO_RGCTX_INFO_METHOD);
6587 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6588 addr = mono_emit_jit_icall (cfg,
6589 mono_helper_compile_generic_method, iargs);
6591 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6593 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6596 if (!MONO_TYPE_IS_VOID (fsig->ret))
6597 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6604 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6605 supported_tail_call = cmethod && MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
6607 supported_tail_call = cmethod && mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6611 /* FIXME: runtime generic context pointer for jumps? */
6612 /* FIXME: handle this for generic sharing eventually */
6613 if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) && supported_tail_call) {
6616 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6619 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6620 /* Handle tail calls similarly to calls */
6621 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE);
6623 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6624 call->tail_call = TRUE;
6625 call->method = cmethod;
6626 call->signature = mono_method_signature (cmethod);
6629 * We implement tail calls by storing the actual arguments into the
6630 * argument variables, then emitting a CEE_JMP.
6632 for (i = 0; i < n; ++i) {
6633 /* Prevent argument from being register allocated */
6634 arg_array [i]->flags |= MONO_INST_VOLATILE;
6635 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6639 ins = (MonoInst*)call;
6640 ins->inst_p0 = cmethod;
6641 ins->inst_p1 = arg_array [0];
6642 MONO_ADD_INS (bblock, ins);
6643 link_bblock (cfg, bblock, end_bblock);
6644 start_new_bblock = 1;
6645 /* skip CEE_RET as well */
6651 /* Conversion to a JIT intrinsic */
6652 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6653 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6654 type_to_eval_stack_type ((cfg), fsig->ret, ins);
6665 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
6666 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
6667 mono_method_check_inlining (cfg, cmethod) &&
6668 !g_list_find (dont_inline, cmethod)) {
6670 gboolean allways = FALSE;
6672 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
6673 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6674 /* Prevent inlining of methods that call wrappers */
6676 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
6680 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, allways))) {
6682 cfg->real_offset += 5;
6685 if (!MONO_TYPE_IS_VOID (fsig->ret))
6686 /* *sp is already set by inline_method */
6689 inline_costs += costs;
6695 inline_costs += 10 * num_calls++;
6697 /* Tail recursion elimination */
6698 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
6699 gboolean has_vtargs = FALSE;
6702 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
6705 /* keep it simple */
6706 for (i = fsig->param_count - 1; i >= 0; i--) {
6707 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
6712 for (i = 0; i < n; ++i)
6713 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
6714 MONO_INST_NEW (cfg, ins, OP_BR);
6715 MONO_ADD_INS (bblock, ins);
6716 tblock = start_bblock->out_bb [0];
6717 link_bblock (cfg, bblock, tblock);
6718 ins->inst_target_bb = tblock;
6719 start_new_bblock = 1;
6721 /* skip the CEE_RET, too */
6722 if (ip_in_bb (cfg, bblock, ip + 5))
6732 /* Generic sharing */
6733 /* FIXME: only do this for generic methods if
6734 they are not shared! */
6735 if (context_used && !imt_arg && !array_rank &&
6736 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
6737 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
6738 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
6739 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
6742 g_assert (cfg->generic_sharing_context && cmethod);
6746 * We are compiling a call to a
6747 * generic method from shared code,
6748 * which means that we have to look up
6749 * the method in the rgctx and do an
6752 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
6755 /* Indirect calls */
6757 g_assert (!imt_arg);
6759 if (*ip == CEE_CALL)
6760 g_assert (context_used);
6761 else if (*ip == CEE_CALLI)
6762 g_assert (!vtable_arg);
6764 /* FIXME: what the hell is this??? */
6765 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
6766 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
6768 /* Prevent inlining of methods with indirect calls */
6772 #ifdef MONO_ARCH_RGCTX_REG
6774 int rgctx_reg = mono_alloc_preg (cfg);
6776 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, vtable_arg->dreg);
6777 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6778 call = (MonoCallInst*)ins;
6779 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
6780 cfg->uses_rgctx_reg = TRUE;
6781 call->rgctx_reg = TRUE;
6786 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6788 * Instead of emitting an indirect call, emit a direct call
6789 * with the contents of the aotconst as the patch info.
6791 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
6793 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
6794 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
6797 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr);
6800 if (!MONO_TYPE_IS_VOID (fsig->ret))
6801 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6812 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
6813 if (sp [fsig->param_count]->type == STACK_OBJ) {
6814 MonoInst *iargs [2];
6817 iargs [1] = sp [fsig->param_count];
6819 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
6822 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
6823 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, sp [fsig->param_count]->dreg);
6824 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
6825 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6827 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
6830 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
6831 if (!cmethod->klass->element_class->valuetype && !readonly)
6832 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
6833 CHECK_TYPELOAD (cmethod->klass);
6836 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
6839 g_assert_not_reached ();
6847 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
6849 if (!MONO_TYPE_IS_VOID (fsig->ret))
6850 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6860 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
6862 } else if (imt_arg) {
6863 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, imt_arg);
6865 ins = (MonoInst*)mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL, NULL);
6868 if (!MONO_TYPE_IS_VOID (fsig->ret))
6869 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6876 if (cfg->method != method) {
6877 /* return from inlined method */
6879 * If in_count == 0, that means the ret is unreachable due to
6880 * being preceded by a throw. In that case, inline_method () will
6881 * handle setting the return value
6882 * (test case: test_0_inline_throw ()).
6884 if (return_var && cfg->cbb->in_count) {
6888 //g_assert (returnvar != -1);
6889 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
6890 cfg->ret_var_set = TRUE;
6894 MonoType *ret_type = mono_method_signature (method)->ret;
6896 g_assert (!return_var);
6899 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
6902 if (!cfg->vret_addr) {
6905 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
6907 EMIT_NEW_RETLOADA (cfg, ret_addr);
6909 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
6910 ins->klass = mono_class_from_mono_type (ret_type);
6913 #ifdef MONO_ARCH_SOFT_FLOAT
6914 if (!ret_type->byref && ret_type->type == MONO_TYPE_R4) {
6915 MonoInst *iargs [1];
6919 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
6920 mono_arch_emit_setret (cfg, method, conv);
6922 mono_arch_emit_setret (cfg, method, *sp);
6925 mono_arch_emit_setret (cfg, method, *sp);
6930 if (sp != stack_start)
6932 MONO_INST_NEW (cfg, ins, OP_BR);
6934 ins->inst_target_bb = end_bblock;
6935 MONO_ADD_INS (bblock, ins);
6936 link_bblock (cfg, bblock, end_bblock);
6937 start_new_bblock = 1;
6941 MONO_INST_NEW (cfg, ins, OP_BR);
6943 target = ip + 1 + (signed char)(*ip);
6945 GET_BBLOCK (cfg, tblock, target);
6946 link_bblock (cfg, bblock, tblock);
6947 ins->inst_target_bb = tblock;
6948 if (sp != stack_start) {
6949 handle_stack_args (cfg, stack_start, sp - stack_start);
6951 CHECK_UNVERIFIABLE (cfg);
6953 MONO_ADD_INS (bblock, ins);
6954 start_new_bblock = 1;
6955 inline_costs += BRANCH_COST;
6969 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
6971 target = ip + 1 + *(signed char*)ip;
6977 inline_costs += BRANCH_COST;
6981 MONO_INST_NEW (cfg, ins, OP_BR);
6984 target = ip + 4 + (gint32)read32(ip);
6986 GET_BBLOCK (cfg, tblock, target);
6987 link_bblock (cfg, bblock, tblock);
6988 ins->inst_target_bb = tblock;
6989 if (sp != stack_start) {
6990 handle_stack_args (cfg, stack_start, sp - stack_start);
6992 CHECK_UNVERIFIABLE (cfg);
6995 MONO_ADD_INS (bblock, ins);
6997 start_new_bblock = 1;
6998 inline_costs += BRANCH_COST;
7005 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
7006 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
7007 guint32 opsize = is_short ? 1 : 4;
7009 CHECK_OPSIZE (opsize);
7011 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
7014 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
7019 GET_BBLOCK (cfg, tblock, target);
7020 link_bblock (cfg, bblock, tblock);
7021 GET_BBLOCK (cfg, tblock, ip);
7022 link_bblock (cfg, bblock, tblock);
7024 if (sp != stack_start) {
7025 handle_stack_args (cfg, stack_start, sp - stack_start);
7026 CHECK_UNVERIFIABLE (cfg);
7029 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
7030 cmp->sreg1 = sp [0]->dreg;
7031 type_from_op (cmp, sp [0], NULL);
7034 #if SIZEOF_REGISTER == 4
7035 if (cmp->opcode == OP_LCOMPARE_IMM) {
7036 /* Convert it to OP_LCOMPARE */
7037 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7038 ins->type = STACK_I8;
7039 ins->dreg = alloc_dreg (cfg, STACK_I8);
7041 MONO_ADD_INS (bblock, ins);
7042 cmp->opcode = OP_LCOMPARE;
7043 cmp->sreg2 = ins->dreg;
7046 MONO_ADD_INS (bblock, cmp);
7048 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
7049 type_from_op (ins, sp [0], NULL);
7050 MONO_ADD_INS (bblock, ins);
7051 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
7052 GET_BBLOCK (cfg, tblock, target);
7053 ins->inst_true_bb = tblock;
7054 GET_BBLOCK (cfg, tblock, ip);
7055 ins->inst_false_bb = tblock;
7056 start_new_bblock = 2;
7059 inline_costs += BRANCH_COST;
7074 MONO_INST_NEW (cfg, ins, *ip);
7076 target = ip + 4 + (gint32)read32(ip);
7082 inline_costs += BRANCH_COST;
7086 MonoBasicBlock **targets;
7087 MonoBasicBlock *default_bblock;
7088 MonoJumpInfoBBTable *table;
7089 int offset_reg = alloc_preg (cfg);
7090 int target_reg = alloc_preg (cfg);
7091 int table_reg = alloc_preg (cfg);
7092 int sum_reg = alloc_preg (cfg);
7093 gboolean use_op_switch;
7097 n = read32 (ip + 1);
7100 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
7104 CHECK_OPSIZE (n * sizeof (guint32));
7105 target = ip + n * sizeof (guint32);
7107 GET_BBLOCK (cfg, default_bblock, target);
7109 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
7110 for (i = 0; i < n; ++i) {
7111 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
7112 targets [i] = tblock;
7116 if (sp != stack_start) {
7118 * Link the current bb with the targets as well, so handle_stack_args
7119 * will set their in_stack correctly.
7121 link_bblock (cfg, bblock, default_bblock);
7122 for (i = 0; i < n; ++i)
7123 link_bblock (cfg, bblock, targets [i]);
7125 handle_stack_args (cfg, stack_start, sp - stack_start);
7127 CHECK_UNVERIFIABLE (cfg);
7130 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
7131 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
7134 for (i = 0; i < n; ++i)
7135 link_bblock (cfg, bblock, targets [i]);
7137 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
7138 table->table = targets;
7139 table->table_size = n;
7141 use_op_switch = FALSE;
7143 /* ARM implements SWITCH statements differently */
7144 /* FIXME: Make it use the generic implementation */
7145 if (!cfg->compile_aot)
7146 use_op_switch = TRUE;
7149 if (COMPILE_LLVM (cfg))
7150 use_op_switch = TRUE;
7152 cfg->cbb->has_jump_table = 1;
7154 if (use_op_switch) {
7155 MONO_INST_NEW (cfg, ins, OP_SWITCH);
7156 ins->sreg1 = src1->dreg;
7157 ins->inst_p0 = table;
7158 ins->inst_many_bb = targets;
7159 ins->klass = GUINT_TO_POINTER (n);
7160 MONO_ADD_INS (cfg->cbb, ins);
7162 if (sizeof (gpointer) == 8)
7163 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
7165 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
7167 #if SIZEOF_REGISTER == 8
7168 /* The upper word might not be zero, and we add it to a 64 bit address later */
7169 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
7172 if (cfg->compile_aot) {
7173 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
7175 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
7176 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
7177 ins->inst_p0 = table;
7178 ins->dreg = table_reg;
7179 MONO_ADD_INS (cfg->cbb, ins);
7182 /* FIXME: Use load_memindex */
7183 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
7184 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
7185 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
7187 start_new_bblock = 1;
7188 inline_costs += (BRANCH_COST * 2);
7208 dreg = alloc_freg (cfg);
7211 dreg = alloc_lreg (cfg);
7214 dreg = alloc_preg (cfg);
7217 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
7218 ins->type = ldind_type [*ip - CEE_LDIND_I1];
7219 ins->flags |= ins_flag;
7221 MONO_ADD_INS (bblock, ins);
7236 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
7237 ins->flags |= ins_flag;
7239 MONO_ADD_INS (bblock, ins);
7241 #if HAVE_WRITE_BARRIERS
7242 if (*ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0))) {
7243 /* insert call to write barrier */
7244 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
7245 mono_emit_method_call (cfg, write_barrier, sp, NULL);
7256 MONO_INST_NEW (cfg, ins, (*ip));
7258 ins->sreg1 = sp [0]->dreg;
7259 ins->sreg2 = sp [1]->dreg;
7260 type_from_op (ins, sp [0], sp [1]);
7262 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7264 /* Use the immediate opcodes if possible */
7265 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
7266 int imm_opcode = mono_op_to_op_imm (ins->opcode);
7267 if (imm_opcode != -1) {
7268 ins->opcode = imm_opcode;
7269 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7272 sp [1]->opcode = OP_NOP;
7276 MONO_ADD_INS ((cfg)->cbb, (ins));
7278 *sp++ = mono_decompose_opcode (cfg, ins);
7295 MONO_INST_NEW (cfg, ins, (*ip));
7297 ins->sreg1 = sp [0]->dreg;
7298 ins->sreg2 = sp [1]->dreg;
7299 type_from_op (ins, sp [0], sp [1]);
7301 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7302 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7304 /* FIXME: Pass opcode to is_inst_imm */
7306 /* Use the immediate opcodes if possible */
7307 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7310 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7311 if (imm_opcode != -1) {
7312 ins->opcode = imm_opcode;
7313 if (sp [1]->opcode == OP_I8CONST) {
7314 #if SIZEOF_REGISTER == 8
7315 ins->inst_imm = sp [1]->inst_l;
7317 ins->inst_ls_word = sp [1]->inst_ls_word;
7318 ins->inst_ms_word = sp [1]->inst_ms_word;
7322 ins->inst_imm = (gssize)(sp [1]->inst_c0);
7325 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7326 if (sp [1]->next == NULL)
7327 sp [1]->opcode = OP_NOP;
7330 MONO_ADD_INS ((cfg)->cbb, (ins));
7332 *sp++ = mono_decompose_opcode (cfg, ins);
7345 case CEE_CONV_OVF_I8:
7346 case CEE_CONV_OVF_U8:
7350 /* Special case this earlier so we have long constants in the IR */
7351 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7352 int data = sp [-1]->inst_c0;
7353 sp [-1]->opcode = OP_I8CONST;
7354 sp [-1]->type = STACK_I8;
7355 #if SIZEOF_REGISTER == 8
7356 if ((*ip) == CEE_CONV_U8)
7357 sp [-1]->inst_c0 = (guint32)data;
7359 sp [-1]->inst_c0 = data;
7361 sp [-1]->inst_ls_word = data;
7362 if ((*ip) == CEE_CONV_U8)
7363 sp [-1]->inst_ms_word = 0;
7365 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7367 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7374 case CEE_CONV_OVF_I4:
7375 case CEE_CONV_OVF_I1:
7376 case CEE_CONV_OVF_I2:
7377 case CEE_CONV_OVF_I:
7378 case CEE_CONV_OVF_U:
7381 if (sp [-1]->type == STACK_R8) {
7382 ADD_UNOP (CEE_CONV_OVF_I8);
7389 case CEE_CONV_OVF_U1:
7390 case CEE_CONV_OVF_U2:
7391 case CEE_CONV_OVF_U4:
7394 if (sp [-1]->type == STACK_R8) {
7395 ADD_UNOP (CEE_CONV_OVF_U8);
7402 case CEE_CONV_OVF_I1_UN:
7403 case CEE_CONV_OVF_I2_UN:
7404 case CEE_CONV_OVF_I4_UN:
7405 case CEE_CONV_OVF_I8_UN:
7406 case CEE_CONV_OVF_U1_UN:
7407 case CEE_CONV_OVF_U2_UN:
7408 case CEE_CONV_OVF_U4_UN:
7409 case CEE_CONV_OVF_U8_UN:
7410 case CEE_CONV_OVF_I_UN:
7411 case CEE_CONV_OVF_U_UN:
7421 case CEE_ADD_OVF_UN:
7423 case CEE_MUL_OVF_UN:
7425 case CEE_SUB_OVF_UN:
7433 token = read32 (ip + 1);
7434 klass = mini_get_class (method, token, generic_context);
7435 CHECK_TYPELOAD (klass);
7437 if (generic_class_is_reference_type (cfg, klass)) {
7438 MonoInst *store, *load;
7439 int dreg = alloc_preg (cfg);
7441 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7442 load->flags |= ins_flag;
7443 MONO_ADD_INS (cfg->cbb, load);
7445 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7446 store->flags |= ins_flag;
7447 MONO_ADD_INS (cfg->cbb, store);
7449 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7461 token = read32 (ip + 1);
7462 klass = mini_get_class (method, token, generic_context);
7463 CHECK_TYPELOAD (klass);
7465 /* Optimize the common ldobj+stloc combination */
7475 loc_index = ip [5] - CEE_STLOC_0;
7482 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7483 CHECK_LOCAL (loc_index);
7485 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7486 ins->dreg = cfg->locals [loc_index]->dreg;
7492 /* Optimize the ldobj+stobj combination */
7493 /* The reference case ends up being a load+store anyway */
7494 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7499 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7506 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7515 CHECK_STACK_OVF (1);
7517 n = read32 (ip + 1);
7519 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7520 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7521 ins->type = STACK_OBJ;
7524 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7525 MonoInst *iargs [1];
7527 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7528 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7530 if (cfg->opt & MONO_OPT_SHARED) {
7531 MonoInst *iargs [3];
7533 if (cfg->compile_aot) {
7534 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7536 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7537 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7538 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7539 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7540 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7542 if (bblock->out_of_line) {
7543 MonoInst *iargs [2];
7545 if (image == mono_defaults.corlib) {
7547 * Avoid relocations in AOT and save some space by using a
7548 * version of helper_ldstr specialized to mscorlib.
7550 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7551 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7553 /* Avoid creating the string object */
7554 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7555 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7556 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7560 if (cfg->compile_aot) {
7561 NEW_LDSTRCONST (cfg, ins, image, n);
7563 MONO_ADD_INS (bblock, ins);
7566 NEW_PCONST (cfg, ins, NULL);
7567 ins->type = STACK_OBJ;
7568 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7570 MONO_ADD_INS (bblock, ins);
7579 MonoInst *iargs [2];
7580 MonoMethodSignature *fsig;
7583 MonoInst *vtable_arg = NULL;
7586 token = read32 (ip + 1);
7587 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
7590 fsig = mono_method_get_signature (cmethod, image, token);
7594 mono_save_token_info (cfg, image, token, cmethod);
7596 if (!mono_class_init (cmethod->klass))
7599 if (cfg->generic_sharing_context)
7600 context_used = mono_method_check_context_used (cmethod);
7602 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
7603 if (check_linkdemand (cfg, method, cmethod))
7605 CHECK_CFG_EXCEPTION;
7606 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
7607 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
7610 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7611 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
7612 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7613 mono_class_vtable (cfg->domain, cmethod->klass);
7614 CHECK_TYPELOAD (cmethod->klass);
7616 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7617 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7620 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7621 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7623 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7625 CHECK_TYPELOAD (cmethod->klass);
7626 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7631 n = fsig->param_count;
7635 * Generate smaller code for the common newobj <exception> instruction in
7636 * argument checking code.
7638 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
7639 is_exception_class (cmethod->klass) && n <= 2 &&
7640 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
7641 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
7642 MonoInst *iargs [3];
7644 g_assert (!vtable_arg);
7648 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
7651 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
7655 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
7660 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
7663 g_assert_not_reached ();
7671 /* move the args to allow room for 'this' in the first position */
7677 /* check_call_signature () requires sp[0] to be set */
7678 this_ins.type = STACK_OBJ;
7680 if (check_call_signature (cfg, fsig, sp))
7685 if (mini_class_is_system_array (cmethod->klass)) {
7686 g_assert (!vtable_arg);
7688 *sp = emit_get_rgctx_method (cfg, context_used,
7689 cmethod, MONO_RGCTX_INFO_METHOD);
7691 /* Avoid varargs in the common case */
7692 if (fsig->param_count == 1)
7693 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
7694 else if (fsig->param_count == 2)
7695 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
7696 else if (fsig->param_count == 3)
7697 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
7699 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
7700 } else if (cmethod->string_ctor) {
7701 g_assert (!context_used);
7702 g_assert (!vtable_arg);
7703 /* we simply pass a null pointer */
7704 EMIT_NEW_PCONST (cfg, *sp, NULL);
7705 /* now call the string ctor */
7706 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL);
7708 MonoInst* callvirt_this_arg = NULL;
7710 if (cmethod->klass->valuetype) {
7711 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
7712 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
7713 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
7718 * The code generated by mini_emit_virtual_call () expects
7719 * iargs [0] to be a boxed instance, but luckily the vcall
7720 * will be transformed into a normal call there.
7722 } else if (context_used) {
7726 if (cfg->opt & MONO_OPT_SHARED)
7727 rgctx_info = MONO_RGCTX_INFO_KLASS;
7729 rgctx_info = MONO_RGCTX_INFO_VTABLE;
7730 data = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, rgctx_info);
7732 alloc = handle_alloc_from_inst (cfg, cmethod->klass, data, FALSE);
7735 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7737 CHECK_TYPELOAD (cmethod->klass);
7740 * TypeInitializationExceptions thrown from the mono_runtime_class_init
7741 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
7742 * As a workaround, we call class cctors before allocating objects.
7744 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
7745 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
7746 if (cfg->verbose_level > 2)
7747 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
7748 class_inits = g_slist_prepend (class_inits, vtable);
7751 alloc = handle_alloc (cfg, cmethod->klass, FALSE);
7754 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
7757 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
7759 /* Now call the actual ctor */
7760 /* Avoid virtual calls to ctors if possible */
7761 if (cmethod->klass->marshalbyref)
7762 callvirt_this_arg = sp [0];
7764 if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7765 mono_method_check_inlining (cfg, cmethod) &&
7766 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
7767 !g_list_find (dont_inline, cmethod)) {
7770 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
7771 cfg->real_offset += 5;
7774 inline_costs += costs - 5;
7777 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL);
7779 } else if (context_used &&
7780 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7781 !mono_class_generic_sharing_enabled (cmethod->klass))) {
7782 MonoInst *cmethod_addr;
7784 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7785 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7787 mono_emit_rgctx_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
7790 ins = mono_emit_rgctx_method_call_full (cfg, cmethod, fsig, sp,
7791 callvirt_this_arg, NULL, vtable_arg);
7795 if (alloc == NULL) {
7797 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
7798 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
7812 token = read32 (ip + 1);
7813 klass = mini_get_class (method, token, generic_context);
7814 CHECK_TYPELOAD (klass);
7815 if (sp [0]->type != STACK_OBJ)
7818 if (cfg->generic_sharing_context)
7819 context_used = mono_class_check_context_used (klass);
7828 args [1] = emit_get_rgctx_klass (cfg, context_used,
7829 klass, MONO_RGCTX_INFO_KLASS);
7831 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7835 } else if (mono_class_has_variant_generic_params (klass)) {
7842 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7844 ins = mono_emit_jit_icall (cfg, mono_object_castclass, args);
7848 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7849 MonoMethod *mono_castclass;
7850 MonoInst *iargs [1];
7853 mono_castclass = mono_marshal_get_castclass (klass);
7856 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7857 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7858 g_assert (costs > 0);
7861 cfg->real_offset += 5;
7866 inline_costs += costs;
7869 ins = handle_castclass (cfg, klass, *sp);
7870 CHECK_CFG_EXCEPTION;
7880 token = read32 (ip + 1);
7881 klass = mini_get_class (method, token, generic_context);
7882 CHECK_TYPELOAD (klass);
7883 if (sp [0]->type != STACK_OBJ)
7886 if (cfg->generic_sharing_context)
7887 context_used = mono_class_check_context_used (klass);
7896 args [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7898 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7902 } else if (mono_class_has_variant_generic_params (klass)) {
7909 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
7911 *sp = mono_emit_jit_icall (cfg, mono_object_isinst, args);
7915 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7916 MonoMethod *mono_isinst;
7917 MonoInst *iargs [1];
7920 mono_isinst = mono_marshal_get_isinst (klass);
7923 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
7924 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7925 g_assert (costs > 0);
7928 cfg->real_offset += 5;
7933 inline_costs += costs;
7936 ins = handle_isinst (cfg, klass, *sp);
7937 CHECK_CFG_EXCEPTION;
7944 case CEE_UNBOX_ANY: {
7948 token = read32 (ip + 1);
7949 klass = mini_get_class (method, token, generic_context);
7950 CHECK_TYPELOAD (klass);
7952 mono_save_token_info (cfg, image, token, klass);
7954 if (cfg->generic_sharing_context)
7955 context_used = mono_class_check_context_used (klass);
7957 if (generic_class_is_reference_type (cfg, klass)) {
7958 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
7960 MonoInst *iargs [2];
7965 iargs [1] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
7966 ins = mono_emit_jit_icall (cfg, mono_object_castclass, iargs);
7970 } else if (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
7971 MonoMethod *mono_castclass;
7972 MonoInst *iargs [1];
7975 mono_castclass = mono_marshal_get_castclass (klass);
7978 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
7979 iargs, ip, cfg->real_offset, dont_inline, TRUE);
7981 g_assert (costs > 0);
7984 cfg->real_offset += 5;
7988 inline_costs += costs;
7990 ins = handle_castclass (cfg, klass, *sp);
7991 CHECK_CFG_EXCEPTION;
7999 if (mono_class_is_nullable (klass)) {
8000 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
8007 ins = handle_unbox (cfg, klass, sp, context_used);
8013 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
8026 token = read32 (ip + 1);
8027 klass = mini_get_class (method, token, generic_context);
8028 CHECK_TYPELOAD (klass);
8030 mono_save_token_info (cfg, image, token, klass);
8032 if (cfg->generic_sharing_context)
8033 context_used = mono_class_check_context_used (klass);
8035 if (generic_class_is_reference_type (cfg, klass)) {
8041 if (klass == mono_defaults.void_class)
8043 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
8045 /* frequent check in generic code: box (struct), brtrue */
8046 if (!mono_class_is_nullable (klass) &&
8047 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) && (ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S)) {
8048 /*printf ("box-brtrue opt at 0x%04x in %s\n", real_offset, method->name);*/
8050 MONO_INST_NEW (cfg, ins, OP_BR);
8051 if (*ip == CEE_BRTRUE_S) {
8054 target = ip + 1 + (signed char)(*ip);
8059 target = ip + 4 + (gint)(read32 (ip));
8062 GET_BBLOCK (cfg, tblock, target);
8063 link_bblock (cfg, bblock, tblock);
8064 ins->inst_target_bb = tblock;
8065 GET_BBLOCK (cfg, tblock, ip);
8067 * This leads to some inconsistency, since the two bblocks are
8068 * not really connected, but it is needed for handling stack
8069 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
8070 * FIXME: This should only be needed if sp != stack_start, but that
8071 * doesn't work for some reason (test failure in mcs/tests on x86).
8073 link_bblock (cfg, bblock, tblock);
8074 if (sp != stack_start) {
8075 handle_stack_args (cfg, stack_start, sp - stack_start);
8077 CHECK_UNVERIFIABLE (cfg);
8079 MONO_ADD_INS (bblock, ins);
8080 start_new_bblock = 1;
8088 if (cfg->opt & MONO_OPT_SHARED)
8089 rgctx_info = MONO_RGCTX_INFO_KLASS;
8091 rgctx_info = MONO_RGCTX_INFO_VTABLE;
8092 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
8093 *sp++ = handle_box_from_inst (cfg, val, klass, context_used, data);
8095 *sp++ = handle_box (cfg, val, klass);
8098 CHECK_CFG_EXCEPTION;
8107 token = read32 (ip + 1);
8108 klass = mini_get_class (method, token, generic_context);
8109 CHECK_TYPELOAD (klass);
8111 mono_save_token_info (cfg, image, token, klass);
8113 if (cfg->generic_sharing_context)
8114 context_used = mono_class_check_context_used (klass);
8116 if (mono_class_is_nullable (klass)) {
8119 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
8120 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
8124 ins = handle_unbox (cfg, klass, sp, context_used);
8134 MonoClassField *field;
8138 if (*ip == CEE_STFLD) {
8145 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
8147 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
8150 token = read32 (ip + 1);
8151 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8152 field = mono_method_get_wrapper_data (method, token);
8153 klass = field->parent;
8156 field = mono_field_from_token (image, token, &klass, generic_context);
8160 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8161 FIELD_ACCESS_FAILURE;
8162 mono_class_init (klass);
8164 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exist) have
8165 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
8166 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8167 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8170 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
8171 if (*ip == CEE_STFLD) {
8172 if (target_type_is_incompatible (cfg, field->type, sp [1]))
8174 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8175 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
8176 MonoInst *iargs [5];
8179 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8180 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8181 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
8185 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8186 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
8187 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8188 g_assert (costs > 0);
8190 cfg->real_offset += 5;
8193 inline_costs += costs;
8195 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
8200 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8202 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
8204 #if HAVE_WRITE_BARRIERS
8205 if (mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
8206 /* insert call to write barrier */
8207 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
8208 MonoInst *iargs [2];
8211 dreg = alloc_preg (cfg);
8212 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8214 mono_emit_method_call (cfg, write_barrier, iargs, NULL);
8218 store->flags |= ins_flag;
8225 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8226 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
8227 MonoInst *iargs [4];
8230 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8231 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8232 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
8233 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8234 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
8235 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8237 g_assert (costs > 0);
8239 cfg->real_offset += 5;
8243 inline_costs += costs;
8245 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
8249 if (sp [0]->type == STACK_VTYPE) {
8252 /* Have to compute the address of the variable */
8254 var = get_vreg_to_inst (cfg, sp [0]->dreg);
8256 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
8258 g_assert (var->klass == klass);
8260 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
8264 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8266 if (*ip == CEE_LDFLDA) {
8267 dreg = alloc_preg (cfg);
8269 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8270 ins->klass = mono_class_from_mono_type (field->type);
8271 ins->type = STACK_MP;
8276 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
8277 load->flags |= ins_flag;
8288 MonoClassField *field;
8289 gpointer addr = NULL;
8290 gboolean is_special_static;
8293 token = read32 (ip + 1);
8295 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8296 field = mono_method_get_wrapper_data (method, token);
8297 klass = field->parent;
8300 field = mono_field_from_token (image, token, &klass, generic_context);
8303 mono_class_init (klass);
8304 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8305 FIELD_ACCESS_FAILURE;
8307 /* if the class is Critical then transparent code cannot access its fields */
8308 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8309 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8312 * We can only support shared generic static
8313 * field access on architectures where the
8314 * trampoline code has been extended to handle
8315 * the generic class init.
8317 #ifndef MONO_ARCH_VTABLE_REG
8318 GENERIC_SHARING_FAILURE (*ip);
8321 if (cfg->generic_sharing_context)
8322 context_used = mono_class_check_context_used (klass);
8324 g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
8326 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8327 * to be called here.
8329 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8330 mono_class_vtable (cfg->domain, klass);
8331 CHECK_TYPELOAD (klass);
8333 mono_domain_lock (cfg->domain);
8334 if (cfg->domain->special_static_fields)
8335 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8336 mono_domain_unlock (cfg->domain);
8338 is_special_static = mono_class_field_is_special_static (field);
8340 /* Generate IR to compute the field address */
8342 if ((cfg->opt & MONO_OPT_SHARED) ||
8343 (cfg->compile_aot && is_special_static) ||
8344 (context_used && is_special_static)) {
8345 MonoInst *iargs [2];
8347 g_assert (field->parent);
8348 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8350 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8351 field, MONO_RGCTX_INFO_CLASS_FIELD);
8353 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8355 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8356 } else if (context_used) {
8357 MonoInst *static_data;
8360 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8361 method->klass->name_space, method->klass->name, method->name,
8362 depth, field->offset);
8365 if (mono_class_needs_cctor_run (klass, method)) {
8369 vtable = emit_get_rgctx_klass (cfg, context_used,
8370 klass, MONO_RGCTX_INFO_VTABLE);
8372 // FIXME: This doesn't work since it tries to pass the argument
8373 // in the normal way, instead of using MONO_ARCH_VTABLE_REG
8375 * The vtable pointer is always passed in a register regardless of
8376 * the calling convention, so assign it manually, and make a call
8377 * using a signature without parameters.
8379 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable);
8380 #ifdef MONO_ARCH_VTABLE_REG
8381 mono_call_inst_add_outarg_reg (cfg, call, vtable->dreg, MONO_ARCH_VTABLE_REG, FALSE);
8382 cfg->uses_vtable_reg = TRUE;
8389 * The pointer we're computing here is
8391 * super_info.static_data + field->offset
8393 static_data = emit_get_rgctx_klass (cfg, context_used,
8394 klass, MONO_RGCTX_INFO_STATIC_DATA);
8396 if (field->offset == 0) {
8399 int addr_reg = mono_alloc_preg (cfg);
8400 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8402 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8403 MonoInst *iargs [2];
8405 g_assert (field->parent);
8406 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8407 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8408 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8410 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8412 CHECK_TYPELOAD (klass);
8414 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8415 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8416 if (cfg->verbose_level > 2)
8417 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8418 class_inits = g_slist_prepend (class_inits, vtable);
8420 if (cfg->run_cctors) {
8422 /* This makes it so that inlining cannot trigger */
8423 /* .cctors: too many apps depend on them */
8424 /* running with a specific order... */
8425 if (! vtable->initialized)
8427 ex = mono_runtime_class_init_full (vtable, FALSE);
8429 set_exception_object (cfg, ex);
8430 goto exception_exit;
8434 addr = (char*)vtable->data + field->offset;
8436 if (cfg->compile_aot)
8437 EMIT_NEW_SFLDACONST (cfg, ins, field);
8439 EMIT_NEW_PCONST (cfg, ins, addr);
8442 * insert call to mono_threads_get_static_data (GPOINTER_TO_UINT (addr))
8443 * This could be later optimized to do just a couple of
8444 * memory dereferences with constant offsets.
8446 MonoInst *iargs [1];
8447 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8448 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8452 /* Generate IR to do the actual load/store operation */
8454 if (*ip == CEE_LDSFLDA) {
8455 ins->klass = mono_class_from_mono_type (field->type);
8456 ins->type = STACK_PTR;
8458 } else if (*ip == CEE_STSFLD) {
8463 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, ins->dreg, 0, sp [0]->dreg);
8464 store->flags |= ins_flag;
8466 gboolean is_const = FALSE;
8467 MonoVTable *vtable = NULL;
8469 if (!context_used) {
8470 vtable = mono_class_vtable (cfg->domain, klass);
8471 CHECK_TYPELOAD (klass);
8473 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8474 vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8475 gpointer addr = (char*)vtable->data + field->offset;
8476 int ro_type = field->type->type;
8477 if (ro_type == MONO_TYPE_VALUETYPE && field->type->data.klass->enumtype) {
8478 ro_type = mono_class_enum_basetype (field->type->data.klass)->type;
8480 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8483 case MONO_TYPE_BOOLEAN:
8485 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8489 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8492 case MONO_TYPE_CHAR:
8494 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8498 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
8503 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
8507 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
8510 #ifndef HAVE_MOVING_COLLECTOR
8513 case MONO_TYPE_STRING:
8514 case MONO_TYPE_OBJECT:
8515 case MONO_TYPE_CLASS:
8516 case MONO_TYPE_SZARRAY:
8518 case MONO_TYPE_FNPTR:
8519 case MONO_TYPE_ARRAY:
8520 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
8521 type_to_eval_stack_type ((cfg), field->type, *sp);
8527 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
8532 case MONO_TYPE_VALUETYPE:
8542 CHECK_STACK_OVF (1);
8544 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
8545 load->flags |= ins_flag;
8558 token = read32 (ip + 1);
8559 klass = mini_get_class (method, token, generic_context);
8560 CHECK_TYPELOAD (klass);
8561 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
8562 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
8573 const char *data_ptr;
8575 guint32 field_token;
8581 token = read32 (ip + 1);
8583 klass = mini_get_class (method, token, generic_context);
8584 CHECK_TYPELOAD (klass);
8586 if (cfg->generic_sharing_context)
8587 context_used = mono_class_check_context_used (klass);
8589 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
8590 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
8591 ins->sreg1 = sp [0]->dreg;
8592 ins->type = STACK_I4;
8593 ins->dreg = alloc_ireg (cfg);
8594 MONO_ADD_INS (cfg->cbb, ins);
8595 *sp = mono_decompose_opcode (cfg, ins);
8600 MonoClass *array_class = mono_array_class_get (klass, 1);
8601 /* FIXME: we cannot get a managed
8602 allocator because we can't get the
8603 open generic class's vtable. We
8604 have the same problem in
8605 handle_alloc_from_inst(). This
8606 needs to be solved so that we can
8607 have managed allocs of shared
8610 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
8611 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
8613 MonoMethod *managed_alloc = NULL;
8615 /* FIXME: Decompose later to help abcrem */
8618 args [0] = emit_get_rgctx_klass (cfg, context_used,
8619 array_class, MONO_RGCTX_INFO_VTABLE);
8624 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
8626 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
8628 if (cfg->opt & MONO_OPT_SHARED) {
8629 /* Decompose now to avoid problems with references to the domainvar */
8630 MonoInst *iargs [3];
8632 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8633 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8636 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
8638 /* Decompose later since it is needed by abcrem */
8639 MonoClass *array_type = mono_array_class_get (klass, 1);
8640 mono_class_vtable (cfg->domain, array_type);
8641 CHECK_TYPELOAD (array_type);
8643 MONO_INST_NEW (cfg, ins, OP_NEWARR);
8644 ins->dreg = alloc_preg (cfg);
8645 ins->sreg1 = sp [0]->dreg;
8646 ins->inst_newa_class = klass;
8647 ins->type = STACK_OBJ;
8649 MONO_ADD_INS (cfg->cbb, ins);
8650 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8651 cfg->cbb->has_array_access = TRUE;
8653 /* Needed so mono_emit_load_get_addr () gets called */
8654 mono_get_got_var (cfg);
8664 * we inline/optimize the initialization sequence if possible.
8665 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
8666 * for small sizes open code the memcpy
8667 * ensure the rva field is big enough
8669 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
8670 MonoMethod *memcpy_method = get_memcpy_method ();
8671 MonoInst *iargs [3];
8672 int add_reg = alloc_preg (cfg);
8674 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
8675 if (cfg->compile_aot) {
8676 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
8678 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
8680 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
8681 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
8690 if (sp [0]->type != STACK_OBJ)
8693 dreg = alloc_preg (cfg);
8694 MONO_INST_NEW (cfg, ins, OP_LDLEN);
8695 ins->dreg = alloc_preg (cfg);
8696 ins->sreg1 = sp [0]->dreg;
8697 ins->type = STACK_I4;
8698 MONO_ADD_INS (cfg->cbb, ins);
8699 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
8700 cfg->cbb->has_array_access = TRUE;
8708 if (sp [0]->type != STACK_OBJ)
8711 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8713 klass = mini_get_class (method, read32 (ip + 1), generic_context);
8714 CHECK_TYPELOAD (klass);
8715 /* we need to make sure that this array is exactly the type it needs
8716 * to be for correctness. the wrappers are lax with their usage
8717 * so we need to ignore them here
8719 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
8720 MonoClass *array_class = mono_array_class_get (klass, 1);
8721 mini_emit_check_array_type (cfg, sp [0], array_class);
8722 CHECK_TYPELOAD (array_class);
8726 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8741 case CEE_LDELEM_REF: {
8747 if (*ip == CEE_LDELEM) {
8749 token = read32 (ip + 1);
8750 klass = mini_get_class (method, token, generic_context);
8751 CHECK_TYPELOAD (klass);
8752 mono_class_init (klass);
8755 klass = array_access_to_klass (*ip);
8757 if (sp [0]->type != STACK_OBJ)
8760 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8762 if (sp [1]->opcode == OP_ICONST) {
8763 int array_reg = sp [0]->dreg;
8764 int index_reg = sp [1]->dreg;
8765 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8767 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8768 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
8770 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8771 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
8774 if (*ip == CEE_LDELEM)
8787 case CEE_STELEM_REF:
8794 cfg->flags |= MONO_CFG_HAS_LDELEMA;
8796 if (*ip == CEE_STELEM) {
8798 token = read32 (ip + 1);
8799 klass = mini_get_class (method, token, generic_context);
8800 CHECK_TYPELOAD (klass);
8801 mono_class_init (klass);
8804 klass = array_access_to_klass (*ip);
8806 if (sp [0]->type != STACK_OBJ)
8809 /* storing a NULL doesn't need any of the complex checks in stelemref */
8810 if (generic_class_is_reference_type (cfg, klass) &&
8811 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
8812 MonoMethod* helper = mono_marshal_get_stelemref ();
8813 MonoInst *iargs [3];
8815 if (sp [0]->type != STACK_OBJ)
8817 if (sp [2]->type != STACK_OBJ)
8824 mono_emit_method_call (cfg, helper, iargs, NULL);
8826 if (sp [1]->opcode == OP_ICONST) {
8827 int array_reg = sp [0]->dreg;
8828 int index_reg = sp [1]->dreg;
8829 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
8831 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
8832 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
8834 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1]);
8835 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
8839 if (*ip == CEE_STELEM)
8846 case CEE_CKFINITE: {
8850 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
8851 ins->sreg1 = sp [0]->dreg;
8852 ins->dreg = alloc_freg (cfg);
8853 ins->type = STACK_R8;
8854 MONO_ADD_INS (bblock, ins);
8856 *sp++ = mono_decompose_opcode (cfg, ins);
8861 case CEE_REFANYVAL: {
8862 MonoInst *src_var, *src;
8864 int klass_reg = alloc_preg (cfg);
8865 int dreg = alloc_preg (cfg);
8868 MONO_INST_NEW (cfg, ins, *ip);
8871 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8872 CHECK_TYPELOAD (klass);
8873 mono_class_init (klass);
8875 if (cfg->generic_sharing_context)
8876 context_used = mono_class_check_context_used (klass);
8879 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
8881 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
8882 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
8883 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
8886 MonoInst *klass_ins;
8888 klass_ins = emit_get_rgctx_klass (cfg, context_used,
8889 klass, MONO_RGCTX_INFO_KLASS);
8892 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
8893 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
8895 mini_emit_class_check (cfg, klass_reg, klass);
8897 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
8898 ins->type = STACK_MP;
8903 case CEE_MKREFANY: {
8904 MonoInst *loc, *addr;
8907 MONO_INST_NEW (cfg, ins, *ip);
8910 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
8911 CHECK_TYPELOAD (klass);
8912 mono_class_init (klass);
8914 if (cfg->generic_sharing_context)
8915 context_used = mono_class_check_context_used (klass);
8917 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
8918 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
8921 MonoInst *const_ins;
8922 int type_reg = alloc_preg (cfg);
8924 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
8925 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
8926 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8927 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8928 } else if (cfg->compile_aot) {
8929 int const_reg = alloc_preg (cfg);
8930 int type_reg = alloc_preg (cfg);
8932 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
8933 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
8934 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
8935 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
8937 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
8938 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
8940 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
8942 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
8943 ins->type = STACK_VTYPE;
8944 ins->klass = mono_defaults.typed_reference_class;
8951 MonoClass *handle_class;
8953 CHECK_STACK_OVF (1);
8956 n = read32 (ip + 1);
8958 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
8959 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8960 handle = mono_method_get_wrapper_data (method, n);
8961 handle_class = mono_method_get_wrapper_data (method, n + 1);
8962 if (handle_class == mono_defaults.typehandle_class)
8963 handle = &((MonoClass*)handle)->byval_arg;
8966 handle = mono_ldtoken (image, n, &handle_class, generic_context);
8970 mono_class_init (handle_class);
8971 if (cfg->generic_sharing_context) {
8972 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
8973 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
8974 /* This case handles ldtoken
8975 of an open type, like for
8978 } else if (handle_class == mono_defaults.typehandle_class) {
8979 /* If we get a MONO_TYPE_CLASS
8980 then we need to provide the
8982 instantiation of it. */
8983 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
8986 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
8987 } else if (handle_class == mono_defaults.fieldhandle_class)
8988 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
8989 else if (handle_class == mono_defaults.methodhandle_class)
8990 context_used = mono_method_check_context_used (handle);
8992 g_assert_not_reached ();
8995 if ((cfg->opt & MONO_OPT_SHARED) &&
8996 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
8997 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
8998 MonoInst *addr, *vtvar, *iargs [3];
8999 int method_context_used;
9001 if (cfg->generic_sharing_context)
9002 method_context_used = mono_method_check_context_used (method);
9004 method_context_used = 0;
9006 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9008 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9009 EMIT_NEW_ICONST (cfg, iargs [1], n);
9010 if (method_context_used) {
9011 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
9012 method, MONO_RGCTX_INFO_METHOD);
9013 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
9015 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
9016 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
9018 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9020 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9022 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9024 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
9025 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
9026 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
9027 (cmethod->klass == mono_defaults.monotype_class->parent) &&
9028 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
9029 MonoClass *tclass = mono_class_from_mono_type (handle);
9031 mono_class_init (tclass);
9033 ins = emit_get_rgctx_klass (cfg, context_used,
9034 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
9035 } else if (cfg->compile_aot) {
9036 if (method->wrapper_type) {
9037 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
9038 /* Special case for static synchronized wrappers */
9039 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
9041 /* FIXME: n is not a normal token */
9042 cfg->disable_aot = TRUE;
9043 EMIT_NEW_PCONST (cfg, ins, NULL);
9046 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
9049 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
9051 ins->type = STACK_OBJ;
9052 ins->klass = cmethod->klass;
9055 MonoInst *addr, *vtvar;
9057 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9060 if (handle_class == mono_defaults.typehandle_class) {
9061 ins = emit_get_rgctx_klass (cfg, context_used,
9062 mono_class_from_mono_type (handle),
9063 MONO_RGCTX_INFO_TYPE);
9064 } else if (handle_class == mono_defaults.methodhandle_class) {
9065 ins = emit_get_rgctx_method (cfg, context_used,
9066 handle, MONO_RGCTX_INFO_METHOD);
9067 } else if (handle_class == mono_defaults.fieldhandle_class) {
9068 ins = emit_get_rgctx_field (cfg, context_used,
9069 handle, MONO_RGCTX_INFO_CLASS_FIELD);
9071 g_assert_not_reached ();
9073 } else if (cfg->compile_aot) {
9074 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
9076 EMIT_NEW_PCONST (cfg, ins, handle);
9078 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9079 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9080 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9090 MONO_INST_NEW (cfg, ins, OP_THROW);
9092 ins->sreg1 = sp [0]->dreg;
9094 bblock->out_of_line = TRUE;
9095 MONO_ADD_INS (bblock, ins);
9096 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
9097 MONO_ADD_INS (bblock, ins);
9100 link_bblock (cfg, bblock, end_bblock);
9101 start_new_bblock = 1;
9103 case CEE_ENDFINALLY:
9104 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
9105 MONO_ADD_INS (bblock, ins);
9107 start_new_bblock = 1;
9110 * Control will leave the method so empty the stack, otherwise
9111 * the next basic block will start with a nonempty stack.
9113 while (sp != stack_start) {
9121 if (*ip == CEE_LEAVE) {
9123 target = ip + 5 + (gint32)read32(ip + 1);
9126 target = ip + 2 + (signed char)(ip [1]);
9129 /* empty the stack */
9130 while (sp != stack_start) {
9135 * If this leave statement is in a catch block, check for a
9136 * pending exception, and rethrow it if necessary.
9137 * We avoid doing this in runtime invoke wrappers, since those are called
9138 * by native code which expects the wrapper to catch all exceptions.
9140 for (i = 0; i < header->num_clauses; ++i) {
9141 MonoExceptionClause *clause = &header->clauses [i];
9144 * Use <= in the final comparison to handle clauses with multiple
9145 * leave statements, like in bug #78024.
9146 * The ordering of the exception clauses guarantees that we find the
9149 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
9151 MonoBasicBlock *dont_throw;
9156 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
9159 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
9161 NEW_BBLOCK (cfg, dont_throw);
9164 * Currently, we always rethrow the abort exception, despite the
9165 * fact that this is not correct. See thread6.cs for an example.
9166 * But propagating the abort exception is more important than
9167 * getting the semantics right.
9169 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
9170 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
9171 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
9173 MONO_START_BB (cfg, dont_throw);
9178 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
9180 for (tmp = handlers; tmp; tmp = tmp->next) {
9182 link_bblock (cfg, bblock, tblock);
9183 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
9184 ins->inst_target_bb = tblock;
9185 MONO_ADD_INS (bblock, ins);
9186 bblock->has_call_handler = 1;
9187 if (COMPILE_LLVM (cfg)) {
9188 MonoBasicBlock *target_bb;
9191 * Link the finally bblock with the target, since it will
9192 * conceptually branch there.
9193 * FIXME: Have to link the bblock containing the endfinally.
9195 GET_BBLOCK (cfg, target_bb, target);
9196 link_bblock (cfg, tblock, target_bb);
9199 g_list_free (handlers);
9202 MONO_INST_NEW (cfg, ins, OP_BR);
9203 MONO_ADD_INS (bblock, ins);
9204 GET_BBLOCK (cfg, tblock, target);
9205 link_bblock (cfg, bblock, tblock);
9206 ins->inst_target_bb = tblock;
9207 start_new_bblock = 1;
9209 if (*ip == CEE_LEAVE)
9218 * Mono specific opcodes
9220 case MONO_CUSTOM_PREFIX: {
9222 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
9226 case CEE_MONO_ICALL: {
9228 MonoJitICallInfo *info;
9230 token = read32 (ip + 2);
9231 func = mono_method_get_wrapper_data (method, token);
9232 info = mono_find_jit_icall_by_addr (func);
9235 CHECK_STACK (info->sig->param_count);
9236 sp -= info->sig->param_count;
9238 ins = mono_emit_jit_icall (cfg, info->func, sp);
9239 if (!MONO_TYPE_IS_VOID (info->sig->ret))
9243 inline_costs += 10 * num_calls++;
9247 case CEE_MONO_LDPTR: {
9250 CHECK_STACK_OVF (1);
9252 token = read32 (ip + 2);
9254 ptr = mono_method_get_wrapper_data (method, token);
9255 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
9256 MonoJitICallInfo *callinfo;
9257 const char *icall_name;
9259 icall_name = method->name + strlen ("__icall_wrapper_");
9260 g_assert (icall_name);
9261 callinfo = mono_find_jit_icall_by_name (icall_name);
9262 g_assert (callinfo);
9264 if (ptr == callinfo->func) {
9265 /* Will be transformed into an AOTCONST later */
9266 EMIT_NEW_PCONST (cfg, ins, ptr);
9272 /* FIXME: Generalize this */
9273 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
9274 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
9279 EMIT_NEW_PCONST (cfg, ins, ptr);
9282 inline_costs += 10 * num_calls++;
9283 /* Can't embed random pointers into AOT code */
9284 cfg->disable_aot = 1;
9287 case CEE_MONO_ICALL_ADDR: {
9288 MonoMethod *cmethod;
9291 CHECK_STACK_OVF (1);
9293 token = read32 (ip + 2);
9295 cmethod = mono_method_get_wrapper_data (method, token);
9297 if (cfg->compile_aot) {
9298 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
9300 ptr = mono_lookup_internal_call (cmethod);
9302 EMIT_NEW_PCONST (cfg, ins, ptr);
9308 case CEE_MONO_VTADDR: {
9309 MonoInst *src_var, *src;
9315 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9316 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
9321 case CEE_MONO_NEWOBJ: {
9322 MonoInst *iargs [2];
9324 CHECK_STACK_OVF (1);
9326 token = read32 (ip + 2);
9327 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9328 mono_class_init (klass);
9329 NEW_DOMAINCONST (cfg, iargs [0]);
9330 MONO_ADD_INS (cfg->cbb, iargs [0]);
9331 NEW_CLASSCONST (cfg, iargs [1], klass);
9332 MONO_ADD_INS (cfg->cbb, iargs [1]);
9333 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9335 inline_costs += 10 * num_calls++;
9338 case CEE_MONO_OBJADDR:
9341 MONO_INST_NEW (cfg, ins, OP_MOVE);
9342 ins->dreg = alloc_preg (cfg);
9343 ins->sreg1 = sp [0]->dreg;
9344 ins->type = STACK_MP;
9345 MONO_ADD_INS (cfg->cbb, ins);
9349 case CEE_MONO_LDNATIVEOBJ:
9351 * Similar to LDOBJ, but instead load the unmanaged
9352 * representation of the vtype to the stack.
9357 token = read32 (ip + 2);
9358 klass = mono_method_get_wrapper_data (method, token);
9359 g_assert (klass->valuetype);
9360 mono_class_init (klass);
9363 MonoInst *src, *dest, *temp;
9366 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9367 temp->backend.is_pinvoke = 1;
9368 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9369 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9371 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9372 dest->type = STACK_VTYPE;
9373 dest->klass = klass;
9379 case CEE_MONO_RETOBJ: {
9381 * Same as RET, but return the native representation of a vtype
9384 g_assert (cfg->ret);
9385 g_assert (mono_method_signature (method)->pinvoke);
9390 token = read32 (ip + 2);
9391 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9393 if (!cfg->vret_addr) {
9394 g_assert (cfg->ret_var_is_local);
9396 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9398 EMIT_NEW_RETLOADA (cfg, ins);
9400 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9402 if (sp != stack_start)
9405 MONO_INST_NEW (cfg, ins, OP_BR);
9406 ins->inst_target_bb = end_bblock;
9407 MONO_ADD_INS (bblock, ins);
9408 link_bblock (cfg, bblock, end_bblock);
9409 start_new_bblock = 1;
9413 case CEE_MONO_CISINST:
9414 case CEE_MONO_CCASTCLASS: {
9419 token = read32 (ip + 2);
9420 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9421 if (ip [1] == CEE_MONO_CISINST)
9422 ins = handle_cisinst (cfg, klass, sp [0]);
9424 ins = handle_ccastclass (cfg, klass, sp [0]);
9430 case CEE_MONO_SAVE_LMF:
9431 case CEE_MONO_RESTORE_LMF:
9432 #ifdef MONO_ARCH_HAVE_LMF_OPS
9433 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9434 MONO_ADD_INS (bblock, ins);
9435 cfg->need_lmf_area = TRUE;
9439 case CEE_MONO_CLASSCONST:
9440 CHECK_STACK_OVF (1);
9442 token = read32 (ip + 2);
9443 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9446 inline_costs += 10 * num_calls++;
9448 case CEE_MONO_NOT_TAKEN:
9449 bblock->out_of_line = TRUE;
9453 CHECK_STACK_OVF (1);
9455 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9456 ins->dreg = alloc_preg (cfg);
9457 ins->inst_offset = (gint32)read32 (ip + 2);
9458 ins->type = STACK_PTR;
9459 MONO_ADD_INS (bblock, ins);
9463 case CEE_MONO_DYN_CALL: {
9466 /* It would be easier to call a trampoline, but that would put an
9467 * extra frame on the stack, confusing exception handling. So
9468 * implement it inline using an opcode for now.
9471 if (!cfg->dyn_call_var) {
9472 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9473 /* prevent it from being register allocated */
9474 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
9477 /* Has to use a call inst since local regalloc expects it */
9478 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
9479 ins = (MonoInst*)call;
9481 ins->sreg1 = sp [0]->dreg;
9482 ins->sreg2 = sp [1]->dreg;
9483 MONO_ADD_INS (bblock, ins);
9485 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
9486 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
9490 inline_costs += 10 * num_calls++;
9495 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
9505 /* somewhat similar to LDTOKEN */
9506 MonoInst *addr, *vtvar;
9507 CHECK_STACK_OVF (1);
9508 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
9510 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9511 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
9513 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9514 ins->type = STACK_VTYPE;
9515 ins->klass = mono_defaults.argumenthandle_class;
9528 * The following transforms:
9529 * CEE_CEQ into OP_CEQ
9530 * CEE_CGT into OP_CGT
9531 * CEE_CGT_UN into OP_CGT_UN
9532 * CEE_CLT into OP_CLT
9533 * CEE_CLT_UN into OP_CLT_UN
9535 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
9537 MONO_INST_NEW (cfg, ins, cmp->opcode);
9539 cmp->sreg1 = sp [0]->dreg;
9540 cmp->sreg2 = sp [1]->dreg;
9541 type_from_op (cmp, sp [0], sp [1]);
9543 if ((sp [0]->type == STACK_I8) || ((SIZEOF_REGISTER == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
9544 cmp->opcode = OP_LCOMPARE;
9545 else if (sp [0]->type == STACK_R8)
9546 cmp->opcode = OP_FCOMPARE;
9548 cmp->opcode = OP_ICOMPARE;
9549 MONO_ADD_INS (bblock, cmp);
9550 ins->type = STACK_I4;
9551 ins->dreg = alloc_dreg (cfg, ins->type);
9552 type_from_op (ins, sp [0], sp [1]);
9554 if (cmp->opcode == OP_FCOMPARE) {
9556 * The backends expect the fceq opcodes to do the
9559 cmp->opcode = OP_NOP;
9560 ins->sreg1 = cmp->sreg1;
9561 ins->sreg2 = cmp->sreg2;
9563 MONO_ADD_INS (bblock, ins);
9570 MonoMethod *cil_method;
9571 gboolean needs_static_rgctx_invoke;
9572 int invoke_context_used = 0;
9574 CHECK_STACK_OVF (1);
9576 n = read32 (ip + 2);
9577 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9580 mono_class_init (cmethod->klass);
9582 mono_save_token_info (cfg, image, n, cmethod);
9584 if (cfg->generic_sharing_context)
9585 context_used = mono_method_check_context_used (cmethod);
9587 needs_static_rgctx_invoke = mono_method_needs_static_rgctx_invoke (cmethod, TRUE);
9589 cil_method = cmethod;
9590 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
9591 METHOD_ACCESS_FAILURE;
9593 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9594 if (check_linkdemand (cfg, method, cmethod))
9596 CHECK_CFG_EXCEPTION;
9597 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9598 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9602 * Optimize the common case of ldftn+delegate creation
9604 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
9605 /* FIXME: SGEN support */
9606 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
9607 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
9608 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
9609 MonoInst *target_ins;
9612 invoke = mono_get_delegate_invoke (ctor_method->klass);
9613 if (!invoke || !mono_method_signature (invoke))
9616 if (cfg->generic_sharing_context)
9617 invoke_context_used = mono_method_check_context_used (invoke);
9619 if (invoke_context_used == 0) {
9621 if (cfg->verbose_level > 3)
9622 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
9623 target_ins = sp [-1];
9625 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
9626 CHECK_CFG_EXCEPTION;
9635 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
9636 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
9640 inline_costs += 10 * num_calls++;
9643 case CEE_LDVIRTFTN: {
9648 n = read32 (ip + 2);
9649 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
9652 mono_class_init (cmethod->klass);
9654 if (cfg->generic_sharing_context)
9655 context_used = mono_method_check_context_used (cmethod);
9657 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
9658 if (check_linkdemand (cfg, method, cmethod))
9660 CHECK_CFG_EXCEPTION;
9661 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
9662 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9668 args [1] = emit_get_rgctx_method (cfg, context_used,
9669 cmethod, MONO_RGCTX_INFO_METHOD);
9672 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
9674 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
9677 inline_costs += 10 * num_calls++;
9681 CHECK_STACK_OVF (1);
9683 n = read16 (ip + 2);
9685 EMIT_NEW_ARGLOAD (cfg, ins, n);
9690 CHECK_STACK_OVF (1);
9692 n = read16 (ip + 2);
9694 NEW_ARGLOADA (cfg, ins, n);
9695 MONO_ADD_INS (cfg->cbb, ins);
9703 n = read16 (ip + 2);
9705 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
9707 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
9711 CHECK_STACK_OVF (1);
9713 n = read16 (ip + 2);
9715 EMIT_NEW_LOCLOAD (cfg, ins, n);
9720 unsigned char *tmp_ip;
9721 CHECK_STACK_OVF (1);
9723 n = read16 (ip + 2);
9726 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
9732 EMIT_NEW_LOCLOADA (cfg, ins, n);
9741 n = read16 (ip + 2);
9743 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
9745 emit_stloc_ir (cfg, sp, header, n);
9752 if (sp != stack_start)
9754 if (cfg->method != method)
9756 * Inlining this into a loop in a parent could lead to
9757 * stack overflows which is different behavior than the
9758 * non-inlined case, thus disable inlining in this case.
9760 goto inline_failure;
9762 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
9763 ins->dreg = alloc_preg (cfg);
9764 ins->sreg1 = sp [0]->dreg;
9765 ins->type = STACK_PTR;
9766 MONO_ADD_INS (cfg->cbb, ins);
9768 cfg->flags |= MONO_CFG_HAS_ALLOCA;
9770 ins->flags |= MONO_INST_INIT;
9775 case CEE_ENDFILTER: {
9776 MonoExceptionClause *clause, *nearest;
9777 int cc, nearest_num;
9781 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
9783 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
9784 ins->sreg1 = (*sp)->dreg;
9785 MONO_ADD_INS (bblock, ins);
9786 start_new_bblock = 1;
9791 for (cc = 0; cc < header->num_clauses; ++cc) {
9792 clause = &header->clauses [cc];
9793 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
9794 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
9795 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
9801 if ((ip - header->code) != nearest->handler_offset)
9806 case CEE_UNALIGNED_:
9807 ins_flag |= MONO_INST_UNALIGNED;
9808 /* FIXME: record alignment? we can assume 1 for now */
9813 ins_flag |= MONO_INST_VOLATILE;
9817 ins_flag |= MONO_INST_TAILCALL;
9818 cfg->flags |= MONO_CFG_HAS_TAIL;
9819 /* Can't inline tail calls at this time */
9820 inline_costs += 100000;
9827 token = read32 (ip + 2);
9828 klass = mini_get_class (method, token, generic_context);
9829 CHECK_TYPELOAD (klass);
9830 if (generic_class_is_reference_type (cfg, klass))
9831 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
9833 mini_emit_initobj (cfg, *sp, NULL, klass);
9837 case CEE_CONSTRAINED_:
9839 token = read32 (ip + 2);
9840 if (method->wrapper_type != MONO_WRAPPER_NONE)
9841 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
9843 constrained_call = mono_class_get_full (image, token, generic_context);
9844 CHECK_TYPELOAD (constrained_call);
9849 MonoInst *iargs [3];
9853 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
9854 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
9855 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
9856 /* emit_memset only works when val == 0 */
9857 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
9862 if (ip [1] == CEE_CPBLK) {
9863 MonoMethod *memcpy_method = get_memcpy_method ();
9864 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9866 MonoMethod *memset_method = get_memset_method ();
9867 mono_emit_method_call (cfg, memset_method, iargs, NULL);
9877 ins_flag |= MONO_INST_NOTYPECHECK;
9879 ins_flag |= MONO_INST_NORANGECHECK;
9880 /* we ignore the no-nullcheck for now since we
9881 * really do it explicitly only when doing callvirt->call
9887 int handler_offset = -1;
9889 for (i = 0; i < header->num_clauses; ++i) {
9890 MonoExceptionClause *clause = &header->clauses [i];
9891 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
9892 handler_offset = clause->handler_offset;
9897 bblock->flags |= BB_EXCEPTION_UNSAFE;
9899 g_assert (handler_offset != -1);
9901 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
9902 MONO_INST_NEW (cfg, ins, OP_RETHROW);
9903 ins->sreg1 = load->dreg;
9904 MONO_ADD_INS (bblock, ins);
9906 link_bblock (cfg, bblock, end_bblock);
9907 start_new_bblock = 1;
9915 CHECK_STACK_OVF (1);
9917 token = read32 (ip + 2);
9918 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC) {
9919 MonoType *type = mono_type_create_from_typespec (image, token);
9920 token = mono_type_size (type, &ialign);
9922 MonoClass *klass = mono_class_get_full (image, token, generic_context);
9923 CHECK_TYPELOAD (klass);
9924 mono_class_init (klass);
9925 token = mono_class_value_size (klass, &align);
9927 EMIT_NEW_ICONST (cfg, ins, token);
9932 case CEE_REFANYTYPE: {
9933 MonoInst *src_var, *src;
9939 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9941 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9942 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9943 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
9961 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
9971 g_warning ("opcode 0x%02x not handled", *ip);
9975 if (start_new_bblock != 1)
9978 bblock->cil_length = ip - bblock->cil_code;
9979 bblock->next_bb = end_bblock;
9981 if (cfg->method == method && cfg->domainvar) {
9983 MonoInst *get_domain;
9985 cfg->cbb = init_localsbb;
9987 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
9988 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
9991 get_domain->dreg = alloc_preg (cfg);
9992 MONO_ADD_INS (cfg->cbb, get_domain);
9994 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
9995 MONO_ADD_INS (cfg->cbb, store);
9998 #ifdef TARGET_POWERPC
9999 if (cfg->compile_aot)
10000 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
10001 mono_get_got_var (cfg);
10004 if (cfg->method == method && cfg->got_var)
10005 mono_emit_load_got_addr (cfg);
10010 cfg->cbb = init_localsbb;
10012 for (i = 0; i < header->num_locals; ++i) {
10013 MonoType *ptype = header->locals [i];
10014 int t = ptype->type;
10015 dreg = cfg->locals [i]->dreg;
10017 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
10018 t = mono_class_enum_basetype (ptype->data.klass)->type;
10019 if (ptype->byref) {
10020 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10021 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
10022 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
10023 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
10024 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
10025 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
10026 MONO_INST_NEW (cfg, ins, OP_R8CONST);
10027 ins->type = STACK_R8;
10028 ins->inst_p0 = (void*)&r8_0;
10029 ins->dreg = alloc_dreg (cfg, STACK_R8);
10030 MONO_ADD_INS (init_localsbb, ins);
10031 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
10032 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
10033 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
10034 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
10036 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10041 /* Add a sequence point for method entry/exit events */
10043 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
10044 MONO_ADD_INS (init_localsbb, ins);
10045 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
10046 MONO_ADD_INS (cfg->bb_exit, ins);
10051 if (cfg->method == method) {
10052 MonoBasicBlock *bb;
10053 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10054 bb->region = mono_find_block_region (cfg, bb->real_offset);
10056 mono_create_spvar_for_region (cfg, bb->region);
10057 if (cfg->verbose_level > 2)
10058 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
10062 g_slist_free (class_inits);
10063 dont_inline = g_list_remove (dont_inline, method);
10065 if (inline_costs < 0) {
10068 /* Method is too large */
10069 mname = mono_method_full_name (method, TRUE);
10070 cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
10071 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
10076 if ((cfg->verbose_level > 2) && (cfg->method == method))
10077 mono_print_code (cfg, "AFTER METHOD-TO-IR");
10079 return inline_costs;
10082 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
10083 g_slist_free (class_inits);
10084 dont_inline = g_list_remove (dont_inline, method);
10088 g_slist_free (class_inits);
10089 dont_inline = g_list_remove (dont_inline, method);
10093 g_slist_free (class_inits);
10094 dont_inline = g_list_remove (dont_inline, method);
10095 cfg->exception_type = MONO_EXCEPTION_TYPE_LOAD;
10099 g_slist_free (class_inits);
10100 dont_inline = g_list_remove (dont_inline, method);
10101 set_exception_type_from_invalid_il (cfg, method, ip);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a STORE*_MEMBASE_REG opcode to its STORE*_MEMBASE_IMM counterpart,
 * used when the value being stored turns out to be a constant so the register
 * source can be replaced by an immediate.  Asserts on opcodes with no
 * immediate form.
 * NOTE(review): several interior lines of this listing are elided.
 */
10106 store_membase_reg_to_store_membase_imm (int opcode)
10109 case OP_STORE_MEMBASE_REG:
10110 return OP_STORE_MEMBASE_IMM;
10111 case OP_STOREI1_MEMBASE_REG:
10112 return OP_STOREI1_MEMBASE_IMM;
10113 case OP_STOREI2_MEMBASE_REG:
10114 return OP_STOREI2_MEMBASE_IMM;
10115 case OP_STOREI4_MEMBASE_REG:
10116 return OP_STOREI4_MEMBASE_IMM;
10117 case OP_STOREI8_MEMBASE_REG:
10118 return OP_STOREI8_MEMBASE_IMM;
10120 g_assert_not_reached ();
10126 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 *
 *   Return the immediate (_IMM) variant of OPCODE, i.e. the form taking a
 * constant second operand instead of a register.  Covers integer/long
 * arithmetic and shifts, compares, register stores, and some x86/amd64
 * specific opcodes.  The case labels between the return statements are not
 * visible in this listing; opcodes without an immediate form are presumably
 * handled after the switch — confirm against the full source.
 */
10129 mono_op_to_op_imm (int opcode)
10133 return OP_IADD_IMM;
10135 return OP_ISUB_IMM;
10137 return OP_IDIV_IMM;
10139 return OP_IDIV_UN_IMM;
10141 return OP_IREM_IMM;
10143 return OP_IREM_UN_IMM;
10145 return OP_IMUL_IMM;
10147 return OP_IAND_IMM;
10151 return OP_IXOR_IMM;
10153 return OP_ISHL_IMM;
10155 return OP_ISHR_IMM;
10157 return OP_ISHR_UN_IMM;
10160 return OP_LADD_IMM;
10162 return OP_LSUB_IMM;
10164 return OP_LAND_IMM;
10168 return OP_LXOR_IMM;
10170 return OP_LSHL_IMM;
10172 return OP_LSHR_IMM;
10174 return OP_LSHR_UN_IMM;
10177 return OP_COMPARE_IMM;
10179 return OP_ICOMPARE_IMM;
10181 return OP_LCOMPARE_IMM;
10183 case OP_STORE_MEMBASE_REG:
10184 return OP_STORE_MEMBASE_IMM;
10185 case OP_STOREI1_MEMBASE_REG:
10186 return OP_STOREI1_MEMBASE_IMM;
10187 case OP_STOREI2_MEMBASE_REG:
10188 return OP_STOREI2_MEMBASE_IMM;
10189 case OP_STOREI4_MEMBASE_REG:
10190 return OP_STOREI4_MEMBASE_IMM;
/* x86/amd64 only: push and compare-with-memory immediates */
10192 #if defined(TARGET_X86) || defined (TARGET_AMD64)
10194 return OP_X86_PUSH_IMM;
10195 case OP_X86_COMPARE_MEMBASE_REG:
10196 return OP_X86_COMPARE_MEMBASE_IMM;
10198 #if defined(TARGET_AMD64)
10199 case OP_AMD64_ICOMPARE_MEMBASE_REG:
10200 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10202 case OP_VOIDCALL_REG:
10203 return OP_VOIDCALL;
10211 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL CEE_LDIND_* indirect-load opcode to the corresponding
 * OP_LOAD*_MEMBASE IR opcode (load of a given width/signedness from
 * base register + displacement).  LDIND_I and LDIND_REF both become the
 * pointer-sized OP_LOAD_MEMBASE.  Asserts on unknown opcodes.
 * NOTE(review): most case labels are elided in this listing.
 */
10218 ldind_to_load_membase (int opcode)
10222 return OP_LOADI1_MEMBASE;
10224 return OP_LOADU1_MEMBASE;
10226 return OP_LOADI2_MEMBASE;
10228 return OP_LOADU2_MEMBASE;
10230 return OP_LOADI4_MEMBASE;
10232 return OP_LOADU4_MEMBASE;
10234 return OP_LOAD_MEMBASE;
10235 case CEE_LDIND_REF:
10236 return OP_LOAD_MEMBASE;
10238 return OP_LOADI8_MEMBASE;
10240 return OP_LOADR4_MEMBASE;
10242 return OP_LOADR8_MEMBASE;
10244 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL CEE_STIND_* indirect-store opcode to the corresponding
 * OP_STORE*_MEMBASE_REG IR opcode.  STIND_REF becomes the pointer-sized
 * OP_STORE_MEMBASE_REG.  Asserts on unknown opcodes.
 * NOTE(review): most case labels are elided in this listing.
 */
10251 stind_to_store_membase (int opcode)
10255 return OP_STOREI1_MEMBASE_REG;
10257 return OP_STOREI2_MEMBASE_REG;
10259 return OP_STOREI4_MEMBASE_REG;
10261 case CEE_STIND_REF:
10262 return OP_STORE_MEMBASE_REG;
10264 return OP_STOREI8_MEMBASE_REG;
10266 return OP_STORER4_MEMBASE_REG;
10268 return OP_STORER8_MEMBASE_REG;
10270 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a *_MEMBASE load (base register + displacement) to the *_MEM
 * variant which loads from an absolute address.  Only x86/amd64 have
 * such opcodes; on other targets the elided fallthrough presumably
 * returns a "no mapping" value — confirm against the full source.
 */
10277 mono_load_membase_to_load_mem (int opcode)
10279 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
10280 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10282 case OP_LOAD_MEMBASE:
10283 return OP_LOAD_MEM;
10284 case OP_LOADU1_MEMBASE:
10285 return OP_LOADU1_MEM;
10286 case OP_LOADU2_MEMBASE:
10287 return OP_LOADU2_MEM;
10288 case OP_LOADI4_MEMBASE:
10289 return OP_LOADI4_MEM;
10290 case OP_LOADU4_MEMBASE:
10291 return OP_LOADU4_MEM;
/* 64-bit loads only exist as a native width on 64-bit registers */
10292 #if SIZEOF_REGISTER == 8
10293 case OP_LOADI8_MEMBASE:
10294 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU opcode OPCODE whose result would be stored back with
 * STORE_OPCODE, return an x86/amd64 read-modify-write opcode which operates
 * directly on the memory destination, so the separate load+op+store can be
 * fused.  Only full-width stores qualify (32-bit on x86, 32/64-bit on
 * amd64); the elided early-return presumably yields "no mapping" otherwise —
 * confirm against the full source.  Case labels between the returns are
 * elided in this listing.
 */
10303 op_to_op_dest_membase (int store_opcode, int opcode)
10305 #if defined(TARGET_X86)
/* Only pointer-sized / 32-bit stores can be fused on x86 */
10306 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
10311 return OP_X86_ADD_MEMBASE_REG;
10313 return OP_X86_SUB_MEMBASE_REG;
10315 return OP_X86_AND_MEMBASE_REG;
10317 return OP_X86_OR_MEMBASE_REG;
10319 return OP_X86_XOR_MEMBASE_REG;
10322 return OP_X86_ADD_MEMBASE_IMM;
10325 return OP_X86_SUB_MEMBASE_IMM;
10328 return OP_X86_AND_MEMBASE_IMM;
10331 return OP_X86_OR_MEMBASE_IMM;
10334 return OP_X86_XOR_MEMBASE_IMM;
10340 #if defined(TARGET_AMD64)
/* amd64 additionally allows 64-bit stores to be fused */
10341 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
10346 return OP_X86_ADD_MEMBASE_REG;
10348 return OP_X86_SUB_MEMBASE_REG;
10350 return OP_X86_AND_MEMBASE_REG;
10352 return OP_X86_OR_MEMBASE_REG;
10354 return OP_X86_XOR_MEMBASE_REG;
10356 return OP_X86_ADD_MEMBASE_IMM;
10358 return OP_X86_SUB_MEMBASE_IMM;
10360 return OP_X86_AND_MEMBASE_IMM;
10362 return OP_X86_OR_MEMBASE_IMM;
10364 return OP_X86_XOR_MEMBASE_IMM;
10366 return OP_AMD64_ADD_MEMBASE_REG;
10368 return OP_AMD64_SUB_MEMBASE_REG;
10370 return OP_AMD64_AND_MEMBASE_REG;
10372 return OP_AMD64_OR_MEMBASE_REG;
10374 return OP_AMD64_XOR_MEMBASE_REG;
10377 return OP_AMD64_ADD_MEMBASE_IMM;
10380 return OP_AMD64_SUB_MEMBASE_IMM;
10383 return OP_AMD64_AND_MEMBASE_IMM;
10386 return OP_AMD64_OR_MEMBASE_IMM;
10389 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   On x86/amd64, fuse a condition-setting opcode whose byte result is
 * stored with an I1 store into a single SETcc-to-memory opcode.
 * The case labels (presumably the CEQ/CNE style opcodes) are elided in
 * this listing — confirm against the full source.
 */
10399 op_to_op_store_membase (int store_opcode, int opcode)
10401 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10404 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10405 return OP_X86_SETEQ_MEMBASE;
10407 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10408 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Return an x86/amd64 opcode which reads its first source operand
 * directly from memory, fusing the load LOAD_OPCODE into OPCODE
 * (push/compare with a memory operand).  Only applies when the load
 * width matches; case labels between the returns are elided in this
 * listing.
 */
10416 op_to_op_src1_membase (int load_opcode, int opcode)
10419 /* FIXME: This has sign extension issues */
10421 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10422 return OP_X86_COMPARE_MEMBASE8_IMM;
/* x86: only pointer-sized / 32-bit loads can be fused below */
10425 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10430 return OP_X86_PUSH_MEMBASE;
10431 case OP_COMPARE_IMM:
10432 case OP_ICOMPARE_IMM:
10433 return OP_X86_COMPARE_MEMBASE_IMM;
10436 return OP_X86_COMPARE_MEMBASE_REG;
10440 #ifdef TARGET_AMD64
10441 /* FIXME: This has sign extension issues */
10443 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10444 return OP_X86_COMPARE_MEMBASE8_IMM;
10449 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10450 return OP_X86_PUSH_MEMBASE;
10452 /* FIXME: This only works for 32 bit immediates
10453 case OP_COMPARE_IMM:
10454 case OP_LCOMPARE_IMM:
10455 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10456 return OP_AMD64_COMPARE_MEMBASE_IMM;
10458 case OP_ICOMPARE_IMM:
10459 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10460 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10464 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
10465 return OP_AMD64_COMPARE_MEMBASE_REG;
10468 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10469 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Return an x86/amd64 opcode which reads its second source operand
 * directly from memory, fusing the load LOAD_OPCODE into the ALU/compare
 * OPCODE (reg OP [mem] forms).  The load width must match the operation
 * width (32-bit forms vs. 64-bit AMD64 forms).  Case labels between the
 * returns are elided in this listing.
 */
10478 op_to_op_src2_membase (int load_opcode, int opcode)
/* x86: only pointer-sized / 32-bit loads can be fused */
10481 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10487 return OP_X86_COMPARE_REG_MEMBASE;
10489 return OP_X86_ADD_REG_MEMBASE;
10491 return OP_X86_SUB_REG_MEMBASE;
10493 return OP_X86_AND_REG_MEMBASE;
10495 return OP_X86_OR_REG_MEMBASE;
10497 return OP_X86_XOR_REG_MEMBASE;
10501 #ifdef TARGET_AMD64
10504 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10505 return OP_AMD64_ICOMPARE_REG_MEMBASE;
10509 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10510 return OP_AMD64_COMPARE_REG_MEMBASE;
10513 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10514 return OP_X86_ADD_REG_MEMBASE;
10516 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10517 return OP_X86_SUB_REG_MEMBASE;
10519 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10520 return OP_X86_AND_REG_MEMBASE;
10522 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10523 return OP_X86_OR_REG_MEMBASE;
10525 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
10526 return OP_X86_XOR_REG_MEMBASE;
10528 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10529 return OP_AMD64_ADD_REG_MEMBASE;
10531 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10532 return OP_AMD64_SUB_REG_MEMBASE;
10534 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10535 return OP_AMD64_AND_REG_MEMBASE;
10537 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10538 return OP_AMD64_OR_REG_MEMBASE;
10540 if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE))
10541 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuses to convert opcodes which are
 * software-emulated on this configuration (long shifts on 32-bit targets,
 * mul/div when MONO_ARCH_EMULATE_* is set), since the emulation path
 * cannot take an immediate form.  The refused case labels are elided in
 * this listing.
 */
10549 mono_op_to_op_imm_noemul (int opcode)
10552 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
10557 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10565 return mono_op_to_op_imm (opcode);
10569 #ifndef DISABLE_JIT
10572 * mono_handle_global_vregs:
10574 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
/*
 * mono_handle_global_vregs:
 *
 *   Make vregs used in more than one bblock 'global' by allocating a
 * MonoInst variable for them, then convert variables used in only one
 * bblock back into local vregs, and finally compress cfg->varinfo /
 * cfg->vars so liveness computation is faster.
 *
 * FIX(review): in the AOT liveness scan, the use-check disjunction tested
 * SRC1/sreg1 twice; the second disjunct now tests SRC2/sreg2, so uses of
 * the variable through the second source register also mark it as spilled.
 * (The surrounding guard of that scan is not visible in this listing —
 * confirm against the full source.)
 */
10578 mono_handle_global_vregs (MonoCompile *cfg)
10580 gint32 *vreg_to_bb;
10581 MonoBasicBlock *bb;
/* vreg -> (block_num + 1), 0 = unseen, -1 = used in more than one bb */
10584 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
10586 #ifdef MONO_ARCH_SIMD_INTRINSICS
10587 if (cfg->uses_simd_intrinsics)
10588 mono_simd_simplify_indirection (cfg);
10591 /* Find local vregs used in more than one bb */
10592 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10593 MonoInst *ins = bb->code;
10594 int block_num = bb->block_num;
10596 if (cfg->verbose_level > 2)
10597 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
10600 for (; ins; ins = ins->next) {
10601 const char *spec = INS_INFO (ins->opcode);
10602 int regtype = 0, regindex;
10605 if (G_UNLIKELY (cfg->verbose_level > 2))
10606 mono_print_ins (ins);
10608 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Visit dreg, sreg1, sreg2, sreg3 in turn */
10610 for (regindex = 0; regindex < 4; regindex ++) {
10613 if (regindex == 0) {
10614 regtype = spec [MONO_INST_DEST];
10615 if (regtype == ' ')
10618 } else if (regindex == 1) {
10619 regtype = spec [MONO_INST_SRC1];
10620 if (regtype == ' ')
10623 } else if (regindex == 2) {
10624 regtype = spec [MONO_INST_SRC2];
10625 if (regtype == ' ')
10628 } else if (regindex == 3) {
10629 regtype = spec [MONO_INST_SRC3];
10630 if (regtype == ' ')
10635 #if SIZEOF_REGISTER == 4
10636 /* In the LLVM case, the long opcodes are not decomposed */
10637 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
10639 * Since some instructions reference the original long vreg,
10640 * and some reference the two component vregs, it is quite hard
10641 * to determine when it needs to be global. So be conservative.
10643 if (!get_vreg_to_inst (cfg, vreg)) {
10644 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10646 if (cfg->verbose_level > 2)
10647 printf ("LONG VREG R%d made global.\n", vreg);
10651 * Make the component vregs volatile since the optimizations can
10652 * get confused otherwise.
10654 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
10655 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
10659 g_assert (vreg != -1);
10661 prev_bb = vreg_to_bb [vreg];
10662 if (prev_bb == 0) {
10663 /* 0 is a valid block num */
10664 vreg_to_bb [vreg] = block_num + 1;
10665 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hardware registers are handled by the register allocator */
10666 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
10669 if (!get_vreg_to_inst (cfg, vreg)) {
10670 if (G_UNLIKELY (cfg->verbose_level > 2))
10671 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
10675 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
10678 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
10681 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
10684 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
10687 g_assert_not_reached ();
10691 /* Flag as having been used in more than one bb */
10692 vreg_to_bb [vreg] = -1;
10698 /* If a variable is used in only one bblock, convert it into a local vreg */
10699 for (i = 0; i < cfg->num_varinfo; i++) {
10700 MonoInst *var = cfg->varinfo [i];
10701 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
10703 switch (var->type) {
10709 #if SIZEOF_REGISTER == 8
10712 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
10713 /* Enabling this screws up the fp stack on x86 */
10716 /* Arguments are implicitly global */
10717 /* Putting R4 vars into registers doesn't work currently */
10718 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
10720 * Make that the variable's liveness interval doesn't contain a call, since
10721 * that would cause the lvreg to be spilled, making the whole optimization
10724 /* This is too slow for JIT compilation */
10726 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
10728 int def_index, call_index, ins_index;
10729 gboolean spilled = FALSE;
10734 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
10735 const char *spec = INS_INFO (ins->opcode);
10737 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
10738 def_index = ins_index;
/* FIX: second disjunct previously duplicated the SRC1/sreg1 test,
 * so uses through sreg2 never marked the variable as spilled */
10740 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
10741 ((spec [MONO_INST_SRC2] != ' ') && (ins->sreg2 == var->dreg))) {
10742 if (call_index > def_index) {
10748 if (MONO_IS_CALL (ins))
10749 call_index = ins_index;
10759 if (G_UNLIKELY (cfg->verbose_level > 2))
10760 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
10761 var->flags |= MONO_INST_IS_DEAD;
10762 cfg->vreg_to_inst [var->dreg] = NULL;
10769 * Compress the varinfo and vars tables so the liveness computation is faster and
10770 * takes up less space.
10773 for (i = 0; i < cfg->num_varinfo; ++i) {
10774 MonoInst *var = cfg->varinfo [i];
10775 if (pos < i && cfg->locals_start == i)
10776 cfg->locals_start = pos;
10777 if (!(var->flags & MONO_INST_IS_DEAD)) {
10779 cfg->varinfo [pos] = cfg->varinfo [i];
10780 cfg->varinfo [pos]->inst_c0 = pos;
10781 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
10782 cfg->vars [pos].idx = pos;
10783 #if SIZEOF_REGISTER == 4
10784 if (cfg->varinfo [pos]->type == STACK_I8) {
10785 /* Modify the two component vars too */
10788 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
10789 var1->inst_c0 = pos;
10790 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
10791 var1->inst_c0 = pos;
10798 cfg->num_varinfo = pos;
10799 if (cfg->locals_start > cfg->num_varinfo)
10800 cfg->locals_start = cfg->num_varinfo;
10804 * mono_spill_global_vars:
10806 * Generate spill code for variables which are not allocated to registers,
10807 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
10808 * code is generated which could be optimized by the local optimization passes.
10811 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
10813 MonoBasicBlock *bb;
10815 int orig_next_vreg;
10816 guint32 *vreg_to_lvreg;
10818 guint32 i, lvregs_len;
10819 gboolean dest_has_lvreg = FALSE;
10820 guint32 stacktypes [128];
10821 MonoInst **live_range_start, **live_range_end;
10822 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
10824 *need_local_opts = FALSE;
10826 memset (spec2, 0, sizeof (spec2));
10828 /* FIXME: Move this function to mini.c */
10829 stacktypes ['i'] = STACK_PTR;
10830 stacktypes ['l'] = STACK_I8;
10831 stacktypes ['f'] = STACK_R8;
10832 #ifdef MONO_ARCH_SIMD_INTRINSICS
10833 stacktypes ['x'] = STACK_VTYPE;
10836 #if SIZEOF_REGISTER == 4
10837 /* Create MonoInsts for longs */
10838 for (i = 0; i < cfg->num_varinfo; i++) {
10839 MonoInst *ins = cfg->varinfo [i];
10841 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
10842 switch (ins->type) {
10843 #ifdef MONO_ARCH_SOFT_FLOAT
10849 g_assert (ins->opcode == OP_REGOFFSET);
10851 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
10853 tree->opcode = OP_REGOFFSET;
10854 tree->inst_basereg = ins->inst_basereg;
10855 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
10857 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
10859 tree->opcode = OP_REGOFFSET;
10860 tree->inst_basereg = ins->inst_basereg;
10861 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
10871 /* FIXME: widening and truncation */
10874 * As an optimization, when a variable allocated to the stack is first loaded into
10875 * an lvreg, we will remember the lvreg and use it the next time instead of loading
10876 * the variable again.
10878 orig_next_vreg = cfg->next_vreg;
10879 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
10880 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
10884 * These arrays contain the first and last instructions accessing a given
10886 * Since we emit bblocks in the same order we process them here, and we
10887 * don't split live ranges, these will precisely describe the live range of
10888 * the variable, i.e. the instruction range where a valid value can be found
10889 * in the variables location.
10891 /* FIXME: Only do this if debugging info is requested */
10892 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
10893 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
10894 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10895 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
10897 /* Add spill loads/stores */
10898 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10901 if (cfg->verbose_level > 2)
10902 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
10904 /* Clear vreg_to_lvreg array */
10905 for (i = 0; i < lvregs_len; i++)
10906 vreg_to_lvreg [lvregs [i]] = 0;
10910 MONO_BB_FOR_EACH_INS (bb, ins) {
10911 const char *spec = INS_INFO (ins->opcode);
10912 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
10913 gboolean store, no_lvreg;
10914 int sregs [MONO_MAX_SRC_REGS];
10916 if (G_UNLIKELY (cfg->verbose_level > 2))
10917 mono_print_ins (ins);
10919 if (ins->opcode == OP_NOP)
10923 * We handle LDADDR here as well, since it can only be decomposed
10924 * when variable addresses are known.
10926 if (ins->opcode == OP_LDADDR) {
10927 MonoInst *var = ins->inst_p0;
10929 if (var->opcode == OP_VTARG_ADDR) {
10930 /* Happens on SPARC/S390 where vtypes are passed by reference */
10931 MonoInst *vtaddr = var->inst_left;
10932 if (vtaddr->opcode == OP_REGVAR) {
10933 ins->opcode = OP_MOVE;
10934 ins->sreg1 = vtaddr->dreg;
10936 else if (var->inst_left->opcode == OP_REGOFFSET) {
10937 ins->opcode = OP_LOAD_MEMBASE;
10938 ins->inst_basereg = vtaddr->inst_basereg;
10939 ins->inst_offset = vtaddr->inst_offset;
10943 g_assert (var->opcode == OP_REGOFFSET);
10945 ins->opcode = OP_ADD_IMM;
10946 ins->sreg1 = var->inst_basereg;
10947 ins->inst_imm = var->inst_offset;
10950 *need_local_opts = TRUE;
10951 spec = INS_INFO (ins->opcode);
10954 if (ins->opcode < MONO_CEE_LAST) {
10955 mono_print_ins (ins);
10956 g_assert_not_reached ();
10960 * Store opcodes have destbasereg in the dreg, but in reality, it is an
10964 if (MONO_IS_STORE_MEMBASE (ins)) {
10965 tmp_reg = ins->dreg;
10966 ins->dreg = ins->sreg2;
10967 ins->sreg2 = tmp_reg;
10970 spec2 [MONO_INST_DEST] = ' ';
10971 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
10972 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
10973 spec2 [MONO_INST_SRC3] = ' ';
10975 } else if (MONO_IS_STORE_MEMINDEX (ins))
10976 g_assert_not_reached ();
10981 if (G_UNLIKELY (cfg->verbose_level > 2)) {
10982 printf ("\t %.3s %d", spec, ins->dreg);
10983 num_sregs = mono_inst_get_src_registers (ins, sregs);
10984 for (srcindex = 0; srcindex < 3; ++srcindex)
10985 printf (" %d", sregs [srcindex]);
10992 regtype = spec [MONO_INST_DEST];
10993 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
10996 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
10997 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
10998 MonoInst *store_ins;
11000 MonoInst *def_ins = ins;
11001 int dreg = ins->dreg; /* The original vreg */
11003 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
11005 if (var->opcode == OP_REGVAR) {
11006 ins->dreg = var->dreg;
11007 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
11009 * Instead of emitting a load+store, use a _membase opcode.
11011 g_assert (var->opcode == OP_REGOFFSET);
11012 if (ins->opcode == OP_MOVE) {
11016 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
11017 ins->inst_basereg = var->inst_basereg;
11018 ins->inst_offset = var->inst_offset;
11021 spec = INS_INFO (ins->opcode);
11025 g_assert (var->opcode == OP_REGOFFSET);
11027 prev_dreg = ins->dreg;
11029 /* Invalidate any previous lvreg for this vreg */
11030 vreg_to_lvreg [ins->dreg] = 0;
11034 #ifdef MONO_ARCH_SOFT_FLOAT
11035 if (store_opcode == OP_STORER8_MEMBASE_REG) {
11037 store_opcode = OP_STOREI8_MEMBASE_REG;
11041 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
11043 if (regtype == 'l') {
11044 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
11045 mono_bblock_insert_after_ins (bb, ins, store_ins);
11046 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
11047 mono_bblock_insert_after_ins (bb, ins, store_ins);
11048 def_ins = store_ins;
11051 g_assert (store_opcode != OP_STOREV_MEMBASE);
11053 /* Try to fuse the store into the instruction itself */
11054 /* FIXME: Add more instructions */
11055 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
11056 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
11057 ins->inst_imm = ins->inst_c0;
11058 ins->inst_destbasereg = var->inst_basereg;
11059 ins->inst_offset = var->inst_offset;
11060 spec = INS_INFO (ins->opcode);
11061 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
11062 ins->opcode = store_opcode;
11063 ins->inst_destbasereg = var->inst_basereg;
11064 ins->inst_offset = var->inst_offset;
11068 tmp_reg = ins->dreg;
11069 ins->dreg = ins->sreg2;
11070 ins->sreg2 = tmp_reg;
11073 spec2 [MONO_INST_DEST] = ' ';
11074 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
11075 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
11076 spec2 [MONO_INST_SRC3] = ' ';
11078 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
11079 // FIXME: The backends expect the base reg to be in inst_basereg
11080 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
11082 ins->inst_basereg = var->inst_basereg;
11083 ins->inst_offset = var->inst_offset;
11084 spec = INS_INFO (ins->opcode);
11086 /* printf ("INS: "); mono_print_ins (ins); */
11087 /* Create a store instruction */
11088 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
11090 /* Insert it after the instruction */
11091 mono_bblock_insert_after_ins (bb, ins, store_ins);
11093 def_ins = store_ins;
11096 * We can't assign ins->dreg to var->dreg here, since the
11097 * sregs could use it. So set a flag, and do it after
11100 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
11101 dest_has_lvreg = TRUE;
11106 if (def_ins && !live_range_start [dreg]) {
11107 live_range_start [dreg] = def_ins;
11108 live_range_start_bb [dreg] = bb;
11115 num_sregs = mono_inst_get_src_registers (ins, sregs);
11116 for (srcindex = 0; srcindex < 3; ++srcindex) {
11117 regtype = spec [MONO_INST_SRC1 + srcindex];
11118 sreg = sregs [srcindex];
11120 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
11121 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
11122 MonoInst *var = get_vreg_to_inst (cfg, sreg);
11123 MonoInst *use_ins = ins;
11124 MonoInst *load_ins;
11125 guint32 load_opcode;
11127 if (var->opcode == OP_REGVAR) {
11128 sregs [srcindex] = var->dreg;
11129 //mono_inst_set_src_registers (ins, sregs);
11130 live_range_end [sreg] = use_ins;
11131 live_range_end_bb [sreg] = bb;
11135 g_assert (var->opcode == OP_REGOFFSET);
11137 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
11139 g_assert (load_opcode != OP_LOADV_MEMBASE);
11141 if (vreg_to_lvreg [sreg]) {
11142 g_assert (vreg_to_lvreg [sreg] != -1);
11144 /* The variable is already loaded to an lvreg */
11145 if (G_UNLIKELY (cfg->verbose_level > 2))
11146 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
11147 sregs [srcindex] = vreg_to_lvreg [sreg];
11148 //mono_inst_set_src_registers (ins, sregs);
11152 /* Try to fuse the load into the instruction */
11153 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
11154 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
11155 sregs [0] = var->inst_basereg;
11156 //mono_inst_set_src_registers (ins, sregs);
11157 ins->inst_offset = var->inst_offset;
11158 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
11159 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
11160 sregs [1] = var->inst_basereg;
11161 //mono_inst_set_src_registers (ins, sregs);
11162 ins->inst_offset = var->inst_offset;
11164 if (MONO_IS_REAL_MOVE (ins)) {
11165 ins->opcode = OP_NOP;
11168 //printf ("%d ", srcindex); mono_print_ins (ins);
11170 sreg = alloc_dreg (cfg, stacktypes [regtype]);
11172 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
11173 if (var->dreg == prev_dreg) {
11175 * sreg refers to the value loaded by the load
11176 * emitted below, but we need to use ins->dreg
11177 * since it refers to the store emitted earlier.
11181 g_assert (sreg != -1);
11182 vreg_to_lvreg [var->dreg] = sreg;
11183 g_assert (lvregs_len < 1024);
11184 lvregs [lvregs_len ++] = var->dreg;
11188 sregs [srcindex] = sreg;
11189 //mono_inst_set_src_registers (ins, sregs);
11191 if (regtype == 'l') {
11192 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
11193 mono_bblock_insert_before_ins (bb, ins, load_ins);
11194 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
11195 mono_bblock_insert_before_ins (bb, ins, load_ins);
11196 use_ins = load_ins;
11199 #if SIZEOF_REGISTER == 4
11200 g_assert (load_opcode != OP_LOADI8_MEMBASE);
11202 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
11203 mono_bblock_insert_before_ins (bb, ins, load_ins);
11204 use_ins = load_ins;
11208 if (var->dreg < orig_next_vreg) {
11209 live_range_end [var->dreg] = use_ins;
11210 live_range_end_bb [var->dreg] = bb;
11214 mono_inst_set_src_registers (ins, sregs);
11216 if (dest_has_lvreg) {
11217 g_assert (ins->dreg != -1);
11218 vreg_to_lvreg [prev_dreg] = ins->dreg;
11219 g_assert (lvregs_len < 1024);
11220 lvregs [lvregs_len ++] = prev_dreg;
11221 dest_has_lvreg = FALSE;
11225 tmp_reg = ins->dreg;
11226 ins->dreg = ins->sreg2;
11227 ins->sreg2 = tmp_reg;
11230 if (MONO_IS_CALL (ins)) {
11231 /* Clear vreg_to_lvreg array */
11232 for (i = 0; i < lvregs_len; i++)
11233 vreg_to_lvreg [lvregs [i]] = 0;
11235 } else if (ins->opcode == OP_NOP) {
11237 MONO_INST_NULLIFY_SREGS (ins);
11240 if (cfg->verbose_level > 2)
11241 mono_print_ins_index (1, ins);
11245 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
11247 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
11248 * by storing the current native offset into MonoMethodVar->live_range_start/end.
11250 for (i = 0; i < cfg->num_varinfo; ++i) {
11251 int vreg = MONO_VARINFO (cfg, i)->vreg;
11254 if (live_range_start [vreg]) {
11255 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
11257 ins->inst_c1 = vreg;
11258 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
11260 if (live_range_end [vreg]) {
11261 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
11263 ins->inst_c1 = vreg;
11264 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
11269 g_free (live_range_start);
11270 g_free (live_range_end);
11271 g_free (live_range_start_bb);
11272 g_free (live_range_end_bb);
11277 * - use 'iadd' instead of 'int_add'
11278 * - handling ovf opcodes: decompose in method_to_ir.
11279 * - unify iregs/fregs
11280 * -> partly done, the missing parts are:
11281 * - a more complete unification would involve unifying the hregs as well, so
11282 * code wouldn't need if (fp) all over the place. but that would mean the hregs
11283 * would no longer map to the machine hregs, so the code generators would need to
11284 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
11285 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
11286 * fp/non-fp branches speeds it up by about 15%.
11287 * - use sext/zext opcodes instead of shifts
11289 * - get rid of TEMPLOADs if possible and use vregs instead
11290 * - clean up usage of OP_P/OP_ opcodes
11291 * - cleanup usage of DUMMY_USE
11292 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
11294 * - set the stack type and allocate a dreg in the EMIT_NEW macros
11295 * - get rid of all the <foo>2 stuff when the new JIT is ready.
11296 * - make sure handle_stack_args () is called before the branch is emitted
11297 * - when the new IR is done, get rid of all unused stuff
11298 * - COMPARE/BEQ as separate instructions or unify them ?
11299 * - keeping them separate allows specialized compare instructions like
11300 * compare_imm, compare_membase
11301 * - most back ends unify fp compare+branch, fp compare+ceq
11302 * - integrate mono_save_args into inline_method
 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
11304 * - handle long shift opts on 32 bit platforms somehow: they require
11305 * 3 sregs (2 for arg1 and 1 for arg2)
11306 * - make byref a 'normal' type.
11307 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
11308 * variable if needed.
11309 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11310 * like inline_method.
11311 * - remove inlining restrictions
11312 * - fix LNEG and enable cfold of INEG
11313 * - generalize x86 optimizations like ldelema as a peephole optimization
11314 * - add store_mem_imm for amd64
11315 * - optimize the loading of the interruption flag in the managed->native wrappers
11316 * - avoid special handling of OP_NOP in passes
11317 * - move code inserting instructions into one function/macro.
11318 * - try a coalescing phase after liveness analysis
11319 * - add float -> vreg conversion + local optimizations on !x86
11320 * - figure out how to handle decomposed branches during optimizations, ie.
11321 * compare+branch, op_jump_table+op_br etc.
11322 * - promote RuntimeXHandles to vregs
11323 * - vtype cleanups:
11324 * - add a NEW_VARLOADA_VREG macro
11325 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11326 * accessing vtype fields.
11327 * - get rid of I8CONST on 64 bit platforms
11328 * - dealing with the increase in code size due to branches created during opcode
11330 * - use extended basic blocks
11331 * - all parts of the JIT
11332 * - handle_global_vregs () && local regalloc
11333 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11334 * - sources of increase in code size:
11337 * - isinst and castclass
11338 * - lvregs not allocated to global registers even if used multiple times
11339 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11341 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11342 * - add all micro optimizations from the old JIT
11343 * - put tree optimizations into the deadce pass
11344 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11345 * specific function.
11346 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11347 * fcompare + branchCC.
11348 * - create a helper function for allocating a stack slot, taking into account
11349 * MONO_CFG_HAS_SPILLUP.
11351 * - merge the ia64 switch changes.
11352 * - optimize mono_regstate2_alloc_int/float.
11353 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11354 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11355 * parts of the tree could be separated by other instructions, killing the tree
11356 * arguments, or stores killing loads etc. Also, should we fold loads into other
11357 * instructions if the result of the load is used multiple times ?
11358 * - make the REM_IMM optimization in mini-x86.c arch-independent.
11359 * - LAST MERGE: 108395.
11360 * - when returning vtypes in registers, generate IR and append it to the end of the
11361 * last bb instead of doing it in the epilog.
11362 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
11370 - When to decompose opcodes:
11371 - earlier: this makes some optimizations hard to implement, since the low level IR
   no longer contains the necessary information. But it is easier to do.
11373 - later: harder to implement, enables more optimizations.
11374 - Branches inside bblocks:
11375 - created when decomposing complex opcodes.
11376 - branches to another bblock: harmless, but not tracked by the branch
11377 optimizations, so need to branch to a label at the start of the bblock.
11378 - branches to inside the same bblock: very problematic, trips up the local
     reg allocator. Can be fixed by splitting the current bblock, but that is a
11380 complex operation, since some local vregs can become global vregs etc.
11381 - Local/global vregs:
11382 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
11383 local register allocator.
11384 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
11385 structure, created by mono_create_var (). Assigned to hregs or the stack by
11386 the global register allocator.
11387 - When to do optimizations like alu->alu_imm:
11388 - earlier -> saves work later on since the IR will be smaller/simpler
11389 - later -> can work on more instructions
11390 - Handling of valuetypes:
11391 - When a vtype is pushed on the stack, a new temporary is created, an
11392 instruction computing its address (LDADDR) is emitted and pushed on
11393 the stack. Need to optimize cases when the vtype is used immediately as in
11394 argument passing, stloc etc.
11395 - Instead of the to_end stuff in the old JIT, simply call the function handling
11396 the values on the stack before emitting the last instruction of the bb.
11399 #endif /* DISABLE_JIT */